src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"

  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"

  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_aarch64.inline.hpp"
  47 
  48 
  49 #ifndef PRODUCT
  50 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  51 #else
  52 #define COMMENT(x)
  53 #endif
  54 
  55 NEEDS_CLEANUP // remove these definitions?
  56 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 

 441     if (UseHeavyMonitors) {
 442       __ b(*stub->entry());
 443     } else {
 444       __ unlock_object(r5, r4, r0, *stub->entry());
 445     }
 446     __ bind(*stub->continuation());
 447   }
 448 
 449   if (compilation()->env()->dtrace_method_probes()) {
 450     __ mov(c_rarg0, rthread);
 451     __ mov_metadata(c_rarg1, method()->constant_encoding());
 452     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 453   }
 454 
 455   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 456     __ mov(r0, r19);  // Restore the exception
 457   }
 458 
 459   // remove the activation and dispatch to the unwind handler
 460   __ block_comment("remove_frame and dispatch to the unwind handler");
 461   __ remove_frame(initial_frame_size_in_bytes());
 462   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 463 
 464   // Emit the slow path assembly
 465   if (stub != NULL) {
 466     stub->emit_code(this);
 467   }
 468 
 469   return offset;
 470 }
 471 
 472 
 473 int LIR_Assembler::emit_deopt_handler() {
 474   // if the last instruction is a call (typically to do a throw which
 475   // is coming at the end after block reordering) the return address
 476   // must still point into the code area in order to avoid assertion
 477   // failures when searching for the corresponding bci => add a nop
 478   // (was bug 5/14/1999 - gri)
 479   __ nop();
 480 
 481   // generate code for exception handler

 492   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 493   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 494   __ end_a_stub();
 495 
 496   return offset;
 497 }
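The trailing nop matters because HotSpot maps return addresses back to bytecode indices. A minimal stand-alone illustration of the boundary condition (not HotSpot code):

#include <cstddef>
// If the last instruction is a call, the return address equals code_end(),
// which no longer maps into the method; the nop keeps it strictly inside.
bool return_address_in_code(const char* ra, const char* code_begin, const char* code_end) {
  return ra >= code_begin && ra < code_end;   // fails when ra == code_end
}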
 498 
 499 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 500   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 501   int pc_offset = code_offset();
 502   flush_debug_info(pc_offset);
 503   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 504   if (info->exception_handlers() != NULL) {
 505     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 506   }
 507 }
 508 
 509 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 510   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 511 
 512   // Pop the stack before the safepoint code
 513   __ remove_frame(initial_frame_size_in_bytes());
 514 
 515   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 516     __ reserved_stack_check();
 517   }
 518 
 519   code_stub->set_safepoint_offset(__ offset());
 520   __ relocate(relocInfo::poll_return_type);
 521   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 522   __ ret(lr);
 523 }
 524 
 525 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 526   guarantee(info != NULL, "Shouldn't be NULL");
 527   __ get_polling_page(rscratch1, relocInfo::poll_type);
 528   add_debug_info_for_branch(info);  // This isn't just debug info:
 529                                     // it's the oop map
 530   __ read_polling_page(rscratch1, relocInfo::poll_type);
 531   return __ offset();
 532 }
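safepoint_poll works by reading a dedicated polling page. A minimal sketch of the idea, assuming the VM arms a safepoint by memory-protecting that page so the load faults and a signal handler stops the thread:

#include <cstdint>
inline void poll_sketch(const volatile uintptr_t* polling_page) {
  (void)*polling_page;   // harmless load normally; faults only when the page is armed
}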
 533 
 534 
 535 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 536   if (from_reg == r31_sp)
 537     from_reg = sp;
 538   if (to_reg == r31_sp)
 539     to_reg = sp;
 540   __ mov(to_reg, from_reg);
 541 }
 542 
 543 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 544 

 550 
 551   switch (c->type()) {
 552     case T_INT: {
 553       assert(patch_code == lir_patch_none, "no patching handled here");
 554       __ movw(dest->as_register(), c->as_jint());
 555       break;
 556     }
 557 
 558     case T_ADDRESS: {
 559       assert(patch_code == lir_patch_none, "no patching handled here");
 560       __ mov(dest->as_register(), c->as_jint());
 561       break;
 562     }
 563 
 564     case T_LONG: {
 565       assert(patch_code == lir_patch_none, "no patching handled here");
 566       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 567       break;
 568     }
 569 

 570     case T_OBJECT: {
 571         if (patch_code == lir_patch_none) {
 572           jobject2reg(c->as_jobject(), dest->as_register());
 573         } else {
 574           jobject2reg_with_patching(dest->as_register(), info);
 575         }
 576       break;
 577     }
 578 
 579     case T_METADATA: {
 580       if (patch_code != lir_patch_none) {
 581         klass2reg_with_patching(dest->as_register(), info);
 582       } else {
 583         __ mov_metadata(dest->as_register(), c->as_metadata());
 584       }
 585       break;
 586     }
 587 
 588     case T_FLOAT: {
 589       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 590         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 591       } else {
 592         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 593         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 594       }
 595       break;
 596     }
 597 
 598     case T_DOUBLE: {
 599       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 600         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 601       } else {
 602         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 603         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 604       }
 605       break;
 606     }
 607 
 608     default:
 609       ShouldNotReachHere();
 610   }
 611 }
 612 
 613 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 614   LIR_Const* c = src->as_constant_ptr();
 615   switch (c->type()) {

 616   case T_OBJECT:
 617     {
 618       if (! c->as_jobject())
 619         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 620       else {
 621         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 622         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 623       }
 624     }
 625     break;
 626   case T_ADDRESS:
 627     {
 628       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 629       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 630     }
 631   case T_INT:
 632   case T_FLOAT:
 633     {
 634       Register reg = zr;
 635       if (c->as_jint_bits() == 0)

 662 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 663   assert(src->is_constant(), "should not call otherwise");
 664   LIR_Const* c = src->as_constant_ptr();
 665   LIR_Address* to_addr = dest->as_address_ptr();
 666 
 667   void (Assembler::* insn)(Register Rt, const Address &adr);
 668 
 669   switch (type) {
 670   case T_ADDRESS:
 671     assert(c->as_jint() == 0, "should be");
 672     insn = &Assembler::str;
 673     break;
 674   case T_LONG:
 675     assert(c->as_jlong() == 0, "should be");
 676     insn = &Assembler::str;
 677     break;
 678   case T_INT:
 679     assert(c->as_jint() == 0, "should be");
 680     insn = &Assembler::strw;
 681     break;

 682   case T_OBJECT:
 683   case T_ARRAY:
 684     assert(c->as_jobject() == 0, "should be");
 685     if (UseCompressedOops && !wide) {
 686       insn = &Assembler::strw;
 687     } else {
 688       insn = &Assembler::str;
 689     }
 690     break;
 691   case T_CHAR:
 692   case T_SHORT:
 693     assert(c->as_jint() == 0, "should be");
 694     insn = &Assembler::strh;
 695     break;
 696   case T_BOOLEAN:
 697   case T_BYTE:
 698     assert(c->as_jint() == 0, "should be");
 699     insn = &Assembler::strb;
 700     break;
 701   default:
 702     ShouldNotReachHere();
 703     insn = &Assembler::str;  // unreachable
 704   }
 705 
 706   if (info) add_debug_info_for_null_check_here(info);
 707   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 708 }
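const2mem picks the store width once and then dispatches through a pointer to member function. The same shape with stand-in types (illustrative, not HotSpot's Assembler API):

struct Asm {
  void str (int rt, long adr) { (void)rt; (void)adr; /* 64-bit store */ }
  void strw(int rt, long adr) { (void)rt; (void)adr; /* 32-bit store */ }
};
void store_zero(Asm* a, bool word_sized) {
  void (Asm::*insn)(int, long) = word_sized ? &Asm::strw : &Asm::str;
  (a->*insn)(0 /* zr */, 0 /* addr */);   // one call site, two encodings
}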
 709 
 710 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 711   assert(src->is_register(), "should not call otherwise");
 712   assert(dest->is_register(), "should not call otherwise");
 713 
 714   // move between cpu-registers
 715   if (dest->is_single_cpu()) {
 716     if (src->type() == T_LONG) {
 717       // Can do LONG -> OBJECT
 718       move_regs(src->as_register_lo(), dest->as_register());
 719       return;
 720     }
 721     assert(src->is_single_cpu(), "must match");
 722     if (src->type() == T_OBJECT) {
 723       __ verify_oop(src->as_register());
 724     }
 725     move_regs(src->as_register(), dest->as_register());
 726 
 727   } else if (dest->is_double_cpu()) {
 728     if (is_reference_type(src->type())) {
 729       // Surprising to me, but we can see a move of a long to T_OBJECT
 730       __ verify_oop(src->as_register());
 731       move_regs(src->as_register(), dest->as_register_lo());
 732       return;
 733     }
 734     assert(src->is_double_cpu(), "must match");
 735     Register f_lo = src->as_register_lo();
 736     Register f_hi = src->as_register_hi();
 737     Register t_lo = dest->as_register_lo();
 738     Register t_hi = dest->as_register_hi();
 739     assert(f_hi == f_lo, "must be same");
 740     assert(t_hi == t_lo, "must be same");
 741     move_regs(f_lo, t_lo);
 742 

 802 
 803     if (UseCompressedOops && !wide) {
 804       __ encode_heap_oop(compressed_src, src->as_register());
 805     } else {
 806       compressed_src = src->as_register();
 807     }
 808   }
 809 
 810   int null_check_here = code_offset();
 811   switch (type) {
 812     case T_FLOAT: {
 813       __ strs(src->as_float_reg(), as_Address(to_addr));
 814       break;
 815     }
 816 
 817     case T_DOUBLE: {
 818       __ strd(src->as_double_reg(), as_Address(to_addr));
 819       break;
 820     }
 821 

 822     case T_ARRAY:   // fall through
 823     case T_OBJECT:  // fall through
 824       if (UseCompressedOops && !wide) {
 825         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 826       } else {
 827          __ str(compressed_src, as_Address(to_addr));
 828       }
 829       break;
 830     case T_METADATA:
 831       // We get here to store a method pointer to the stack to pass to
 832       // a dtrace runtime call. This can't work on 64 bit with
 833       // compressed klass ptrs: T_METADATA can be a compressed klass
 834       // ptr or a 64 bit method pointer.
 835       ShouldNotReachHere();
 836       __ str(src->as_register(), as_Address(to_addr));
 837       break;
 838     case T_ADDRESS:
 839       __ str(src->as_register(), as_Address(to_addr));
 840       break;
 841     case T_INT:

 931   add_call_info_here(info);
 932 }
 933 
 934 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 935 
 936   LIR_Opr temp;
 937   if (type == T_LONG || type == T_DOUBLE)
 938     temp = FrameMap::rscratch1_long_opr;
 939   else
 940     temp = FrameMap::rscratch1_opr;
 941 
 942   stack2reg(src, temp, src->type());
 943   reg2stack(temp, dest, dest->type(), false);
 944 }
 945 
 946 
 947 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 948   LIR_Address* addr = src->as_address_ptr();
 949   LIR_Address* from_addr = src->as_address_ptr();
 950 
 951   if (addr->base()->type() == T_OBJECT) {
 952     __ verify_oop(addr->base()->as_pointer_register());
 953   }
 954 
 955   if (patch_code != lir_patch_none) {
 956     deoptimize_trap(info);
 957     return;
 958   }
 959 
 960   if (info != NULL) {
 961     add_debug_info_for_null_check_here(info);
 962   }
 963   int null_check_here = code_offset();
 964   switch (type) {
 965     case T_FLOAT: {
 966       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 967       break;
 968     }
 969 
 970     case T_DOUBLE: {
 971       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 972       break;
 973     }
 974 

 975     case T_ARRAY:   // fall through
 976     case T_OBJECT:  // fall through
 977       if (UseCompressedOops && !wide) {
 978         __ ldrw(dest->as_register(), as_Address(from_addr));
 979       } else {
 980          __ ldr(dest->as_register(), as_Address(from_addr));
 981       }
 982       break;
 983     case T_METADATA:
 984       // We get here to load a method pointer from the stack, stored
 985       // there to pass to a dtrace runtime call. This can't work on 64 bit
 986       // with compressed klass ptrs: T_METADATA can be a compressed klass
 987       // ptr or a 64 bit method pointer.
 988       ShouldNotReachHere();
 989       __ ldr(dest->as_register(), as_Address(from_addr));
 990       break;
 991     case T_ADDRESS:
 992       __ ldr(dest->as_register(), as_Address(from_addr));
 993       break;
 994     case T_INT:

1014     case T_SHORT:
1015       __ ldrsh(dest->as_register(), as_Address(from_addr));
1016       break;
1017 
1018     default:
1019       ShouldNotReachHere();
1020   }
1021 
1022   if (is_reference_type(type)) {
1023     if (UseCompressedOops && !wide) {
1024       __ decode_heap_oop(dest->as_register());
1025     }
1026 
1027     if (!UseZGC) {
1028       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1029       __ verify_oop(dest->as_register());
1030     }
1031   }
1032 }
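The decode_heap_oop step above expands a 32-bit narrow oop into a full pointer. A rough model of the arithmetic, assuming a non-zero heap base and a 3-bit shift (HotSpot chooses both at startup):

#include <cstdint>
uint64_t decode_heap_oop_sketch(uint32_t narrow, uint64_t heap_base, unsigned shift = 3) {
  return narrow == 0 ? 0 : heap_base + (uint64_t(narrow) << shift);   // NULL stays NULL
}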
1033 
1034 
1035 int LIR_Assembler::array_element_size(BasicType type) const {
1036   int elem_size = type2aelembytes(type);
1037   return exact_log2(elem_size);
1038 }
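The log2 returned here is used as an addressing scale when indexing into arrays, roughly (illustrative helper):

#include <cstdint>
uint64_t element_addr(uint64_t base, int header_size, uint64_t index, int scale) {
  return base + header_size + (index << scale);   // scale = exact_log2(elem_size)
}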
1039 
1040 
1041 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1042   switch (op->code()) {
1043   case lir_idiv:
1044   case lir_irem:
1045     arithmetic_idiv(op->code(),
1046                     op->in_opr1(),
1047                     op->in_opr2(),
1048                     op->in_opr3(),
1049                     op->result_opr(),
1050                     op->info());
1051     break;
1052   case lir_fmad:
1053     __ fmaddd(op->result_opr()->as_double_reg(),

1205     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1206                                InstanceKlass::init_state_offset()));
1207     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1208     add_debug_info_for_null_check_here(op->stub()->info());
1209     __ br(Assembler::NE, *op->stub()->entry());
1210   }
1211   __ allocate_object(op->obj()->as_register(),
1212                      op->tmp1()->as_register(),
1213                      op->tmp2()->as_register(),
1214                      op->header_size(),
1215                      op->object_size(),
1216                      op->klass()->as_register(),
1217                      *op->stub()->entry());
1218   __ bind(*op->stub()->continuation());
1219 }
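The init_state guard before the fast-path allocation reduces to a single byte compare. In C form (a sketch, with the state value passed in rather than HotSpot's InstanceKlass constant):

#include <cstdint>
bool can_fast_allocate(uint8_t init_state, uint8_t fully_initialized) {
  return init_state == fully_initialized;   // cmpw; br NE -> slow-path stub
}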
1220 
1221 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1222   Register len =  op->len()->as_register();
1223   __ uxtw(len, len);
1224 
1225   if (UseSlowPath ||
1226       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1227       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1228     __ b(*op->stub()->entry());
1229   } else {
1230     Register tmp1 = op->tmp1()->as_register();
1231     Register tmp2 = op->tmp2()->as_register();
1232     Register tmp3 = op->tmp3()->as_register();
1233     if (len == tmp1) {
1234       tmp1 = tmp3;
1235     } else if (len == tmp2) {
1236       tmp2 = tmp3;
1237     } else if (len == tmp3) {
1238       // everything is ok
1239     } else {
1240       __ mov(tmp3, len);
1241     }
1242     __ allocate_array(op->obj()->as_register(),
1243                       len,
1244                       tmp1,
1245                       tmp2,

1311     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1312   }
1313   Label profile_cast_success, profile_cast_failure;
1314   Label *success_target = should_profile ? &profile_cast_success : success;
1315   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1316 
1317   if (obj == k_RInfo) {
1318     k_RInfo = dst;
1319   } else if (obj == klass_RInfo) {
1320     klass_RInfo = dst;
1321   }
1322   if (k->is_loaded() && !UseCompressedClassPointers) {
1323     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1324   } else {
1325     Rtmp1 = op->tmp3()->as_register();
1326     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1327   }
1328 
1329   assert_different_registers(obj, k_RInfo, klass_RInfo);
1330 

1331     if (should_profile) {
1332       Label not_null;
1333       __ cbnz(obj, not_null);
1334       // Object is null; update MDO and exit
1335       Register mdo  = klass_RInfo;
1336       __ mov_metadata(mdo, md->constant_encoding());
1337       Address data_addr
1338         = __ form_address(rscratch2, mdo,
1339                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1340                           0);
1341       __ ldrb(rscratch1, data_addr);
1342       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1343       __ strb(rscratch1, data_addr);
1344       __ b(*obj_is_null);
1345       __ bind(not_null);
1346     } else {
1347       __ cbz(obj, *obj_is_null);
1348     }

1349 
1350   if (!k->is_loaded()) {
1351     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1352   } else {
1353     __ mov_metadata(k_RInfo, k->constant_encoding());
1354   }
1355   __ verify_oop(obj);
1356 
1357   if (op->fast_check()) {
1358     // get object class
1359     // not a safepoint as obj null check happens earlier
1360     __ load_klass(rscratch1, obj);
1361     __ cmp( rscratch1, k_RInfo);
1362 
1363     __ br(Assembler::NE, *failure_target);
1364     // successful cast, fall through to profile or jump
1365   } else {
1366     // get object class
1367     // not a safepoint as obj null check happens earlier
1368     __ load_klass(klass_RInfo, obj);

1517     __ bind(success);
1518     if (dst != obj) {
1519       __ mov(dst, obj);
1520     }
1521   } else if (code == lir_instanceof) {
1522     Register obj = op->object()->as_register();
1523     Register dst = op->result_opr()->as_register();
1524     Label success, failure, done;
1525     emit_typecheck_helper(op, &success, &failure, &failure);
1526     __ bind(failure);
1527     __ mov(dst, zr);
1528     __ b(done);
1529     __ bind(success);
1530     __ mov(dst, 1);
1531     __ bind(done);
1532   } else {
1533     ShouldNotReachHere();
1534   }
1535 }
1536 
1537 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1538   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1539   __ cset(rscratch1, Assembler::NE);
1540   __ membar(__ AnyAny);
1541 }
1542 
1543 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1544   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1545   __ cset(rscratch1, Assembler::NE);
1546   __ membar(__ AnyAny);
1547 }
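casw/casl leave 0 in rscratch1 on success and 1 on failure (the cset on NE), followed by a full fence. Modeled with std::atomic as a sketch:

#include <atomic>
#include <cstdint>
int cas_sketch(std::atomic<uint32_t>* addr, uint32_t cmpval, uint32_t newval) {
  bool ok = addr->compare_exchange_strong(cmpval, newval);
  std::atomic_thread_fence(std::memory_order_seq_cst);   // ~ __ membar(AnyAny)
  return ok ? 0 : 1;                                     // ~ cset rscratch1, NE
}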
1548 
1549 
1550 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1551   assert(VM_Version::supports_cx8(), "wrong machine");
1552   Register addr;
1553   if (op->addr()->is_register()) {
1554     addr = as_reg(op->addr());
1555   } else {
1556     assert(op->addr()->is_address(), "what else?");

1956     }
1957 
1958     if (opr2->is_constant()) {
1959       bool is_32bit = false; // width of register operand
1960       jlong imm;
1961 
1962       switch(opr2->type()) {
1963       case T_INT:
1964         imm = opr2->as_constant_ptr()->as_jint();
1965         is_32bit = true;
1966         break;
1967       case T_LONG:
1968         imm = opr2->as_constant_ptr()->as_jlong();
1969         break;
1970       case T_ADDRESS:
1971         imm = opr2->as_constant_ptr()->as_jint();
1972         break;
1973       case T_METADATA:
1974         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1975         break;

1976       case T_OBJECT:
1977       case T_ARRAY:
1978         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1979         __ cmpoop(reg1, rscratch1);
1980         return;
1981       default:
1982         ShouldNotReachHere();
1983         imm = 0;  // unreachable
1984         break;
1985       }
1986 
1987       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1988         if (is_32bit)
1989           __ cmpw(reg1, imm);
1990         else
1991           __ subs(zr, reg1, imm);
1992         return;
1993       } else {
1994         __ mov(rscratch1, imm);
1995         if (is_32bit)

2030     __ cmp(left->as_register_lo(), right->as_register_lo());
2031     __ mov(dst->as_register(), (uint64_t)-1L);
2032     __ br(Assembler::LT, done);
2033     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2034     __ bind(done);
2035   } else {
2036     ShouldNotReachHere();
2037   }
2038 }
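The cmp/mov/br/csinc sequence above is a branch-light three-way compare. Its meaning in plain C++:

#include <cstdint>
int cmp_long_sketch(int64_t a, int64_t b) {
  if (a < b) return -1;        // mov dst, -1; br LT, done
  return (a == b) ? 0 : 1;     // csinc dst, zr, zr, EQ  (0 if EQ, else zr + 1)
}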
2039 
2040 
2041 void LIR_Assembler::align_call(LIR_Code code) {  }
2042 
2043 
2044 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2045   address call = __ trampoline_call(Address(op->addr(), rtype));
2046   if (call == NULL) {
2047     bailout("trampoline stub overflow");
2048     return;
2049   }
2050   add_call_info(code_offset(), op->info());
2051 }
2052 
2053 
2054 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2055   address call = __ ic_call(op->addr());
2056   if (call == NULL) {
2057     bailout("trampoline stub overflow");
2058     return;
2059   }
2060   add_call_info(code_offset(), op->info());
2061 }
2062 
2063 void LIR_Assembler::emit_static_call_stub() {
2064   address call_pc = __ pc();
2065   address stub = __ start_a_stub(call_stub_size());
2066   if (stub == NULL) {
2067     bailout("static call stub overflow");
2068     return;
2069   }
2070 
2071   int start = __ offset();
2072 
2073   __ relocate(static_stub_Relocation::spec(call_pc));
2074   __ emit_static_call_stub();
2075 
2076   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2077         <= call_stub_size(), "stub too big");
2078   __ end_a_stub();
2079 }
2080 

2122   __ b(_unwind_handler_entry);
2123 }
2124 
2125 
2126 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2127   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2128   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2129 
2130   switch (left->type()) {
2131     case T_INT: {
2132       switch (code) {
2133       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2134       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2135       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2136       default:
2137         ShouldNotReachHere();
2138         break;
2139       }
2140       break;
2141     case T_LONG:

2142     case T_ADDRESS:
2143     case T_OBJECT:
2144       switch (code) {
2145       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2146       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2147       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2148       default:
2149         ShouldNotReachHere();
2150         break;
2151       }
2152       break;
2153     default:
2154       ShouldNotReachHere();
2155       break;
2156     }
2157   }
2158 }
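The selector maps Java shift semantics onto the sized AArch64 variable-shift instructions; both Java and lslvw/asrvw/lsrvw mask the count to the register width. Equivalent C++ for the T_INT lane:

#include <cstdint>
int32_t  shl_w (int32_t v,  int32_t n) { return v << (n & 31); }    // lslvw
int32_t  shr_w (int32_t v,  int32_t n) { return v >> (n & 31); }    // asrvw (arithmetic)
uint32_t ushr_w(uint32_t v, int32_t n) { return v >> (n & 31); }    // lsrvw (logical)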
2159 
2160 
2161 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2162   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2163   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2164 
2165   switch (left->type()) {
2166     case T_INT: {
2167       switch (code) {
2168       case lir_shl:  __ lslw (dreg, lreg, count); break;
2169       case lir_shr:  __ asrw (dreg, lreg, count); break;
2170       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2171       default:
2172         ShouldNotReachHere();
2173         break;
2174       }
2175       break;
2176     case T_LONG:
2177     case T_ADDRESS:

2178     case T_OBJECT:
2179       switch (code) {
2180       case lir_shl:  __ lsl (dreg, lreg, count); break;
2181       case lir_shr:  __ asr (dreg, lreg, count); break;
2182       case lir_ushr: __ lsr (dreg, lreg, count); break;
2183       default:
2184         ShouldNotReachHere();
2185         break;
2186       }
2187       break;
2188     default:
2189       ShouldNotReachHere();
2190       break;
2191     }
2192   }
2193 }
2194 
2195 
2196 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2197   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");

2202 
2203 
2204 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2205   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2206   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2207   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2208   __ mov (rscratch1, c);
2209   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2210 }
2211 
2212 
2213 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2214   ShouldNotReachHere();
2215   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2216   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2217   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2218   __ lea(rscratch1, __ constant_oop_address(o));
2219   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2220 }
2221 
2222 
2223 // This code replaces a call to arraycopy; no exception may
2224 // be thrown in this code, they must be thrown in the System.arraycopy
2225 // activation frame; we could save some checks if this would not be the case
2226 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2227   ciArrayKlass* default_type = op->expected_type();
2228   Register src = op->src()->as_register();
2229   Register dst = op->dst()->as_register();
2230   Register src_pos = op->src_pos()->as_register();
2231   Register dst_pos = op->dst_pos()->as_register();
2232   Register length  = op->length()->as_register();
2233   Register tmp = op->tmp()->as_register();
2234 
2235   CodeStub* stub = op->stub();
2236   int flags = op->flags();
2237   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2238   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2239 
2240   // if we don't know anything, just go through the generic arraycopy
2241   if (default_type == NULL // || basic_type == T_OBJECT
2242       ) {
2243     Label done;
2244     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2245 
2246     // Save the arguments in case the generic arraycopy fails and we
2247     // have to fall back to the JNI stub
2248     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2249     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2250     __ str(src,              Address(sp, 4*BytesPerWord));
2251 
2252     address copyfunc_addr = StubRoutines::generic_arraycopy();
2253     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2254 
2255     // The arguments are in java calling convention so we shift them
2256     // to C convention
2257     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2258     __ mov(c_rarg0, j_rarg0);
2259     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2273     __ cbz(r0, *stub->continuation());
2274 
2275     // Reload values from the stack so they are where the stub
2276     // expects them.
2277     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2278     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2279     __ ldr(src,              Address(sp, 4*BytesPerWord));
2280 
2281     // r0 is -1^K where K == partial copied count
2282     __ eonw(rscratch1, r0, zr);
2283     // adjust length down and src/end pos up by partial copied count
2284     __ subw(length, length, rscratch1);
2285     __ addw(src_pos, src_pos, rscratch1);
2286     __ addw(dst_pos, dst_pos, rscratch1);
2287     __ b(*stub->entry());
2288 
2289     __ bind(*stub->continuation());
2290     return;
2291   }
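The fixup above relies on the stub's return convention: r0 holds -1 ^ K (that is, ~K) where K elements were already copied, so eonw against zr recovers K itself. In plain C++:

#include <cstdint>
void fixup_partial_copy(int32_t r0, int32_t& length, int32_t& src_pos, int32_t& dst_pos) {
  int32_t k = ~r0;       // eonw rscratch1, r0, zr
  length  -= k;          // subw length, length, rscratch1
  src_pos += k;          // addw src_pos, src_pos, rscratch1
  dst_pos += k;          // addw dst_pos, dst_pos, rscratch1
}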
2292 
2293   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2294 
2295   int elem_size = type2aelembytes(basic_type);
2296   int scale = exact_log2(elem_size);
2297 
2298   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2299   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2300   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2301   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2302 
2303   // test for NULL
2304   if (flags & LIR_OpArrayCopy::src_null_check) {
2305     __ cbz(src, *stub->entry());
2306   }
2307   if (flags & LIR_OpArrayCopy::dst_null_check) {
2308     __ cbz(dst, *stub->entry());
2309   }
2310 
2311   // If the compiler was not able to prove that exact type of the source or the destination
2312   // of the arraycopy is an array type, check at runtime if the source or the destination is

2843         // first time here. Set profile type.
2844         __ str(tmp, mdo_addr);
2845       } else {
2846         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2847                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2848 
2849         __ ldr(tmp, mdo_addr);
2850         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2851 
2852         __ orr(tmp, tmp, TypeEntries::type_unknown);
2853         __ str(tmp, mdo_addr);
2854         // FIXME: Write barrier needed here?
2855       }
2856     }
2857 
2858     __ bind(next);
2859   }
2860   COMMENT("} emit_profile_type");
2861 }
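The ldr/orr/str triple above is an intentionally racy read-modify-write on the MDO cell; profile data tolerates lost updates, so no barrier or CAS is required. A condensed form:

#include <cstdint>
void mark_type_unknown(uint64_t* mdo_cell, uint64_t type_unknown_bit) {
  *mdo_cell |= type_unknown_bit;   // ldr; orr; str - lost updates are acceptable
}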
2862 
2863 
2864 void LIR_Assembler::align_backward_branch_target() {
2865 }
2866 
2867 
2868 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2869   // tmp must be unused
2870   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2871 
2872   if (left->is_single_cpu()) {
2873     assert(dest->is_single_cpu(), "expect single result reg");
2874     __ negw(dest->as_register(), left->as_register());
2875   } else if (left->is_double_cpu()) {
2876     assert(dest->is_double_cpu(), "expect double result reg");
2877     __ neg(dest->as_register_lo(), left->as_register_lo());
2878   } else if (left->is_single_fpu()) {
2879     assert(dest->is_single_fpu(), "expect single float result reg");
2880     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2881   } else {
2882     assert(left->is_double_fpu(), "expect double float operand reg");

2982 void LIR_Assembler::membar_loadload() {
2983   __ membar(Assembler::LoadLoad);
2984 }
2985 
2986 void LIR_Assembler::membar_storestore() {
2987   __ membar(MacroAssembler::StoreStore);
2988 }
2989 
2990 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2991 
2992 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
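A rough std::atomic analogue of these barriers (StoreLoad is the expensive one; on AArch64 it lowers to dmb ish):

#include <atomic>
void loadload_sketch()   { std::atomic_thread_fence(std::memory_order_acquire); }
void storestore_sketch() { std::atomic_thread_fence(std::memory_order_release); }
void storeload_sketch()  { std::atomic_thread_fence(std::memory_order_seq_cst); }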
2993 
2994 void LIR_Assembler::on_spin_wait() {
2995   __ spin_wait();
2996 }
2997 
2998 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2999   __ mov(result_reg->as_register(), rthread);
3000 }
3001 
3002 
3003 void LIR_Assembler::peephole(LIR_List *lir) {
3004 #if 0
3005   if (tableswitch_count >= max_tableswitches)
3006     return;
3007 
3008   /*
3009     This finite-state automaton recognizes sequences of compare-and-
3010     branch instructions.  We will turn them into a tableswitch.  You
3011     could argue that C1 really shouldn't be doing this sort of
3012     optimization, but without it the code is really horrible.
3013   */
3014 
3015   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3016   int first_key, last_key = -2147483648;
3017   int next_key = 0;
3018   int start_insn = -1;
3019   int last_insn = -1;
3020   Register reg = noreg;
3021   LIR_Opr reg_opr;

3129 #endif
3130 }
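The disabled peephole would collapse a run of compare-and-branch instructions over consecutive keys into a bounds check plus an indexed jump. The dispatch it aims for, sketched:

int tableswitch_sketch(int key, int first_key, int n_targets) {
  int idx = key - first_key;
  if (idx < 0 || idx >= n_targets) return -1;   // default target
  return idx;                                   // index into a jump table
}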
3131 
3132 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3133   Address addr = as_Address(src->as_address_ptr());
3134   BasicType type = src->type();
3135   bool is_oop = is_reference_type(type);
3136 
3137   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3138   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3139 
3140   switch(type) {
3141   case T_INT:
3142     xchg = &MacroAssembler::atomic_xchgalw;
3143     add = &MacroAssembler::atomic_addalw;
3144     break;
3145   case T_LONG:
3146     xchg = &MacroAssembler::atomic_xchgal;
3147     add = &MacroAssembler::atomic_addal;
3148     break;

3149   case T_OBJECT:
3150   case T_ARRAY:
3151     if (UseCompressedOops) {
3152       xchg = &MacroAssembler::atomic_xchgalw;
3153       add = &MacroAssembler::atomic_addalw;
3154     } else {
3155       xchg = &MacroAssembler::atomic_xchgal;
3156       add = &MacroAssembler::atomic_addal;
3157     }
3158     break;
3159   default:
3160     ShouldNotReachHere();
3161     xchg = &MacroAssembler::atomic_xchgal;
3162     add = &MacroAssembler::atomic_addal; // unreachable
3163   }
3164 
3165   switch (code) {
3166   case lir_xadd:
3167     {
3168       RegisterOrConstant inc;

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInlineKlass.hpp"
  37 #include "ci/ciInstance.hpp"
  38 #include "code/compiledIC.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "gc/shared/gc_globals.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_aarch64.inline.hpp"
  49 
  50 
  51 #ifndef PRODUCT
  52 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  53 #else
  54 #define COMMENT(x)
  55 #endif
  56 
  57 NEEDS_CLEANUP // remove these definitions?
  58 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  59 const Register SYNC_header = r0;   // synchronization header
  60 const Register SHIFT_count = r0;   // where count for shift operations must be
  61 
  62 #define __ _masm->
  63 

 443     if (UseHeavyMonitors) {
 444       __ b(*stub->entry());
 445     } else {
 446       __ unlock_object(r5, r4, r0, *stub->entry());
 447     }
 448     __ bind(*stub->continuation());
 449   }
 450 
 451   if (compilation()->env()->dtrace_method_probes()) {
 452     __ mov(c_rarg0, rthread);
 453     __ mov_metadata(c_rarg1, method()->constant_encoding());
 454     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 455   }
 456 
 457   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 458     __ mov(r0, r19);  // Restore the exception
 459   }
 460 
 461   // remove the activation and dispatch to the unwind handler
 462   __ block_comment("remove_frame and dispatch to the unwind handler");
 463   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 464   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 465 
 466   // Emit the slow path assembly
 467   if (stub != NULL) {
 468     stub->emit_code(this);
 469   }
 470 
 471   return offset;
 472 }
 473 
 474 
 475 int LIR_Assembler::emit_deopt_handler() {
 476   // if the last instruction is a call (typically to do a throw which
 477   // is coming at the end after block reordering) the return address
 478   // must still point into the code area in order to avoid assertion
 479   // failures when searching for the corresponding bci => add a nop
 480   // (was bug 5/14/1999 - gri)
 481   __ nop();
 482 
 483   // generate code for exception handler

 494   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 495   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 496   __ end_a_stub();
 497 
 498   return offset;
 499 }
 500 
 501 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 502   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 503   int pc_offset = code_offset();
 504   flush_debug_info(pc_offset);
 505   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 506   if (info->exception_handlers() != NULL) {
 507     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 508   }
 509 }
 510 
 511 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 512   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
 513 
 514   ciMethod* method = compilation()->method();
 515   if (InlineTypeReturnedAsFields && method->return_type()->is_inlinetype()) {
 516     ciInlineKlass* vk = method->return_type()->as_inline_klass();
 517     if (vk->can_be_returned_as_fields()) {
 518       address unpack_handler = vk->unpack_handler();
 519       assert(unpack_handler != NULL, "must be");
 520       __ far_call(RuntimeAddress(unpack_handler));
 521       // At this point, r0 points to the value object (for interpreter or C1 caller).
 522       // The fields of the object are copied into registers (for C2 caller).
 523     }
 524   }
 525 
 526   // Pop the stack before the safepoint code
 527   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 528 
 529   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 530     __ reserved_stack_check();
 531   }
 532 
 533   code_stub->set_safepoint_offset(__ offset());
 534   __ relocate(relocInfo::poll_return_type);
 535   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 536   __ ret(lr);
 537 }
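For the InlineTypeReturnedAsFields path above: an interpreter or C1 caller consumes the buffered object in r0, while a C2 caller expects the fields scalarized into registers; the unpack handler converts the first shape into the second. A purely illustrative model (the types and field-to-register mapping are hypothetical):

struct Point { int x, y; };                      // stand-in inline type
struct BufferedReturn   { Point* oop; };         // interpreter/C1 view: pointer in r0
struct ScalarizedReturn { int x, y; };           // C2 view: fields in registers
ScalarizedReturn unpack_sketch(const BufferedReturn& in) {
  return ScalarizedReturn{ in.oop->x, in.oop->y };   // role of the unpack handler
}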
 538 
 539 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 540   return (__ store_inline_type_fields_to_buf(vk, false));
 541 }
 542 
 543 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 544   guarantee(info != NULL, "Shouldn't be NULL");
 545   __ get_polling_page(rscratch1, relocInfo::poll_type);
 546   add_debug_info_for_branch(info);  // This isn't just debug info:
 547                                     // it's the oop map
 548   __ read_polling_page(rscratch1, relocInfo::poll_type);
 549   return __ offset();
 550 }
 551 
 552 
 553 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 554   if (from_reg == r31_sp)
 555     from_reg = sp;
 556   if (to_reg == r31_sp)
 557     to_reg = sp;
 558   __ mov(to_reg, from_reg);
 559 }
 560 
 561 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 562 

 568 
 569   switch (c->type()) {
 570     case T_INT: {
 571       assert(patch_code == lir_patch_none, "no patching handled here");
 572       __ movw(dest->as_register(), c->as_jint());
 573       break;
 574     }
 575 
 576     case T_ADDRESS: {
 577       assert(patch_code == lir_patch_none, "no patching handled here");
 578       __ mov(dest->as_register(), c->as_jint());
 579       break;
 580     }
 581 
 582     case T_LONG: {
 583       assert(patch_code == lir_patch_none, "no patching handled here");
 584       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 585       break;
 586     }
 587 
 588     case T_PRIMITIVE_OBJECT:
 589     case T_OBJECT: {
 590         if (patch_code != lir_patch_none) {
 591           jobject2reg_with_patching(dest->as_register(), info);
 592         } else {
 593           jobject2reg(c->as_jobject(), dest->as_register());
 594         }
 595       break;
 596     }
 597 
 598     case T_METADATA: {
 599       if (patch_code != lir_patch_none) {
 600         klass2reg_with_patching(dest->as_register(), info);
 601       } else {
 602         __ mov_metadata(dest->as_register(), c->as_metadata());
 603       }
 604       break;
 605     }
 606 
 607     case T_FLOAT: {
 608       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 609         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 610       } else {
 611         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 612         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 613       }
 614       break;
 615     }
 616 
 617     case T_DOUBLE: {
 618       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 619         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 620       } else {
 621         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 622         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 623       }
 624       break;
 625     }
 626 
 627     default:
 628       ShouldNotReachHere();
 629   }
 630 }
 631 
 632 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 633   LIR_Const* c = src->as_constant_ptr();
 634   switch (c->type()) {
 635   case T_PRIMITIVE_OBJECT:
 636   case T_OBJECT:
 637     {
 638       if (! c->as_jobject())
 639         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 640       else {
 641         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 642         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 643       }
 644     }
 645     break;
 646   case T_ADDRESS:
 647     {
 648       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 649       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 650     }
 651   case T_INT:
 652   case T_FLOAT:
 653     {
 654       Register reg = zr;
 655       if (c->as_jint_bits() == 0)

 682 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 683   assert(src->is_constant(), "should not call otherwise");
 684   LIR_Const* c = src->as_constant_ptr();
 685   LIR_Address* to_addr = dest->as_address_ptr();
 686 
 687   void (Assembler::* insn)(Register Rt, const Address &adr);
 688 
 689   switch (type) {
 690   case T_ADDRESS:
 691     assert(c->as_jint() == 0, "should be");
 692     insn = &Assembler::str;
 693     break;
 694   case T_LONG:
 695     assert(c->as_jlong() == 0, "should be");
 696     insn = &Assembler::str;
 697     break;
 698   case T_INT:
 699     assert(c->as_jint() == 0, "should be");
 700     insn = &Assembler::strw;
 701     break;
 702   case T_PRIMITIVE_OBJECT:
 703   case T_OBJECT:
 704   case T_ARRAY:
 705     // Non-null case is not handled on aarch64 but handled on x86
 706     // FIXME: do we need to add it here?
 707     assert(c->as_jobject() == 0, "should be");
 708     if (UseCompressedOops && !wide) {
 709       insn = &Assembler::strw;
 710     } else {
 711       insn = &Assembler::str;
 712     }
 713     break;
 714   case T_CHAR:
 715   case T_SHORT:
 716     assert(c->as_jint() == 0, "should be");
 717     insn = &Assembler::strh;
 718     break;
 719   case T_BOOLEAN:
 720   case T_BYTE:
 721     assert(c->as_jint() == 0, "should be");
 722     insn = &Assembler::strb;
 723     break;
 724   default:
 725     ShouldNotReachHere();
 726     insn = &Assembler::str;  // unreachable
 727   }
 728 
 729   if (info) add_debug_info_for_null_check_here(info);
 730   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 731 }
 732 
 733 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 734   assert(src->is_register(), "should not call otherwise");
 735   assert(dest->is_register(), "should not call otherwise");
 736 
 737   // move between cpu-registers
 738   if (dest->is_single_cpu()) {
 739     if (src->type() == T_LONG) {
 740       // Can do LONG -> OBJECT
 741       move_regs(src->as_register_lo(), dest->as_register());
 742       return;
 743     }
 744     assert(src->is_single_cpu(), "must match");
 745     if (src->type() == T_OBJECT || src->type() == T_PRIMITIVE_OBJECT) {
 746       __ verify_oop(src->as_register());
 747     }
 748     move_regs(src->as_register(), dest->as_register());
 749 
 750   } else if (dest->is_double_cpu()) {
 751     if (is_reference_type(src->type())) {
 752       // Surprising to me, but we can see a move of a long to T_OBJECT
 753       __ verify_oop(src->as_register());
 754       move_regs(src->as_register(), dest->as_register_lo());
 755       return;
 756     }
 757     assert(src->is_double_cpu(), "must match");
 758     Register f_lo = src->as_register_lo();
 759     Register f_hi = src->as_register_hi();
 760     Register t_lo = dest->as_register_lo();
 761     Register t_hi = dest->as_register_hi();
 762     assert(f_hi == f_lo, "must be same");
 763     assert(t_hi == t_lo, "must be same");
 764     move_regs(f_lo, t_lo);
 765 

 825 
 826     if (UseCompressedOops && !wide) {
 827       __ encode_heap_oop(compressed_src, src->as_register());
 828     } else {
 829       compressed_src = src->as_register();
 830     }
 831   }
 832 
 833   int null_check_here = code_offset();
 834   switch (type) {
 835     case T_FLOAT: {
 836       __ strs(src->as_float_reg(), as_Address(to_addr));
 837       break;
 838     }
 839 
 840     case T_DOUBLE: {
 841       __ strd(src->as_double_reg(), as_Address(to_addr));
 842       break;
 843     }
 844 
 845     case T_PRIMITIVE_OBJECT: // fall through
 846     case T_ARRAY:   // fall through
 847     case T_OBJECT:  // fall through
 848       if (UseCompressedOops && !wide) {
 849         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 850       } else {
 851          __ str(compressed_src, as_Address(to_addr));
 852       }
 853       break;
 854     case T_METADATA:
 855       // We get here to store a method pointer to the stack to pass to
 856       // a dtrace runtime call. This can't work on 64 bit with
 857       // compressed klass ptrs: T_METADATA can be a compressed klass
 858       // ptr or a 64 bit method pointer.
 859       ShouldNotReachHere();
 860       __ str(src->as_register(), as_Address(to_addr));
 861       break;
 862     case T_ADDRESS:
 863       __ str(src->as_register(), as_Address(to_addr));
 864       break;
 865     case T_INT:

 955   add_call_info_here(info);
 956 }
 957 
 958 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 959 
 960   LIR_Opr temp;
 961   if (type == T_LONG || type == T_DOUBLE)
 962     temp = FrameMap::rscratch1_long_opr;
 963   else
 964     temp = FrameMap::rscratch1_opr;
 965 
 966   stack2reg(src, temp, src->type());
 967   reg2stack(temp, dest, dest->type(), false);
 968 }
 969 
 970 
 971 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 972   LIR_Address* addr = src->as_address_ptr();
 973   LIR_Address* from_addr = src->as_address_ptr();
 974 
 975   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_PRIMITIVE_OBJECT) {
 976     __ verify_oop(addr->base()->as_pointer_register());
 977   }
 978 
 979   if (patch_code != lir_patch_none) {
 980     deoptimize_trap(info);
 981     return;
 982   }
 983 
 984   if (info != NULL) {
 985     add_debug_info_for_null_check_here(info);
 986   }
 987   int null_check_here = code_offset();
 988   switch (type) {
 989     case T_FLOAT: {
 990       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 991       break;
 992     }
 993 
 994     case T_DOUBLE: {
 995       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 996       break;
 997     }
 998 
 999     case T_PRIMITIVE_OBJECT: // fall through
1000     case T_ARRAY:   // fall through
1001     case T_OBJECT:  // fall through
1002       if (UseCompressedOops && !wide) {
1003         __ ldrw(dest->as_register(), as_Address(from_addr));
1004       } else {
1005          __ ldr(dest->as_register(), as_Address(from_addr));
1006       }
1007       break;
1008     case T_METADATA:
 1009       // We get here to load a method pointer from the stack, stored
 1010       // there to pass to a dtrace runtime call. This can't work on 64 bit
 1011       // with compressed klass ptrs: T_METADATA can be a compressed klass
 1012       // ptr or a 64 bit method pointer.
1013       ShouldNotReachHere();
1014       __ ldr(dest->as_register(), as_Address(from_addr));
1015       break;
1016     case T_ADDRESS:
1017       __ ldr(dest->as_register(), as_Address(from_addr));
1018       break;
1019     case T_INT:

1039     case T_SHORT:
1040       __ ldrsh(dest->as_register(), as_Address(from_addr));
1041       break;
1042 
1043     default:
1044       ShouldNotReachHere();
1045   }
1046 
1047   if (is_reference_type(type)) {
1048     if (UseCompressedOops && !wide) {
1049       __ decode_heap_oop(dest->as_register());
1050     }
1051 
1052     if (!UseZGC) {
1053       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1054       __ verify_oop(dest->as_register());
1055     }
1056   }
1057 }
1058 
1059 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1060   assert(dst->is_cpu_register(), "must be");
1061   assert(dst->type() == src->type(), "must be");
1062 
1063   if (src->is_cpu_register()) {
1064     reg2reg(src, dst);
1065   } else if (src->is_stack()) {
1066     stack2reg(src, dst, dst->type());
1067   } else if (src->is_constant()) {
1068     const2reg(src, dst, lir_patch_none, NULL);
1069   } else {
1070     ShouldNotReachHere();
1071   }
1072 }
1073 
1074 int LIR_Assembler::array_element_size(BasicType type) const {
1075   int elem_size = type2aelembytes(type);
1076   return exact_log2(elem_size);
1077 }
1078 
1079 
1080 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1081   switch (op->code()) {
1082   case lir_idiv:
1083   case lir_irem:
1084     arithmetic_idiv(op->code(),
1085                     op->in_opr1(),
1086                     op->in_opr2(),
1087                     op->in_opr3(),
1088                     op->result_opr(),
1089                     op->info());
1090     break;
1091   case lir_fmad:
1092     __ fmaddd(op->result_opr()->as_double_reg(),

1244     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1245                                InstanceKlass::init_state_offset()));
1246     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1247     add_debug_info_for_null_check_here(op->stub()->info());
1248     __ br(Assembler::NE, *op->stub()->entry());
1249   }
1250   __ allocate_object(op->obj()->as_register(),
1251                      op->tmp1()->as_register(),
1252                      op->tmp2()->as_register(),
1253                      op->header_size(),
1254                      op->object_size(),
1255                      op->klass()->as_register(),
1256                      *op->stub()->entry());
1257   __ bind(*op->stub()->continuation());
1258 }
1259 
1260 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1261   Register len =  op->len()->as_register();
1262   __ uxtw(len, len);
1263 
1264   if (UseSlowPath || op->type() == T_PRIMITIVE_OBJECT ||
1265       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1266       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1267     __ b(*op->stub()->entry());
1268   } else {
1269     Register tmp1 = op->tmp1()->as_register();
1270     Register tmp2 = op->tmp2()->as_register();
1271     Register tmp3 = op->tmp3()->as_register();
1272     if (len == tmp1) {
1273       tmp1 = tmp3;
1274     } else if (len == tmp2) {
1275       tmp2 = tmp3;
1276     } else if (len == tmp3) {
1277       // everything is ok
1278     } else {
1279       __ mov(tmp3, len);
1280     }
1281     __ allocate_array(op->obj()->as_register(),
1282                       len,
1283                       tmp1,
1284                       tmp2,

1350     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1351   }
1352   Label profile_cast_success, profile_cast_failure;
1353   Label *success_target = should_profile ? &profile_cast_success : success;
1354   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1355 
1356   if (obj == k_RInfo) {
1357     k_RInfo = dst;
1358   } else if (obj == klass_RInfo) {
1359     klass_RInfo = dst;
1360   }
1361   if (k->is_loaded() && !UseCompressedClassPointers) {
1362     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1363   } else {
1364     Rtmp1 = op->tmp3()->as_register();
1365     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1366   }
1367 
1368   assert_different_registers(obj, k_RInfo, klass_RInfo);
1369 
1370   if (op->need_null_check()) {
1371     if (should_profile) {
1372       Label not_null;
1373       __ cbnz(obj, not_null);
1374       // Object is null; update MDO and exit
1375       Register mdo  = klass_RInfo;
1376       __ mov_metadata(mdo, md->constant_encoding());
1377       Address data_addr
1378         = __ form_address(rscratch2, mdo,
1379                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1380                           0);
1381       __ ldrb(rscratch1, data_addr);
1382       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1383       __ strb(rscratch1, data_addr);
1384       __ b(*obj_is_null);
1385       __ bind(not_null);
1386     } else {
1387       __ cbz(obj, *obj_is_null);
1388     }
1389   }
1390 
1391   if (!k->is_loaded()) {
1392     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1393   } else {
1394     __ mov_metadata(k_RInfo, k->constant_encoding());
1395   }
1396   __ verify_oop(obj);
1397 
1398   if (op->fast_check()) {
1399     // get object class
1400     // not a safepoint as obj null check happens earlier
1401     __ load_klass(rscratch1, obj);
1402     __ cmp( rscratch1, k_RInfo);
1403 
1404     __ br(Assembler::NE, *failure_target);
1405     // successful cast, fall through to profile or jump
1406   } else {
1407     // get object class
1408     // not a safepoint as obj null check happens earlier
1409     __ load_klass(klass_RInfo, obj);

1558     __ bind(success);
1559     if (dst != obj) {
1560       __ mov(dst, obj);
1561     }
1562   } else if (code == lir_instanceof) {
1563     Register obj = op->object()->as_register();
1564     Register dst = op->result_opr()->as_register();
1565     Label success, failure, done;
1566     emit_typecheck_helper(op, &success, &failure, &failure);
1567     __ bind(failure);
1568     __ mov(dst, zr);
1569     __ b(done);
1570     __ bind(success);
1571     __ mov(dst, 1);
1572     __ bind(done);
1573   } else {
1574     ShouldNotReachHere();
1575   }
1576 }
1577 
1578 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1579   // We are loading/storing from/to an array that *may* be flattened (the
1580   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1581   // If this array is flattened, take the slow path.
1582 
1583   Register klass = op->tmp()->as_register();
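  // Two ways to detect a flattened array: with UseArrayMarkWordCheck the
  // relevant bit is cached in the object's mark word; otherwise load the
  // klass and test the flat-value bit of its layout helper.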
1584   if (UseArrayMarkWordCheck) {
1585     __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1586   } else {
1587     __ load_klass(klass, op->array()->as_register());
1588     __ ldrw(klass, Address(klass, Klass::layout_helper_offset()));
1589     __ tst(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
1590     __ br(Assembler::NE, *op->stub()->entry());
1591   }
1592   if (!op->value()->is_illegal()) {
1593     // The array is not flattened, but it might be null-free. If we are storing
1594     // a null into a null-free array, take the slow path (which will throw NPE).
1595     Label skip;
1596     __ cbnz(op->value()->as_register(), skip);
1597     if (UseArrayMarkWordCheck) {
1598       __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1599     } else {
1600       __ tst(klass, Klass::_lh_null_free_array_bit_inplace);
1601       __ br(Assembler::NE, *op->stub()->entry());
1602     }
1603     __ bind(skip);
1604   }
1605 }
1606 
1607 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1608   // We are storing into an array that *may* be null-free (the declared type is
1609   // Object[], abstract[], interface[] or VT.ref[]).
1610   if (UseArrayMarkWordCheck) {
1611     Label test_mark_word;
1612     Register tmp = op->tmp()->as_register();
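    // The null-free bit is read from the mark word, but the mark word only
    // carries the klass's bit pattern while the object is unlocked; if the
    // unlocked bit is clear, reload the pattern from the klass prototype
    // header instead.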
1613     __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1614     __ tst(tmp, markWord::unlocked_value);
1615     __ br(Assembler::NE, test_mark_word);
1616     __ load_prototype_header(tmp, op->array()->as_register());
1617     __ bind(test_mark_word);
1618     __ tst(tmp, markWord::null_free_array_bit_in_place);
1619   } else {
1620     Register klass = op->tmp()->as_register();
1621     __ load_klass(klass, op->array()->as_register());
1622     __ ldrw(klass, Address(klass, Klass::layout_helper_offset())); // layout_helper is a jint
1623     __ tst(klass, Klass::_lh_null_free_array_bit_inplace);
1624   }
1625 }
1626 
1627 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1628   Label L_oops_equal;
1629   Label L_oops_not_equal;
1630   Label L_end;
1631 
1632   Register left  = op->left()->as_register();
1633   Register right = op->right()->as_register();
1634 
1635   __ cmp(left, right);
1636   __ br(Assembler::EQ, L_oops_equal);
1637 
1638   // (1) Null check -- if one of the operands is null, the other must not be null (because
1639   //     the two references are not equal), so they are not substitutable.
1640   //     FIXME: do null check only if the operand is nullable
1641   {
1642     __ cbz(left, L_oops_not_equal);
1643     __ cbz(right, L_oops_not_equal);
1644   }
1645 
1646   ciKlass* left_klass = op->left_klass();
1647   ciKlass* right_klass = op->right_klass();
1648 
1649   // (2) Inline type check -- if either of the operands is not an inline type,
1650   //     they are not substitutable. We do this only if we are not sure that the
1651   //     operands are inline types.
1652   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
1653       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1654     Register tmp1  = op->tmp1()->as_register();
1655     __ mov(tmp1, markWord::inline_type_pattern);
1656     __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1657     __ andr(tmp1, tmp1, rscratch1);
1658     __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1659     __ andr(tmp1, tmp1, rscratch1);
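    // tmp1 still equals inline_type_pattern iff both mark words have all of
    // the pattern bits set, so a single compare checks both operands.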
1660     __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1661     __ br(Assembler::NE, L_oops_not_equal);
1662   }
1663 
1664   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1665   if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
1666     // No need to load klass -- the operands are statically known to be the same inline klass.
1667     __ b(*op->stub()->entry());
1668   } else {
1669     Register left_klass_op = op->left_klass_op()->as_register();
1670     Register right_klass_op = op->right_klass_op()->as_register();
1671 
1672     if (UseCompressedClassPointers) {
1673       __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1674       __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1675       __ cmpw(left_klass_op, right_klass_op);
1676     } else {
1677       __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1678       __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1679       __ cmp(left_klass_op, right_klass_op);
1680     }
1681 
1682     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1683     // fall through to L_oops_not_equal
1684   }
1685 
1686   __ bind(L_oops_not_equal);
1687   move(op->not_equal_result(), op->result_opr());
1688   __ b(L_end);
1689 
1690   __ bind(L_oops_equal);
1691   move(op->equal_result(), op->result_opr());
1692   __ b(L_end);
1693 
1694   // We've returned from the stub. r0 contains 0x0 iff the two
1695   // operands are not substitutable. (Don't compare against 0x1: the stub
1696   // only guarantees zero vs. non-zero.)
1697   __ bind(*op->stub()->continuation());
1698   __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1699   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1700   // fall-through
1701   __ bind(L_end);
1702 }
1703 
1704 
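// Both CAS helpers leave their result in rscratch1 (0 on success, 1 on
// failure, via cset on the NE flag) and finish with a full AnyAny barrier,
// presumably because acquire/release on the cmpxchg alone is weaker than
// the sequentially consistent semantics expected here.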
1705 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1706   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire */ true, /* release */ true, /* weak */ false, rscratch1);
1707   __ cset(rscratch1, Assembler::NE);
1708   __ membar(__ AnyAny);
1709 }
1710 
1711 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1712   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire */ true, /* release */ true, /* weak */ false, rscratch1);
1713   __ cset(rscratch1, Assembler::NE);
1714   __ membar(__ AnyAny);
1715 }
1716 
1717 
1718 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1719   assert(VM_Version::supports_cx8(), "wrong machine");
1720   Register addr;
1721   if (op->addr()->is_register()) {
1722     addr = as_reg(op->addr());
1723   } else {
1724     assert(op->addr()->is_address(), "what else?");

2124     }
2125 
2126     if (opr2->is_constant()) {
2127       bool is_32bit = false; // true iff the register operand is 32 bits wide
2128       jlong imm;
2129 
2130       switch(opr2->type()) {
2131       case T_INT:
2132         imm = opr2->as_constant_ptr()->as_jint();
2133         is_32bit = true;
2134         break;
2135       case T_LONG:
2136         imm = opr2->as_constant_ptr()->as_jlong();
2137         break;
2138       case T_ADDRESS:
2139         imm = opr2->as_constant_ptr()->as_jint();
2140         break;
2141       case T_METADATA:
2142         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
2143         break;
2144       case T_PRIMITIVE_OBJECT:
2145       case T_OBJECT:
2146       case T_ARRAY:
2147         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
2148         __ cmpoop(reg1, rscratch1);
2149         return;
2150       default:
2151         ShouldNotReachHere();
2152         imm = 0;  // unreachable
2153         break;
2154       }
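      // A compare against an immediate is only directly encodable when the
      // value fits the AArch64 add/sub immediate form (12 bits, optionally
      // shifted left by 12); anything wider is materialized in rscratch1.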
2155 
2156       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
2157         if (is_32bit)
2158           __ cmpw(reg1, imm);
2159         else
2160           __ subs(zr, reg1, imm);
2161         return;
2162       } else {
2163         __ mov(rscratch1, imm);
2164         if (is_32bit)

2199     __ cmp(left->as_register_lo(), right->as_register_lo());
2200     __ mov(dst->as_register(), (uint64_t)-1L);
2201     __ br(Assembler::LT, done);
2202     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
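    // csinc(dst, zr, zr, EQ) yields 0 when equal and 1 otherwise; combined
    // with the -1/LT branch above this produces the -1/0/1 result of a
    // long compare.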
2203     __ bind(done);
2204   } else {
2205     ShouldNotReachHere();
2206   }
2207 }
2208 
2209 
2210 void LIR_Assembler::align_call(LIR_Code code) {  }
2211 
2212 
2213 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2214   address call = __ trampoline_call(Address(op->addr(), rtype));
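  // trampoline_call emits a near BL plus, if needed, a trampoline stub so
  // the target stays reachable beyond the +/-128 MB direct-branch range;
  // it returns NULL when there is no room left in the stub section.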
2215   if (call == NULL) {
2216     bailout("trampoline stub overflow");
2217     return;
2218   }
2219   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2220 }
2221 
2222 
2223 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2224   address call = __ ic_call(op->addr());
2225   if (call == NULL) {
2226     bailout("trampoline stub overflow");
2227     return;
2228   }
2229   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2230 }
2231 
2232 void LIR_Assembler::emit_static_call_stub() {
2233   address call_pc = __ pc();
2234   address stub = __ start_a_stub(call_stub_size());
2235   if (stub == NULL) {
2236     bailout("static call stub overflow");
2237     return;
2238   }
2239 
2240   int start = __ offset();
2241 
2242   __ relocate(static_stub_Relocation::spec(call_pc));
2243   __ emit_static_call_stub();
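  // The stub emitted here is only a placeholder (metadata load plus a
  // branch); it is patched with the resolved callee when the static call
  // is bound.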
2244 
2245   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2246         <= call_stub_size(), "stub too big");
2247   __ end_a_stub();
2248 }
2249 

2291   __ b(_unwind_handler_entry);
2292 }
2293 
2294 
2295 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2296   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2297   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2298 
2299   switch (left->type()) {
2300     case T_INT: {
2301       switch (code) {
2302       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2303       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2304       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2305       default:
2306         ShouldNotReachHere();
2307         break;
2308       }
2309       break;
2310     case T_LONG:
2311     case T_PRIMITIVE_OBJECT:
2312     case T_ADDRESS:
2313     case T_OBJECT:
2314       switch (code) {
2315       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2316       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2317       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2318       default:
2319         ShouldNotReachHere();
2320         break;
2321       }
2322       break;
2323     default:
2324       ShouldNotReachHere();
2325       break;
2326     }
2327   }
2328 }
2329 
2330 
2331 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2332   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2333   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2334 
2335   switch (left->type()) {
2336     case T_INT: {
2337       switch (code) {
2338       case lir_shl:  __ lslw (dreg, lreg, count); break;
2339       case lir_shr:  __ asrw (dreg, lreg, count); break;
2340       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2341       default:
2342         ShouldNotReachHere();
2343         break;
2344       }
2345       break;
2346     case T_LONG:
2347     case T_ADDRESS:
2348     case T_PRIMITIVE_OBJECT:
2349     case T_OBJECT:
2350       switch (code) {
2351       case lir_shl:  __ lsl (dreg, lreg, count); break;
2352       case lir_shr:  __ asr (dreg, lreg, count); break;
2353       case lir_ushr: __ lsr (dreg, lreg, count); break;
2354       default:
2355         ShouldNotReachHere();
2356         break;
2357       }
2358       break;
2359     default:
2360       ShouldNotReachHere();
2361       break;
2362     }
2363   }
2364 }
2365 
2366 
2367 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2368   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");

2373 
2374 
2375 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2376   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2377   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2378   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2379   __ mov (rscratch1, c);
2380   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2381 }
2382 
2383 
2384 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2385   ShouldNotReachHere();
2386   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2387   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2388   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2389   __ lea(rscratch1, __ constant_oop_address(o));
2390   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2391 }
2392 
2393 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2394   if (null_check) {
2395     __ cbz(obj, *slow_path->entry());
2396   }
2397   if (UseArrayMarkWordCheck) {
2398     if (is_dest) {
2399       __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2400     } else {
2401       __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
2402     }
2403   } else {
2404     __ load_klass(tmp, obj);
2405     __ ldrw(tmp, Address(tmp, Klass::layout_helper_offset())); // layout_helper is a jint
2406     if (is_dest) {
2407       // Take the slow path if it's a null_free destination array, in case the source array contains NULLs.
2408       __ tst(tmp, Klass::_lh_null_free_array_bit_inplace);
2409     } else {
2410       __ tst(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
2411     }
2412     __ br(Assembler::NE, *slow_path->entry());
2413   }
2414 }
2415 
2416 // This code replaces a call to arraycopy; no exceptions may
2417 // be thrown in this code: they must be thrown in the System.arraycopy
2418 // activation frame. We could save some checks if this were not the case.
2419 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2420   ciArrayKlass* default_type = op->expected_type();
2421   Register src = op->src()->as_register();
2422   Register dst = op->dst()->as_register();
2423   Register src_pos = op->src_pos()->as_register();
2424   Register dst_pos = op->dst_pos()->as_register();
2425   Register length  = op->length()->as_register();
2426   Register tmp = op->tmp()->as_register();
2427 
2428   CodeStub* stub = op->stub();
2429   int flags = op->flags();
2430   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2431   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2432 
2433   if (flags & LIR_OpArrayCopy::always_slow_path) {
2434     __ b(*stub->entry());
2435     __ bind(*stub->continuation());
2436     return;
2437   }
2438 
2439   // if we don't know anything, just go through the generic arraycopy
2440   if (default_type == NULL // || basic_type == T_OBJECT
2441       ) {
2442     Label done;
2443     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2444 
2445     // Save the arguments in case the generic arraycopy fails and we
2446     // have to fall back to the JNI stub
2447     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2448     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2449     __ str(src,              Address(sp, 4*BytesPerWord));
2450 
2451     address copyfunc_addr = StubRoutines::generic_arraycopy();
2452     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2453 
2454     // The arguments are in java calling convention so we shift them
2455     // to C convention
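    // On AArch64 j_rarg0 == c_rarg1, so each argument shifts down one
    // register; the assert_different_registers calls check that no move
    // clobbers a source that is still needed.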
2456     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2457     __ mov(c_rarg0, j_rarg0);
2458     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2472     __ cbz(r0, *stub->continuation());
2473 
2474     // Reload values from the stack so they are where the stub
2475     // expects them.
2476     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2477     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2478     __ ldr(src,              Address(sp, 4*BytesPerWord));
2479 
2480     // r0 is -1^K where K == partial copied count
2481     __ eonw(rscratch1, r0, zr); // eon with zr computes ~r0, i.e. K
2482     // adjust length down and src/dst pos up by partial copied count
2483     __ subw(length, length, rscratch1);
2484     __ addw(src_pos, src_pos, rscratch1);
2485     __ addw(dst_pos, dst_pos, rscratch1);
2486     __ b(*stub->entry());
2487 
2488     __ bind(*stub->continuation());
2489     return;
2490   }
2491 
2492   // Handle inline type arrays
2493   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2494     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2495   }
2496 
2497   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2498     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2499   }
2500 
2501   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2502 
2503   int elem_size = type2aelembytes(basic_type);
2504   int scale = exact_log2(elem_size);
2505 
2506   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2507   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2508   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2509   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2510 
2511   // test for NULL
2512   if (flags & LIR_OpArrayCopy::src_null_check) {
2513     __ cbz(src, *stub->entry());
2514   }
2515   if (flags & LIR_OpArrayCopy::dst_null_check) {
2516     __ cbz(dst, *stub->entry());
2517   }
2518 
2519   // If the compiler was not able to prove that exact type of the source or the destination
2520   // of the arraycopy is an array type, check at runtime if the source or the destination is

3051         // first time here. Set profile type.
3052         __ str(tmp, mdo_addr);
3053       } else {
3054         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3055                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3056 
3057         __ ldr(tmp, mdo_addr);
3058         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
3059 
3060         __ orr(tmp, tmp, TypeEntries::type_unknown);
3061         __ str(tmp, mdo_addr);
3062         // FIXME: Write barrier needed here?
3063       }
3064     }
3065 
3066     __ bind(next);
3067   }
3068   COMMENT("} emit_profile_type");
3069 }
3070 
3071 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3072   Register obj = op->obj()->as_register();
3073   Register tmp = op->tmp()->as_pointer_register();
3074   bool not_null = op->not_null();
3075   int flag = op->flag();
3076 
3077   Label not_inline_type;
3078   if (!not_null) {
3079     __ cbz(obj, not_inline_type);
3080   }
3081 
3082   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3083 
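  // obj is known to be a non-null inline type at this point: OR the profile
  // flag into the MDO's flag byte.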
3084   Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
3085   __ ldrb(rscratch1, mdo_addr);
3086   __ orr(rscratch1, rscratch1, flag);
3087   __ strb(rscratch1, mdo_addr);
3088 
3089   __ bind(not_inline_type);
3090 }
3091 
3092 void LIR_Assembler::align_backward_branch_target() {
3093 }
3094 
3095 
3096 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3097   // tmp must be unused
3098   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3099 
3100   if (left->is_single_cpu()) {
3101     assert(dest->is_single_cpu(), "expect single result reg");
3102     __ negw(dest->as_register(), left->as_register());
3103   } else if (left->is_double_cpu()) {
3104     assert(dest->is_double_cpu(), "expect double result reg");
3105     __ neg(dest->as_register_lo(), left->as_register_lo());
3106   } else if (left->is_single_fpu()) {
3107     assert(dest->is_single_fpu(), "expect single float result reg");
3108     __ fnegs(dest->as_float_reg(), left->as_float_reg());
3109   } else {
3110     assert(left->is_double_fpu(), "expect double float operand reg");

3210 void LIR_Assembler::membar_loadload() {
3211   __ membar(Assembler::LoadLoad);
3212 }
3213 
3214 void LIR_Assembler::membar_storestore() {
3215   __ membar(MacroAssembler::StoreStore);
3216 }
3217 
3218 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3219 
3220 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3221 
3222 void LIR_Assembler::on_spin_wait() {
3223   __ spin_wait();
3224 }
3225 
3226 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3227   __ mov(result_reg->as_register(), rthread);
3228 }
3229 
3230 void LIR_Assembler::check_orig_pc() {
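  // A non-zero orig_pc slot means the frame has been patched for
  // deoptimization (deopt saves the original return address there).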
3231   __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3232   __ cmp(rscratch2, (u1)NULL_WORD);
3233 }
3234 
3235 void LIR_Assembler::peephole(LIR_List *lir) {
3236 #if 0
3237   if (tableswitch_count >= max_tableswitches)
3238     return;
3239 
3240   /*
3241     This finite-state automaton recognizes sequences of compare-and-
3242     branch instructions.  We will turn them into a tableswitch.  You
3243     could argue that C1 really shouldn't be doing this sort of
3244     optimization, but without it the code is really horrible.
3245   */
3246 
3247   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3248   int first_key, last_key = -2147483648;
3249   int next_key = 0;
3250   int start_insn = -1;
3251   int last_insn = -1;
3252   Register reg = noreg;
3253   LIR_Opr reg_opr;

3361 #endif
3362 }
3363 
3364 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3365   Address addr = as_Address(src->as_address_ptr());
3366   BasicType type = src->type();
3367   bool is_oop = is_reference_type(type);
3368 
3369   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3370   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3371 
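  // Select the right MacroAssembler helper up front: word forms for ints
  // and compressed oops, xword forms for longs and uncompressed oops. The
  // "al" suffix on these helpers denotes acquire+release ordering.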
3372   switch(type) {
3373   case T_INT:
3374     xchg = &MacroAssembler::atomic_xchgalw;
3375     add = &MacroAssembler::atomic_addalw;
3376     break;
3377   case T_LONG:
3378     xchg = &MacroAssembler::atomic_xchgal;
3379     add = &MacroAssembler::atomic_addal;
3380     break;
3381   case T_PRIMITIVE_OBJECT:
3382   case T_OBJECT:
3383   case T_ARRAY:
3384     if (UseCompressedOops) {
3385       xchg = &MacroAssembler::atomic_xchgalw;
3386       add = &MacroAssembler::atomic_addalw;
3387     } else {
3388       xchg = &MacroAssembler::atomic_xchgal;
3389       add = &MacroAssembler::atomic_addal;
3390     }
3391     break;
3392   default:
3393     ShouldNotReachHere();
3394     xchg = &MacroAssembler::atomic_xchgal;
3395     add = &MacroAssembler::atomic_addal; // unreachable
3396   }
3397 
3398   switch (code) {
3399   case lir_xadd:
3400     {
3401       RegisterOrConstant inc;
< prev index next >