< prev index next >

src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

Print this page




  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"

  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/cardTableBarrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"

  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "vmreg_aarch64.inline.hpp"
  46 
  47 
  48 
  49 #ifndef PRODUCT
  50 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  51 #else
  52 #define COMMENT(x)
  53 #endif
  54 
NEEDS_CLEANUP // remove these definitions?
  56 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 
  62 


 225   // FIXME: This needs to be much more clever.  See x86.
 226 }
 227 
 228 
 229 void LIR_Assembler::osr_entry() {
 230   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 231   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 232   ValueStack* entry_state = osr_entry->state();
 233   int number_of_locks = entry_state->locks_size();
 234 
 235   // we jump here if osr happens with the interpreter
 236   // state set up to continue at the beginning of the
 237   // loop that triggered osr - in particular, we have
 238   // the following registers setup:
 239   //
 240   // r2: osr buffer
 241   //
 242 
 243   // build frame
 244   ciMethod* m = compilation()->method();
 245   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 246 
 247   // OSR buffer is
 248   //
 249   // locals[nlocals-1..0]
 250   // monitors[0..number_of_locks]
 251   //
 252   // locals is a direct copy of the interpreter frame so in the osr buffer
 253   // so first slot in the local array is the last local from the interpreter
 254   // and last slot is local[0] (receiver) from the interpreter
 255   //
 256   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 257   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 258   // in the interpreter frame (the method lock if a sync method)
 259 
 260   // Initialize monitors in the compiled activation.
 261   //   r2: pointer to osr buffer
 262   //
 263   // All other registers are dead at this point and the locals will be
 264   // copied into place by code emitted in the IR.
 265 


 435     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 436     __ unlock_object(r5, r4, r0, *stub->entry());
 437     __ bind(*stub->continuation());
 438   }
 439 
 440   if (compilation()->env()->dtrace_method_probes()) {
 441     __ call_Unimplemented();
 442 #if 0
 443     __ movptr(Address(rsp, 0), rax);
 444     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 445     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 446 #endif
 447   }
 448 
 449   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 450     __ mov(r0, r19);  // Restore the exception
 451   }
 452 
 453   // remove the activation and dispatch to the unwind handler
 454   __ block_comment("remove_frame and dispatch to the unwind handler");
 455   __ remove_frame(initial_frame_size_in_bytes());
 456   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 457 
 458   // Emit the slow path assembly
 459   if (stub != NULL) {
 460     stub->emit_code(this);
 461   }
 462 
 463   return offset;
 464 }
 465 
 466 
 467 int LIR_Assembler::emit_deopt_handler() {
 468   // if the last instruction is a call (typically to do a throw which
 469   // is coming at the end after block reordering) the return address
 470   // must still point into the code area in order to avoid assertion
 471   // failures when searching for the corresponding bci => add a nop
 472   // (was bug 5/14/1999 - gri)
 473   __ nop();
 474 
 475   // generate code for exception handler


 486   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 487   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 488   __ end_a_stub();
 489 
 490   return offset;
 491 }
 492 
 493 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 494   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 495   int pc_offset = code_offset();
 496   flush_debug_info(pc_offset);
 497   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 498   if (info->exception_handlers() != NULL) {
 499     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 500   }
 501 }
 502 
 503 void LIR_Assembler::return_op(LIR_Opr result) {
 504   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 505 
















 506   // Pop the stack before the safepoint code
 507   __ remove_frame(initial_frame_size_in_bytes());
 508 
 509   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 510     __ reserved_stack_check();
 511   }
 512 
 513   address polling_page(os::get_polling_page());
 514   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 515   __ ret(lr);
 516 }
 517 




 518 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 519   address polling_page(os::get_polling_page());
 520   guarantee(info != NULL, "Shouldn't be NULL");
 521   assert(os::is_poll_address(polling_page), "should be");
 522   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 523   add_debug_info_for_branch(info);  // This isn't just debug info:
 524                                     // it's the oop map
 525   __ read_polling_page(rscratch1, relocInfo::poll_type);
 526   return __ offset();
 527 }
 528 
 529 
 530 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 531   if (from_reg == r31_sp)
 532     from_reg = sp;
 533   if (to_reg == r31_sp)
 534     to_reg = sp;
 535   __ mov(to_reg, from_reg);
 536 }
 537 


 545 
 546   switch (c->type()) {
 547     case T_INT: {
 548       assert(patch_code == lir_patch_none, "no patching handled here");
 549       __ movw(dest->as_register(), c->as_jint());
 550       break;
 551     }
 552 
 553     case T_ADDRESS: {
 554       assert(patch_code == lir_patch_none, "no patching handled here");
 555       __ mov(dest->as_register(), c->as_jint());
 556       break;
 557     }
 558 
 559     case T_LONG: {
 560       assert(patch_code == lir_patch_none, "no patching handled here");
 561       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 562       break;
 563     }
 564 

 565     case T_OBJECT: {
 566         if (patch_code == lir_patch_none) {
 567           jobject2reg(c->as_jobject(), dest->as_register());
 568         } else {
 569           jobject2reg_with_patching(dest->as_register(), info);


 570         }
 571       break;
 572     }
 573 
 574     case T_METADATA: {
 575       if (patch_code != lir_patch_none) {
 576         klass2reg_with_patching(dest->as_register(), info);
 577       } else {
 578         __ mov_metadata(dest->as_register(), c->as_metadata());
 579       }
 580       break;
 581     }
 582 
 583     case T_FLOAT: {
 584       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 585         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 586       } else {
 587         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 588         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 589       }


 591     }
 592 
 593     case T_DOUBLE: {
 594       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 595         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 596       } else {
 597         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 598         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 599       }
 600       break;
 601     }
 602 
 603     default:
 604       ShouldNotReachHere();
 605   }
 606 }
 607 
 608 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 609   LIR_Const* c = src->as_constant_ptr();
 610   switch (c->type()) {

 611   case T_OBJECT:
 612     {
 613       if (! c->as_jobject())
 614         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 615       else {
 616         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 617         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 618       }
 619     }
 620     break;
 621   case T_ADDRESS:
 622     {
 623       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 624       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 625     }
 626   case T_INT:
 627   case T_FLOAT:
 628     {
 629       Register reg = zr;
 630       if (c->as_jint_bits() == 0)


 657 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 658   assert(src->is_constant(), "should not call otherwise");
 659   LIR_Const* c = src->as_constant_ptr();
 660   LIR_Address* to_addr = dest->as_address_ptr();
 661 
 662   void (Assembler::* insn)(Register Rt, const Address &adr);
 663 
 664   switch (type) {
 665   case T_ADDRESS:
 666     assert(c->as_jint() == 0, "should be");
 667     insn = &Assembler::str;
 668     break;
 669   case T_LONG:
 670     assert(c->as_jlong() == 0, "should be");
 671     insn = &Assembler::str;
 672     break;
 673   case T_INT:
 674     assert(c->as_jint() == 0, "should be");
 675     insn = &Assembler::strw;
 676     break;

 677   case T_OBJECT:
 678   case T_ARRAY:


 679     assert(c->as_jobject() == 0, "should be");
 680     if (UseCompressedOops && !wide) {
 681       insn = &Assembler::strw;
 682     } else {
 683       insn = &Assembler::str;
 684     }
 685     break;
 686   case T_CHAR:
 687   case T_SHORT:
 688     assert(c->as_jint() == 0, "should be");
 689     insn = &Assembler::strh;
 690     break;
 691   case T_BOOLEAN:
 692   case T_BYTE:
 693     assert(c->as_jint() == 0, "should be");
 694     insn = &Assembler::strb;
 695     break;
 696   default:
 697     ShouldNotReachHere();
 698     insn = &Assembler::str;  // unreachable
 699   }
 700 
 701   if (info) add_debug_info_for_null_check_here(info);
 702   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 703 }
 704 
 705 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 706   assert(src->is_register(), "should not call otherwise");
 707   assert(dest->is_register(), "should not call otherwise");
 708 
 709   // move between cpu-registers
 710   if (dest->is_single_cpu()) {
 711     if (src->type() == T_LONG) {
 712       // Can do LONG -> OBJECT
 713       move_regs(src->as_register_lo(), dest->as_register());
 714       return;
 715     }
 716     assert(src->is_single_cpu(), "must match");
 717     if (src->type() == T_OBJECT) {
 718       __ verify_oop(src->as_register());
 719     }
 720     move_regs(src->as_register(), dest->as_register());
 721 
 722   } else if (dest->is_double_cpu()) {
 723     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 724       // Surprising to me but we can see move of a long to t_object
 725       __ verify_oop(src->as_register());
 726       move_regs(src->as_register(), dest->as_register_lo());
 727       return;
 728     }
 729     assert(src->is_double_cpu(), "must match");
 730     Register f_lo = src->as_register_lo();
 731     Register f_hi = src->as_register_hi();
 732     Register t_lo = dest->as_register_lo();
 733     Register t_hi = dest->as_register_hi();
 734     assert(f_hi == f_lo, "must be same");
 735     assert(t_hi == t_lo, "must be same");
 736     move_regs(f_lo, t_lo);
 737 
 738   } else if (dest->is_single_fpu()) {
 739     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 740 
 741   } else if (dest->is_double_fpu()) {
 742     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 743 
 744   } else {
 745     ShouldNotReachHere();
 746   }
 747 }
 748 
 749 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 750   if (src->is_single_cpu()) {
 751     if (type == T_ARRAY || type == T_OBJECT) {
 752       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 753       __ verify_oop(src->as_register());
 754     } else if (type == T_METADATA || type == T_DOUBLE) {
 755       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 756     } else {
 757       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 758     }
 759 
 760   } else if (src->is_double_cpu()) {
 761     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 762     __ str(src->as_register_lo(), dest_addr_LO);
 763 
 764   } else if (src->is_single_fpu()) {
 765     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 766     __ strs(src->as_float_reg(), dest_addr);
 767 
 768   } else if (src->is_double_fpu()) {
 769     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 770     __ strd(src->as_double_reg(), dest_addr);
 771 
 772   } else {
 773     ShouldNotReachHere();
 774   }
 775 
 776 }
 777 
 778 
 779 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 780   LIR_Address* to_addr = dest->as_address_ptr();
 781   PatchingStub* patch = NULL;
 782   Register compressed_src = rscratch1;
 783 
 784   if (patch_code != lir_patch_none) {
 785     deoptimize_trap(info);
 786     return;
 787   }
 788 
 789   if (type == T_ARRAY || type == T_OBJECT) {
 790     __ verify_oop(src->as_register());
 791 
 792     if (UseCompressedOops && !wide) {
 793       __ encode_heap_oop(compressed_src, src->as_register());
 794     } else {
 795       compressed_src = src->as_register();
 796     }
 797   }
 798 
 799   int null_check_here = code_offset();
 800   switch (type) {
 801     case T_FLOAT: {
 802       __ strs(src->as_float_reg(), as_Address(to_addr));
 803       break;
 804     }
 805 
 806     case T_DOUBLE: {
 807       __ strd(src->as_double_reg(), as_Address(to_addr));
 808       break;
 809     }
 810 

 811     case T_ARRAY:   // fall through
 812     case T_OBJECT:  // fall through
 813       if (UseCompressedOops && !wide) {
 814         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 815       } else {
 816          __ str(compressed_src, as_Address(to_addr));
 817       }
 818       break;
 819     case T_METADATA:
 820       // We get here to store a method pointer to the stack to pass to
 821       // a dtrace runtime call. This can't work on 64 bit with
 822       // compressed klass ptrs: T_METADATA can be a compressed klass
 823       // ptr or a 64 bit method pointer.
 824       ShouldNotReachHere();
 825       __ str(src->as_register(), as_Address(to_addr));
 826       break;
 827     case T_ADDRESS:
 828       __ str(src->as_register(), as_Address(to_addr));
 829       break;
 830     case T_INT:


 844 
 845     case T_CHAR:    // fall through
 846     case T_SHORT:
 847       __ strh(src->as_register(), as_Address(to_addr));
 848       break;
 849 
 850     default:
 851       ShouldNotReachHere();
 852   }
 853   if (info != NULL) {
 854     add_debug_info_for_null_check(null_check_here, info);
 855   }
 856 }
 857 
 858 
 859 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 860   assert(src->is_stack(), "should not call otherwise");
 861   assert(dest->is_register(), "should not call otherwise");
 862 
 863   if (dest->is_single_cpu()) {
 864     if (type == T_ARRAY || type == T_OBJECT) {
 865       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 866       __ verify_oop(dest->as_register());
 867     } else if (type == T_METADATA) {
 868       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 869     } else {
 870       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 871     }
 872 
 873   } else if (dest->is_double_cpu()) {
 874     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 875     __ ldr(dest->as_register_lo(), src_addr_LO);
 876 
 877   } else if (dest->is_single_fpu()) {
 878     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 879     __ ldrs(dest->as_float_reg(), src_addr);
 880 
 881   } else if (dest->is_double_fpu()) {
 882     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 883     __ ldrd(dest->as_double_reg(), src_addr);
 884 


 916   add_call_info_here(info);
 917 }
 918 
 919 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 920 
 921   LIR_Opr temp;
 922   if (type == T_LONG || type == T_DOUBLE)
 923     temp = FrameMap::rscratch1_long_opr;
 924   else
 925     temp = FrameMap::rscratch1_opr;
 926 
 927   stack2reg(src, temp, src->type());
 928   reg2stack(temp, dest, dest->type(), false);
 929 }
 930 
 931 
 932 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
 933   LIR_Address* addr = src->as_address_ptr();
 934   LIR_Address* from_addr = src->as_address_ptr();
 935 
 936   if (addr->base()->type() == T_OBJECT) {
 937     __ verify_oop(addr->base()->as_pointer_register());
 938   }
 939 
 940   if (patch_code != lir_patch_none) {
 941     deoptimize_trap(info);
 942     return;
 943   }
 944 
 945   if (info != NULL) {
 946     add_debug_info_for_null_check_here(info);
 947   }
 948   int null_check_here = code_offset();
 949   switch (type) {
 950     case T_FLOAT: {
 951       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 952       break;
 953     }
 954 
 955     case T_DOUBLE: {
 956       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 957       break;
 958     }
 959 

 960     case T_ARRAY:   // fall through
 961     case T_OBJECT:  // fall through
 962       if (UseCompressedOops && !wide) {
 963         __ ldrw(dest->as_register(), as_Address(from_addr));
 964       } else {
 965          __ ldr(dest->as_register(), as_Address(from_addr));
 966       }
 967       break;
 968     case T_METADATA:
 969       // We get here to store a method pointer to the stack to pass to
 970       // a dtrace runtime call. This can't work on 64 bit with
 971       // compressed klass ptrs: T_METADATA can be a compressed klass
 972       // ptr or a 64 bit method pointer.
 973       ShouldNotReachHere();
 974       __ ldr(dest->as_register(), as_Address(from_addr));
 975       break;
 976     case T_ADDRESS:
 977       // FIXME: OMG this is a horrible kludge.  Any offset from an
 978       // address that matches klass_offset_in_bytes() will be loaded
 979       // as a word, not a long.


 994 
 995     case T_BYTE:
 996       __ ldrsb(dest->as_register(), as_Address(from_addr));
 997       break;
 998     case T_BOOLEAN: {
 999       __ ldrb(dest->as_register(), as_Address(from_addr));
1000       break;
1001     }
1002 
1003     case T_CHAR:
1004       __ ldrh(dest->as_register(), as_Address(from_addr));
1005       break;
1006     case T_SHORT:
1007       __ ldrsh(dest->as_register(), as_Address(from_addr));
1008       break;
1009 
1010     default:
1011       ShouldNotReachHere();
1012   }
1013 
1014   if (type == T_ARRAY || type == T_OBJECT) {
1015     if (UseCompressedOops && !wide) {
1016       __ decode_heap_oop(dest->as_register());
1017     }
1018 
1019     if (!UseZGC) {
1020       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1021       __ verify_oop(dest->as_register());
1022     }
1023   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1024     if (UseCompressedClassPointers) {

1025       __ decode_klass_not_null(dest->as_register());


1026     }
1027   }
1028 }
1029 














1030 
1031 int LIR_Assembler::array_element_size(BasicType type) const {
1032   int elem_size = type2aelembytes(type);
1033   return exact_log2(elem_size);
1034 }
1035 
1036 
1037 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1038   switch (op->code()) {
1039   case lir_idiv:
1040   case lir_irem:
1041     arithmetic_idiv(op->code(),
1042                     op->in_opr1(),
1043                     op->in_opr2(),
1044                     op->in_opr3(),
1045                     op->result_opr(),
1046                     op->info());
1047     break;
1048   case lir_fmad:
1049     __ fmaddd(op->result_opr()->as_double_reg(),


1201     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1202                                InstanceKlass::init_state_offset()));
1203     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1204     add_debug_info_for_null_check_here(op->stub()->info());
1205     __ br(Assembler::NE, *op->stub()->entry());
1206   }
1207   __ allocate_object(op->obj()->as_register(),
1208                      op->tmp1()->as_register(),
1209                      op->tmp2()->as_register(),
1210                      op->header_size(),
1211                      op->object_size(),
1212                      op->klass()->as_register(),
1213                      *op->stub()->entry());
1214   __ bind(*op->stub()->continuation());
1215 }
1216 
1217 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1218   Register len =  op->len()->as_register();
1219   __ uxtw(len, len);
1220 
1221   if (UseSlowPath ||
1222       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1223       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1224     __ b(*op->stub()->entry());
1225   } else {
1226     Register tmp1 = op->tmp1()->as_register();
1227     Register tmp2 = op->tmp2()->as_register();
1228     Register tmp3 = op->tmp3()->as_register();
1229     if (len == tmp1) {
1230       tmp1 = tmp3;
1231     } else if (len == tmp2) {
1232       tmp2 = tmp3;
1233     } else if (len == tmp3) {
1234       // everything is ok
1235     } else {
1236       __ mov(tmp3, len);
1237     }
1238     __ allocate_array(op->obj()->as_register(),
1239                       len,
1240                       tmp1,
1241                       tmp2,


1513     __ bind(success);
1514     if (dst != obj) {
1515       __ mov(dst, obj);
1516     }
1517   } else if (code == lir_instanceof) {
1518     Register obj = op->object()->as_register();
1519     Register dst = op->result_opr()->as_register();
1520     Label success, failure, done;
1521     emit_typecheck_helper(op, &success, &failure, &failure);
1522     __ bind(failure);
1523     __ mov(dst, zr);
1524     __ b(done);
1525     __ bind(success);
1526     __ mov(dst, 1);
1527     __ bind(done);
1528   } else {
1529     ShouldNotReachHere();
1530   }
1531 }
1532 


























































































































// 32-bit compare-and-swap at [addr]: store newval iff *addr == cmpval,
// with both acquire and release semantics. rscratch1 is set from the NE
// condition afterwards (nonzero presumably signals CAS failure — flag
// convention comes from cmpxchg; confirm against MacroAssembler::cmpxchg).
// The trailing AnyAny barrier makes the operation a full fence.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1538 
// 64-bit compare-and-swap at [addr]: store newval iff *addr == cmpval,
// with both acquire and release semantics. rscratch1 is set from the NE
// condition afterwards (nonzero presumably signals CAS failure — flag
// convention comes from cmpxchg; confirm against MacroAssembler::cmpxchg).
// The trailing AnyAny barrier makes the operation a full fence.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1544 
1545 
1546 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1547   assert(VM_Version::supports_cx8(), "wrong machine");
1548   Register addr;
1549   if (op->addr()->is_register()) {
1550     addr = as_reg(op->addr());
1551   } else {
1552     assert(op->addr()->is_address(), "what else?");


1923       }
1924     }
1925   } else {
1926     Register rreg = right->as_register();
1927     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1928   }
1929 }
1930 
1931 
1932 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1933   if (opr1->is_constant() && opr2->is_single_cpu()) {
1934     // tableswitch
1935     Register reg = as_reg(opr2);
1936     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1937     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1938   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1939     Register reg1 = as_reg(opr1);
1940     if (opr2->is_single_cpu()) {
1941       // cpu register - cpu register
1942       Register reg2 = opr2->as_register();
1943       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1944         __ cmpoop(reg1, reg2);
1945       } else {
1946         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1947         __ cmpw(reg1, reg2);
1948       }
1949       return;
1950     }
1951     if (opr2->is_double_cpu()) {
1952       // cpu register - cpu register
1953       Register reg2 = opr2->as_register_lo();
1954       __ cmp(reg1, reg2);
1955       return;
1956     }
1957 
1958     if (opr2->is_constant()) {
1959       bool is_32bit = false; // width of register operand
1960       jlong imm;
1961 
1962       switch(opr2->type()) {
1963       case T_INT:
1964         imm = opr2->as_constant_ptr()->as_jint();
1965         is_32bit = true;
1966         break;
1967       case T_LONG:
1968         imm = opr2->as_constant_ptr()->as_jlong();
1969         break;
1970       case T_ADDRESS:
1971         imm = opr2->as_constant_ptr()->as_jint();
1972         break;

1973       case T_OBJECT:
1974       case T_ARRAY:
1975         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1976         __ cmpoop(reg1, rscratch1);
1977         return;
1978       default:
1979         ShouldNotReachHere();
1980         imm = 0;  // unreachable
1981         break;
1982       }
1983 
1984       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1985         if (is_32bit)
1986           __ cmpw(reg1, imm);
1987         else
1988           __ subs(zr, reg1, imm);
1989         return;
1990       } else {
1991         __ mov(rscratch1, imm);
1992         if (is_32bit)


2119   __ b(_unwind_handler_entry);
2120 }
2121 
2122 
2123 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2124   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2125   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2126 
2127   switch (left->type()) {
2128     case T_INT: {
2129       switch (code) {
2130       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2131       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2132       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2133       default:
2134         ShouldNotReachHere();
2135         break;
2136       }
2137       break;
2138     case T_LONG:

2139     case T_ADDRESS:
2140     case T_OBJECT:
2141       switch (code) {
2142       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2143       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2144       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2145       default:
2146         ShouldNotReachHere();
2147         break;
2148       }
2149       break;
2150     default:
2151       ShouldNotReachHere();
2152       break;
2153     }
2154   }
2155 }
2156 
2157 
2158 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2159   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2160   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2161 
2162   switch (left->type()) {
2163     case T_INT: {
2164       switch (code) {
2165       case lir_shl:  __ lslw (dreg, lreg, count); break;
2166       case lir_shr:  __ asrw (dreg, lreg, count); break;
2167       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2168       default:
2169         ShouldNotReachHere();
2170         break;
2171       }
2172       break;
2173     case T_LONG:
2174     case T_ADDRESS:

2175     case T_OBJECT:
2176       switch (code) {
2177       case lir_shl:  __ lsl (dreg, lreg, count); break;
2178       case lir_shr:  __ asr (dreg, lreg, count); break;
2179       case lir_ushr: __ lsr (dreg, lreg, count); break;
2180       default:
2181         ShouldNotReachHere();
2182         break;
2183       }
2184       break;
2185     default:
2186       ShouldNotReachHere();
2187       break;
2188     }
2189   }
2190 }
2191 
2192 
2193 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2194   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");


2199 
2200 
2201 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2202   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2203   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2204   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2205   __ mov (rscratch1, c);
2206   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2207 }
2208 
2209 
2210 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2211   ShouldNotReachHere();
2212   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2213   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2214   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2215   __ lea(rscratch1, __ constant_oop_address(o));
2216   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2217 }
2218 













2219 
2220 // This code replaces a call to arraycopy; no exception may
2221 // be thrown in this code, they must be thrown in the System.arraycopy
2222 // activation frame; we could save some checks if this would not be the case
2223 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2224   ciArrayKlass* default_type = op->expected_type();
2225   Register src = op->src()->as_register();
2226   Register dst = op->dst()->as_register();
2227   Register src_pos = op->src_pos()->as_register();
2228   Register dst_pos = op->dst_pos()->as_register();
2229   Register length  = op->length()->as_register();
2230   Register tmp = op->tmp()->as_register();
2231 
2232   __ resolve(ACCESS_READ, src);
2233   __ resolve(ACCESS_WRITE, dst);
2234 
2235   CodeStub* stub = op->stub();
2236   int flags = op->flags();
2237   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2238   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
















2239 
2240   // if we don't know anything, just go through the generic arraycopy
2241   if (default_type == NULL // || basic_type == T_OBJECT
2242       ) {
2243     Label done;
2244     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2245 
2246     // Save the arguments in case the generic arraycopy fails and we
2247     // have to fall back to the JNI stub
2248     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2249     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2250     __ str(src,              Address(sp, 4*BytesPerWord));
2251 
2252     address copyfunc_addr = StubRoutines::generic_arraycopy();
2253     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2254 
2255     // The arguments are in java calling convention so we shift them
2256     // to C convention
2257     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2258     __ mov(c_rarg0, j_rarg0);


2887 
2888 
2889 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2890   assert(!tmp->is_valid(), "don't need temporary");
2891 
2892   CodeBlob *cb = CodeCache::find_blob(dest);
2893   if (cb) {
2894     __ far_call(RuntimeAddress(dest));
2895   } else {
2896     __ mov(rscratch1, RuntimeAddress(dest));
2897     int len = args->length();
2898     int type = 0;
2899     if (! result->is_illegal()) {
2900       switch (result->type()) {
2901       case T_VOID:
2902         type = 0;
2903         break;
2904       case T_INT:
2905       case T_LONG:
2906       case T_OBJECT:

2907         type = 1;
2908         break;
2909       case T_FLOAT:
2910         type = 2;
2911         break;
2912       case T_DOUBLE:
2913         type = 3;
2914         break;
2915       default:
2916         ShouldNotReachHere();
2917         break;
2918       }
2919     }
2920     int num_gpargs = 0;
2921     int num_fpargs = 0;
2922     for (int i = 0; i < args->length(); i++) {
2923       LIR_Opr arg = args->at(i);
2924       if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
2925         num_fpargs++;
2926       } else {


3153 #endif
3154 }
3155 
3156 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3157   Address addr = as_Address(src->as_address_ptr());
3158   BasicType type = src->type();
3159   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3160 
3161   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3162   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3163 
3164   switch(type) {
3165   case T_INT:
3166     xchg = &MacroAssembler::atomic_xchgalw;
3167     add = &MacroAssembler::atomic_addalw;
3168     break;
3169   case T_LONG:
3170     xchg = &MacroAssembler::atomic_xchgal;
3171     add = &MacroAssembler::atomic_addal;
3172     break;

3173   case T_OBJECT:
3174   case T_ARRAY:
3175     if (UseCompressedOops) {
3176       xchg = &MacroAssembler::atomic_xchgalw;
3177       add = &MacroAssembler::atomic_addalw;
3178     } else {
3179       xchg = &MacroAssembler::atomic_xchgal;
3180       add = &MacroAssembler::atomic_addal;
3181     }
3182     break;
3183   default:
3184     ShouldNotReachHere();
3185     xchg = &MacroAssembler::atomic_xchgal;
3186     add = &MacroAssembler::atomic_addal; // unreachable
3187   }
3188 
3189   switch (code) {
3190   case lir_xadd:
3191     {
3192       RegisterOrConstant inc;




  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "ci/ciValueKlass.hpp"
  38 #include "code/compiledIC.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/cardTableBarrierSet.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "nativeInst_aarch64.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/oop.inline.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "vmreg_aarch64.inline.hpp"
  48 
  49 
  50 
  51 #ifndef PRODUCT
  52 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  53 #else
  54 #define COMMENT(x)
  55 #endif
  56 
  57 NEEDS_CLEANUP // remove this definitions ?
  58 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  59 const Register SYNC_header = r0;   // synchronization header
  60 const Register SHIFT_count = r0;   // where count for shift operations must be
  61 
  62 #define __ _masm->
  63 
  64 


 227   // FIXME: This needs to be much more clever.  See x86.
 228 }
 229 
 230 
 231 void LIR_Assembler::osr_entry() {
 232   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 233   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 234   ValueStack* entry_state = osr_entry->state();
 235   int number_of_locks = entry_state->locks_size();
 236 
 237   // we jump here if osr happens with the interpreter
 238   // state set up to continue at the beginning of the
 239   // loop that triggered osr - in particular, we have
 240   // the following registers setup:
 241   //
 242   // r2: osr buffer
 243   //
 244 
 245   // build frame
 246   ciMethod* m = compilation()->method();
 247   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), needs_stack_repair(), NULL); 
 248 
 249   // OSR buffer is
 250   //
 251   // locals[nlocals-1..0]
 252   // monitors[0..number_of_locks]
 253   //
 254   // locals is a direct copy of the interpreter frame so in the osr buffer
 255   // so first slot in the local array is the last local from the interpreter
 256   // and last slot is local[0] (receiver) from the interpreter
 257   //
 258   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 259   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 260   // in the interpreter frame (the method lock if a sync method)
 261 
 262   // Initialize monitors in the compiled activation.
 263   //   r2: pointer to osr buffer
 264   //
 265   // All other registers are dead at this point and the locals will be
 266   // copied into place by code emitted in the IR.
 267 


 437     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 438     __ unlock_object(r5, r4, r0, *stub->entry());
 439     __ bind(*stub->continuation());
 440   }
 441 
 442   if (compilation()->env()->dtrace_method_probes()) {
 443     __ call_Unimplemented();
 444 #if 0
 445     __ movptr(Address(rsp, 0), rax);
 446     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 447     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 448 #endif
 449   }
 450 
 451   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 452     __ mov(r0, r19);  // Restore the exception
 453   }
 454 
 455   // remove the activation and dispatch to the unwind handler
 456   __ block_comment("remove_frame and dispatch to the unwind handler");
 457   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 458   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 459 
 460   // Emit the slow path assembly
 461   if (stub != NULL) {
 462     stub->emit_code(this);
 463   }
 464 
 465   return offset;
 466 }
 467 
 468 
 469 int LIR_Assembler::emit_deopt_handler() {
 470   // if the last instruction is a call (typically to do a throw which
 471   // is coming at the end after block reordering) the return address
 472   // must still point into the code area in order to avoid assertion
 473   // failures when searching for the corresponding bci => add a nop
 474   // (was bug 5/14/1999 - gri)
 475   __ nop();
 476 
 477   // generate code for exception handler


 488   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 489   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 490   __ end_a_stub();
 491 
 492   return offset;
 493 }
 494 
 495 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 496   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 497   int pc_offset = code_offset();
 498   flush_debug_info(pc_offset);
 499   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 500   if (info->exception_handlers() != NULL) {
 501     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 502   }
 503 }
 504 
// Emit the method-return sequence: optional value-type unpacking, frame
// removal, reserved-stack check, return-poll, and the actual return.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  ciMethod* method = compilation()->method();

  // A never-null value-type return may be scattered into registers by the
  // klass's unpack handler when ValueTypeReturnedAsFields is enabled.
  if (ValueTypeReturnedAsFields && method->signature()->returns_never_null()) {
    ciType* return_type = method->return_type();
    if (return_type->is_valuetype()) {
      ciValueKlass* vk = return_type->as_value_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != NULL, "must be");
        __ far_call(RuntimeAddress(unpack_handler));
        // At this point, r0 points to the value object (for interpreter or C1 caller).
        // The fields of the object are copied into registers (for C2 caller).
      }
    }
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Return poll: load from the polling page with a poll_return relocation.
  address polling_page(os::get_polling_page());
  __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
  __ ret(lr);
}
 535 
 536 int LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) { 
 537  return (__ store_value_type_fields_to_buf(vk, false));
 538 }
 539 
// Emit a safepoint poll (a load from the polling page) and record the debug
// info/oop map for it. Returns the code offset after the poll.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  guarantee(info != NULL, "Shouldn't be NULL");
  assert(os::is_poll_address(polling_page), "should be");
  // Load the polling-page address first, then attach the debug info so the
  // recorded pc is that of the faulting load below.
  __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
 550 
 551 
 552 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 553   if (from_reg == r31_sp)
 554     from_reg = sp;
 555   if (to_reg == r31_sp)
 556     to_reg = sp;
 557   __ mov(to_reg, from_reg);
 558 }
 559 


 567 
 568   switch (c->type()) {
 569     case T_INT: {
 570       assert(patch_code == lir_patch_none, "no patching handled here");
 571       __ movw(dest->as_register(), c->as_jint());
 572       break;
 573     }
 574 
 575     case T_ADDRESS: {
 576       assert(patch_code == lir_patch_none, "no patching handled here");
 577       __ mov(dest->as_register(), c->as_jint());
 578       break;
 579     }
 580 
 581     case T_LONG: {
 582       assert(patch_code == lir_patch_none, "no patching handled here");
 583       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 584       break;
 585     }
 586 
 587     case T_VALUETYPE:
 588     case T_OBJECT: {
 589         if (patch_code != lir_patch_none) {


 590           jobject2reg_with_patching(dest->as_register(), info);
 591         } else {
 592           jobject2reg(c->as_jobject(), dest->as_register());
 593         }
 594       break;
 595     }
 596 
 597     case T_METADATA: {
 598       if (patch_code != lir_patch_none) {
 599         klass2reg_with_patching(dest->as_register(), info);
 600       } else {
 601         __ mov_metadata(dest->as_register(), c->as_metadata());
 602       }
 603       break;
 604     }
 605 
 606     case T_FLOAT: {
 607       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 608         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 609       } else {
 610         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 611         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 612       }


 614     }
 615 
 616     case T_DOUBLE: {
 617       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 618         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 619       } else {
 620         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 621         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 622       }
 623       break;
 624     }
 625 
 626     default:
 627       ShouldNotReachHere();
 628   }
 629 }
 630 
 631 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 632   LIR_Const* c = src->as_constant_ptr();
 633   switch (c->type()) {
 634   case T_VALUETYPE: 
 635   case T_OBJECT:
 636     {
 637       if (! c->as_jobject())
 638         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 639       else {
 640         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 641         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 642       }
 643     }
 644     break;
 645   case T_ADDRESS:
 646     {
 647       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 648       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 649     }
 650   case T_INT:
 651   case T_FLOAT:
 652     {
 653       Register reg = zr;
 654       if (c->as_jint_bits() == 0)


// Store a constant to memory. On aarch64 only zero constants are handled
// (every path stores zr); the switch merely selects the store width.
// 'wide' distinguishes a full 64-bit oop store from a compressed 32-bit one.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  // Pointer-to-member selecting the store instruction of the right width.
  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_VALUETYPE: 
  case T_OBJECT:
  case T_ARRAY:
    // Non-null case is not handled on aarch64 but handled on x86
    // FIXME: do we need to add it here?
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;  // 32-bit store of a compressed null
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  // Record debug info before the store, which may be the implicit null check.
  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}
 731 
// Move between registers (cpu<->cpu or fpu<->fpu), tolerating a couple of
// type mismatches (LONG->OBJECT and OBJECT->double_cpu) seen in practice.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    // A double-cpu operand occupies one 64-bit register here, so the lo and
    // hi halves must name the same register.
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
 775 
 776 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 777   if (src->is_single_cpu()) {
 778     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 779       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 780       __ verify_oop(src->as_register());
 781     } else if (type == T_METADATA || type == T_DOUBLE) {
 782       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 783     } else {
 784       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 785     }
 786 
 787   } else if (src->is_double_cpu()) {
 788     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 789     __ str(src->as_register_lo(), dest_addr_LO);
 790 
 791   } else if (src->is_single_fpu()) {
 792     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 793     __ strs(src->as_float_reg(), dest_addr);
 794 
 795   } else if (src->is_double_fpu()) {
 796     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 797     __ strd(src->as_double_reg(), dest_addr);
 798 
 799   } else {
 800     ShouldNotReachHere();
 801   }
 802 
 803 }
 804 
 805 
 806 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 807   LIR_Address* to_addr = dest->as_address_ptr();
 808   PatchingStub* patch = NULL;
 809   Register compressed_src = rscratch1;
 810 
 811   if (patch_code != lir_patch_none) {
 812     deoptimize_trap(info);
 813     return;
 814   }
 815 
 816   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 817     __ verify_oop(src->as_register());
 818 
 819     if (UseCompressedOops && !wide) {
 820       __ encode_heap_oop(compressed_src, src->as_register());
 821     } else {
 822       compressed_src = src->as_register();
 823     }
 824   }
 825 
 826   int null_check_here = code_offset();
 827   switch (type) {
 828     case T_FLOAT: {
 829       __ strs(src->as_float_reg(), as_Address(to_addr));
 830       break;
 831     }
 832 
 833     case T_DOUBLE: {
 834       __ strd(src->as_double_reg(), as_Address(to_addr));
 835       break;
 836     }
 837 
 838     case T_VALUETYPE: // fall through
 839     case T_ARRAY:   // fall through
 840     case T_OBJECT:  // fall through
 841       if (UseCompressedOops && !wide) {
 842         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 843       } else {
 844          __ str(compressed_src, as_Address(to_addr));
 845       }
 846       break;
 847     case T_METADATA:
 848       // We get here to store a method pointer to the stack to pass to
 849       // a dtrace runtime call. This can't work on 64 bit with
 850       // compressed klass ptrs: T_METADATA can be a compressed klass
 851       // ptr or a 64 bit method pointer.
 852       ShouldNotReachHere();
 853       __ str(src->as_register(), as_Address(to_addr));
 854       break;
 855     case T_ADDRESS:
 856       __ str(src->as_register(), as_Address(to_addr));
 857       break;
 858     case T_INT:


 872 
 873     case T_CHAR:    // fall through
 874     case T_SHORT:
 875       __ strh(src->as_register(), as_Address(to_addr));
 876       break;
 877 
 878     default:
 879       ShouldNotReachHere();
 880   }
 881   if (info != NULL) {
 882     add_debug_info_for_null_check(null_check_here, info);
 883   }
 884 }
 885 
 886 
 887 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 888   assert(src->is_stack(), "should not call otherwise");
 889   assert(dest->is_register(), "should not call otherwise");
 890 
 891   if (dest->is_single_cpu()) {
 892     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 893       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 894       __ verify_oop(dest->as_register());
 895     } else if (type == T_METADATA) {
 896       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 897     } else {
 898       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 899     }
 900 
 901   } else if (dest->is_double_cpu()) {
 902     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 903     __ ldr(dest->as_register_lo(), src_addr_LO);
 904 
 905   } else if (dest->is_single_fpu()) {
 906     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 907     __ ldrs(dest->as_float_reg(), src_addr);
 908 
 909   } else if (dest->is_double_fpu()) {
 910     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 911     __ ldrd(dest->as_double_reg(), src_addr);
 912 


 944   add_call_info_here(info);
 945 }
 946 
 947 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 948 
 949   LIR_Opr temp;
 950   if (type == T_LONG || type == T_DOUBLE)
 951     temp = FrameMap::rscratch1_long_opr;
 952   else
 953     temp = FrameMap::rscratch1_opr;
 954 
 955   stack2reg(src, temp, src->type());
 956   reg2stack(temp, dest, dest->type(), false);
 957 }
 958 
 959 
 960 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
 961   LIR_Address* addr = src->as_address_ptr();
 962   LIR_Address* from_addr = src->as_address_ptr();
 963 
 964   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) { 
 965     __ verify_oop(addr->base()->as_pointer_register());
 966   }
 967 
 968   if (patch_code != lir_patch_none) {
 969     deoptimize_trap(info);
 970     return;
 971   }
 972 
 973   if (info != NULL) {
 974     add_debug_info_for_null_check_here(info);
 975   }
 976   int null_check_here = code_offset();
 977   switch (type) {
 978     case T_FLOAT: {
 979       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 980       break;
 981     }
 982 
 983     case T_DOUBLE: {
 984       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 985       break;
 986     }
 987 
 988     case T_VALUETYPE: // fall through
 989     case T_ARRAY:   // fall through
 990     case T_OBJECT:  // fall through
 991       if (UseCompressedOops && !wide) {
 992         __ ldrw(dest->as_register(), as_Address(from_addr));
 993       } else {
 994          __ ldr(dest->as_register(), as_Address(from_addr));
 995       }
 996       break;
 997     case T_METADATA:
 998       // We get here to store a method pointer to the stack to pass to
 999       // a dtrace runtime call. This can't work on 64 bit with
1000       // compressed klass ptrs: T_METADATA can be a compressed klass
1001       // ptr or a 64 bit method pointer.
1002       ShouldNotReachHere();
1003       __ ldr(dest->as_register(), as_Address(from_addr));
1004       break;
1005     case T_ADDRESS:
1006       // FIXME: OMG this is a horrible kludge.  Any offset from an
1007       // address that matches klass_offset_in_bytes() will be loaded
1008       // as a word, not a long.


1023 
1024     case T_BYTE:
1025       __ ldrsb(dest->as_register(), as_Address(from_addr));
1026       break;
1027     case T_BOOLEAN: {
1028       __ ldrb(dest->as_register(), as_Address(from_addr));
1029       break;
1030     }
1031 
1032     case T_CHAR:
1033       __ ldrh(dest->as_register(), as_Address(from_addr));
1034       break;
1035     case T_SHORT:
1036       __ ldrsh(dest->as_register(), as_Address(from_addr));
1037       break;
1038 
1039     default:
1040       ShouldNotReachHere();
1041   }
1042 
1043   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1044     if (UseCompressedOops && !wide) {
1045       __ decode_heap_oop(dest->as_register());
1046     }
1047 
1048     if (!UseZGC) {
1049       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1050       __ verify_oop(dest->as_register());
1051     }
1052   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1053     if (UseCompressedClassPointers) {
1054       __ andr(dest->as_register(), dest->as_register(), oopDesc::compressed_klass_mask());
1055       __ decode_klass_not_null(dest->as_register());
1056     } else {
1057       __   ubfm(dest->as_register(), dest->as_register(), 0, 63 - oopDesc::storage_props_nof_bits);
1058     }
1059   }
1060 }
1061 
1062 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1063   assert(dst->is_cpu_register(), "must be");
1064   assert(dst->type() == src->type(), "must be");
1065 
1066   if (src->is_cpu_register()) {
1067     reg2reg(src, dst);
1068   } else if (src->is_stack()) {
1069     stack2reg(src, dst, dst->type());
1070   } else if (src->is_constant()) {
1071     const2reg(src, dst, lir_patch_none, NULL);
1072   } else {
1073     ShouldNotReachHere();
1074   }
1075 }
1076 
1077 int LIR_Assembler::array_element_size(BasicType type) const {
1078   int elem_size = type2aelembytes(type);
1079   return exact_log2(elem_size);
1080 }
1081 
1082 
1083 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1084   switch (op->code()) {
1085   case lir_idiv:
1086   case lir_irem:
1087     arithmetic_idiv(op->code(),
1088                     op->in_opr1(),
1089                     op->in_opr2(),
1090                     op->in_opr3(),
1091                     op->result_opr(),
1092                     op->info());
1093     break;
1094   case lir_fmad:
1095     __ fmaddd(op->result_opr()->as_double_reg(),


1247     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1248                                InstanceKlass::init_state_offset()));
1249     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1250     add_debug_info_for_null_check_here(op->stub()->info());
1251     __ br(Assembler::NE, *op->stub()->entry());
1252   }
1253   __ allocate_object(op->obj()->as_register(),
1254                      op->tmp1()->as_register(),
1255                      op->tmp2()->as_register(),
1256                      op->header_size(),
1257                      op->object_size(),
1258                      op->klass()->as_register(),
1259                      *op->stub()->entry());
1260   __ bind(*op->stub()->continuation());
1261 }
1262 
1263 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1264   Register len =  op->len()->as_register();
1265   __ uxtw(len, len);
1266 
1267   if (UseSlowPath || op->type() == T_VALUETYPE ||
1268       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1269       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1270     __ b(*op->stub()->entry());
1271   } else {
1272     Register tmp1 = op->tmp1()->as_register();
1273     Register tmp2 = op->tmp2()->as_register();
1274     Register tmp3 = op->tmp3()->as_register();
1275     if (len == tmp1) {
1276       tmp1 = tmp3;
1277     } else if (len == tmp2) {
1278       tmp2 = tmp3;
1279     } else if (len == tmp3) {
1280       // everything is ok
1281     } else {
1282       __ mov(tmp3, len);
1283     }
1284     __ allocate_array(op->obj()->as_register(),
1285                       len,
1286                       tmp1,
1287                       tmp2,


1559     __ bind(success);
1560     if (dst != obj) {
1561       __ mov(dst, obj);
1562     }
1563   } else if (code == lir_instanceof) {
1564     Register obj = op->object()->as_register();
1565     Register dst = op->result_opr()->as_register();
1566     Label success, failure, done;
1567     emit_typecheck_helper(op, &success, &failure, &failure);
1568     __ bind(failure);
1569     __ mov(dst, zr);
1570     __ b(done);
1571     __ bind(success);
1572     __ mov(dst, 1);
1573     __ bind(done);
1574   } else {
1575     ShouldNotReachHere();
1576   }
1577 }
1578 
void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
  // We are loading/storing an array that *may* be a flattened array (the declared type
  // Object[], interface[], or VT?[]). If this array is flattened, take slow path.

  // tmp receives the array's storage properties bits.
  __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
  __ tst(op->tmp()->as_register(), ArrayStorageProperties::flattened_value);
  __ br(Assembler::NE, *op->stub()->entry());
  if (!op->value()->is_illegal()) {
    // We are storing into the array.
    Label skip;
    __ tst(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
    __ br(Assembler::EQ, skip);
    // The array is not flattened, but it is null_free. If we are storing
    // a null, take the slow path (which will throw NPE).
    __ cbz(op->value()->as_register(), *op->stub()->entry());
    __ bind(skip);
  }

}
1598 
// This is called when we use aastore into an array declared as "[LVT;",
// where we know VT is not flattenable (due to ValueArrayElemMaxFlatOops, etc).
// However, we need to do a NULL check if the actual array is a "[QVT;".
// Note: only the condition flags are set here (EQ <=> array is null-free);
// the caller is responsible for branching on the comparison result.
void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
  __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
  __ mov(rscratch1, (uint64_t) ArrayStorageProperties::null_free_value);
  __ cmp(op->tmp()->as_register(), rscratch1);
}
1608 
// Emits the inline fast paths of the value-object substitutability check
// (LIR_OpSubstitutabilityCheck): reference equality, null checks, a
// mark-word test that both operands look like value objects, and a klass
// comparison. Any pair that survives those filters is handed to the
// slow-path stub; op->result_opr() ends up holding op->equal_result() or
// op->not_equal_result().
void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
  Label L_oops_equal;
  Label L_oops_not_equal;
  Label L_end;

  Register left  = op->left()->as_register();
  Register right = op->right()->as_register();

  // Identical references are trivially substitutable.
  __ cmp(left, right);
  __ br(Assembler::EQ, L_oops_equal);

  // (1) Null check -- if one of the operands is null, the other must not be null (because
  //     the two references are not equal), so they are not substitutable,
  //     FIXME: do null check only if the operand is nullable
  {
    __ cbz(left, L_oops_not_equal);
    __ cbz(right, L_oops_not_equal);
  }


  ciKlass* left_klass = op->left_klass();
  ciKlass* right_klass = op->right_klass();

  // (2) Value object check -- if either of the operands is not a value object,
  //     they are not substitutable. We do this only if we are not sure that the
  //     operands are value objects
  if ((left_klass == NULL || right_klass == NULL) ||// The klass is still unloaded, or came from a Phi node.
      !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
    Register tmp1  = rscratch1; /* op->tmp1()->as_register(); */
    Register tmp2  = rscratch2; /* op->tmp2()->as_register(); */

    // AND the always_locked bit pattern into both mark words; only if it
    // survives both ANDs can both operands be value objects.
    __ mov(tmp1, (intptr_t)markOopDesc::always_locked_pattern);

    __ ldr(tmp2, Address(left, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, tmp2);

    __ ldr(tmp2, Address(right, oopDesc::mark_offset_in_bytes()));
    __ andr(tmp1, tmp1, tmp2); 

    __ mov(tmp2, (intptr_t)markOopDesc::always_locked_pattern);
    __ cmp(tmp1, tmp2); 
    __ br(Assembler::NE, L_oops_not_equal);
  }

  // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
  if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
    // No need to load klass -- the operands are statically known to be the same value klass.
    __ b(*op->stub()->entry());
  } else {
    Register left_klass_op = op->left_klass_op()->as_register();
    Register right_klass_op = op->right_klass_op()->as_register();

    // Compare the (possibly compressed) klass words directly.
    if (UseCompressedOops) {
      __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
      __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmpw(left_klass_op, right_klass_op);
    } else {
      __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
      __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
      __ cmp(left_klass_op, right_klass_op);
    }

    __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
    // fall through to L_oops_not_equal
  }

  __ bind(L_oops_not_equal);
  move(op->not_equal_result(), op->result_opr());
  __ b(L_end);

  __ bind(L_oops_equal);
  move(op->equal_result(), op->result_opr());
  __ b(L_end);

  // We've returned from the stub. op->result_opr() contains 0x0 IFF the two
  // operands are not substitutable. (Don't compare against 0x1 in case the
  // C compiler is naughty)
  __ bind(*op->stub()->continuation());

  if (op->result_opr()->type() == T_LONG) {
    __ cbzw(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
  } else {
    __ cbz(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
  }

  move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
  // fall-through
  __ bind(L_end);

}
1699 
1700 
// 32-bit (word) compare-and-swap: atomically store newval into [addr] iff
// the current value equals cmpval, with acquire and release ordering (strong,
// not weak). cset then materializes the flags into rscratch1: 1 on failure,
// 0 on success (assumes cmpxchg leaves EQ set on success -- see
// MacroAssembler::cmpxchg). The trailing AnyAny membar acts as a full fence.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1706 
// 64-bit (xword) compare-and-swap; identical to casw above except for the
// operand size: strong CAS with acquire/release, rscratch1 = 1 on failure /
// 0 on success, followed by a full AnyAny barrier.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1712 
1713 
1714 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1715   assert(VM_Version::supports_cx8(), "wrong machine");
1716   Register addr;
1717   if (op->addr()->is_register()) {
1718     addr = as_reg(op->addr());
1719   } else {
1720     assert(op->addr()->is_address(), "what else?");


2091       }
2092     }
2093   } else {
2094     Register rreg = right->as_register();
2095     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
2096   }
2097 }
2098 
2099 
2100 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2101   if (opr1->is_constant() && opr2->is_single_cpu()) {
2102     // tableswitch
2103     Register reg = as_reg(opr2);
2104     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
2105     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
2106   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
2107     Register reg1 = as_reg(opr1);
2108     if (opr2->is_single_cpu()) {
2109       // cpu register - cpu register
2110       Register reg2 = opr2->as_register();
2111       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
2112         __ cmpoop(reg1, reg2);
2113       } else {
2114         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE,  "cmp int, oop?");
2115         __ cmpw(reg1, reg2);
2116       }
2117       return;
2118     }
2119     if (opr2->is_double_cpu()) {
2120       // cpu register - cpu register
2121       Register reg2 = opr2->as_register_lo();
2122       __ cmp(reg1, reg2);
2123       return;
2124     }
2125 
2126     if (opr2->is_constant()) {
2127       bool is_32bit = false; // width of register operand
2128       jlong imm;
2129 
2130       switch(opr2->type()) {
2131       case T_INT:
2132         imm = opr2->as_constant_ptr()->as_jint();
2133         is_32bit = true;
2134         break;
2135       case T_LONG:
2136         imm = opr2->as_constant_ptr()->as_jlong();
2137         break;
2138       case T_ADDRESS:
2139         imm = opr2->as_constant_ptr()->as_jint();
2140         break;
2141       case T_VALUETYPE:
2142       case T_OBJECT:
2143       case T_ARRAY:
2144         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
2145         __ cmpoop(reg1, rscratch1);
2146         return;
2147       default:
2148         ShouldNotReachHere();
2149         imm = 0;  // unreachable
2150         break;
2151       }
2152 
2153       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
2154         if (is_32bit)
2155           __ cmpw(reg1, imm);
2156         else
2157           __ subs(zr, reg1, imm);
2158         return;
2159       } else {
2160         __ mov(rscratch1, imm);
2161         if (is_32bit)


2288   __ b(_unwind_handler_entry);
2289 }
2290 
2291 
2292 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2293   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2294   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2295 
2296   switch (left->type()) {
2297     case T_INT: {
2298       switch (code) {
2299       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2300       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2301       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2302       default:
2303         ShouldNotReachHere();
2304         break;
2305       }
2306       break;
2307     case T_LONG:
2308     case T_VALUETYPE: 
2309     case T_ADDRESS:
2310     case T_OBJECT:
2311       switch (code) {
2312       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2313       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2314       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2315       default:
2316         ShouldNotReachHere();
2317         break;
2318       }
2319       break;
2320     default:
2321       ShouldNotReachHere();
2322       break;
2323     }
2324   }
2325 }
2326 
2327 
2328 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2329   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2330   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2331 
2332   switch (left->type()) {
2333     case T_INT: {
2334       switch (code) {
2335       case lir_shl:  __ lslw (dreg, lreg, count); break;
2336       case lir_shr:  __ asrw (dreg, lreg, count); break;
2337       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2338       default:
2339         ShouldNotReachHere();
2340         break;
2341       }
2342       break;
2343     case T_LONG:
2344     case T_ADDRESS:
2345     case T_VALUETYPE:
2346     case T_OBJECT:
2347       switch (code) {
2348       case lir_shl:  __ lsl (dreg, lreg, count); break;
2349       case lir_shr:  __ asr (dreg, lreg, count); break;
2350       case lir_ushr: __ lsr (dreg, lreg, count); break;
2351       default:
2352         ShouldNotReachHere();
2353         break;
2354       }
2355       break;
2356     default:
2357       ShouldNotReachHere();
2358       break;
2359     }
2360   }
2361 }
2362 
2363 
2364 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2365   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");


2370 
2371 
// Store the 32-bit constant c into the reserved outgoing-argument area at the
// given word offset from sp. The constant is materialized in rscratch1 first,
// since there is no store-immediate form.
void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2379 
2380 
// Store the oop constant o into the reserved outgoing-argument area.
// Not expected to be used on AArch64: the leading ShouldNotReachHere()
// makes the remainder unreachable; it is retained as a reference should a
// caller ever appear.
void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
  ShouldNotReachHere(); // no AArch64 caller today; code below is dead
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2389 
2390 void LIR_Assembler::arraycopy_valuetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest) {
2391   __ load_storage_props(tmp, obj);
2392   if (is_dest) {
2393     // We also take slow path if it's a null_free destination array, just in case the source array
2394     // contains NULLs.
2395     __ tst(tmp, ArrayStorageProperties::flattened_value | ArrayStorageProperties::null_free_value);
2396   } else {
2397     __ tst(tmp, ArrayStorageProperties::flattened_value);
2398   }
2399   __ br(Assembler::NE, *slow_path->entry());
2400 }
2401 
2402 
2403 
// This code replaces a call to arraycopy; no exceptions may
// be thrown in this code; they must be thrown in the System.arraycopy
// activation frame. We could save some checks if this were not the case.
2407 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2408   ciArrayKlass* default_type = op->expected_type();
2409   Register src = op->src()->as_register();
2410   Register dst = op->dst()->as_register();
2411   Register src_pos = op->src_pos()->as_register();
2412   Register dst_pos = op->dst_pos()->as_register();
2413   Register length  = op->length()->as_register();
2414   Register tmp = op->tmp()->as_register();
2415 
2416   __ resolve(ACCESS_READ, src);
2417   __ resolve(ACCESS_WRITE, dst);
2418 
2419   CodeStub* stub = op->stub();
2420   int flags = op->flags();
2421   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2422   if (basic_type == T_ARRAY || basic_type == T_VALUETYPE) basic_type = T_OBJECT;
2423 
2424   if (flags & LIR_OpArrayCopy::always_slow_path) {
2425     __ b(*stub->entry());
2426     __ bind(*stub->continuation());
2427     return;
2428   }
2429 
2430   if (flags & LIR_OpArrayCopy::src_valuetype_check) {
2431     arraycopy_valuetype_check(src, tmp, stub, false);
2432   }
2433 
2434   if (flags & LIR_OpArrayCopy::dst_valuetype_check) {
2435     arraycopy_valuetype_check(dst, tmp, stub, true);
2436   }
2437 
2438 
2439 
2440   // if we don't know anything, just go through the generic arraycopy
2441   if (default_type == NULL // || basic_type == T_OBJECT
2442       ) {
2443     Label done;
2444     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2445 
2446     // Save the arguments in case the generic arraycopy fails and we
2447     // have to fall back to the JNI stub
2448     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2449     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2450     __ str(src,              Address(sp, 4*BytesPerWord));
2451 
2452     address copyfunc_addr = StubRoutines::generic_arraycopy();
2453     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2454 
2455     // The arguments are in java calling convention so we shift them
2456     // to C convention
2457     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2458     __ mov(c_rarg0, j_rarg0);


3087 
3088 
3089 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3090   assert(!tmp->is_valid(), "don't need temporary");
3091 
3092   CodeBlob *cb = CodeCache::find_blob(dest);
3093   if (cb) {
3094     __ far_call(RuntimeAddress(dest));
3095   } else {
3096     __ mov(rscratch1, RuntimeAddress(dest));
3097     int len = args->length();
3098     int type = 0;
3099     if (! result->is_illegal()) {
3100       switch (result->type()) {
3101       case T_VOID:
3102         type = 0;
3103         break;
3104       case T_INT:
3105       case T_LONG:
3106       case T_OBJECT:
3107       case T_VALUETYPE:
3108         type = 1;
3109         break;
3110       case T_FLOAT:
3111         type = 2;
3112         break;
3113       case T_DOUBLE:
3114         type = 3;
3115         break;
3116       default:
3117         ShouldNotReachHere();
3118         break;
3119       }
3120     }
3121     int num_gpargs = 0;
3122     int num_fpargs = 0;
3123     for (int i = 0; i < args->length(); i++) {
3124       LIR_Opr arg = args->at(i);
3125       if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
3126         num_fpargs++;
3127       } else {


3354 #endif
3355 }
3356 
3357 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3358   Address addr = as_Address(src->as_address_ptr());
3359   BasicType type = src->type();
3360   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3361 
3362   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3363   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3364 
3365   switch(type) {
3366   case T_INT:
3367     xchg = &MacroAssembler::atomic_xchgalw;
3368     add = &MacroAssembler::atomic_addalw;
3369     break;
3370   case T_LONG:
3371     xchg = &MacroAssembler::atomic_xchgal;
3372     add = &MacroAssembler::atomic_addal;
3373     break;
3374   case T_VALUETYPE:
3375   case T_OBJECT:
3376   case T_ARRAY:
3377     if (UseCompressedOops) {
3378       xchg = &MacroAssembler::atomic_xchgalw;
3379       add = &MacroAssembler::atomic_addalw;
3380     } else {
3381       xchg = &MacroAssembler::atomic_xchgal;
3382       add = &MacroAssembler::atomic_addal;
3383     }
3384     break;
3385   default:
3386     ShouldNotReachHere();
3387     xchg = &MacroAssembler::atomic_xchgal;
3388     add = &MacroAssembler::atomic_addal; // unreachable
3389   }
3390 
3391   switch (code) {
3392   case lir_xadd:
3393     {
3394       RegisterOrConstant inc;


< prev index next >