< prev index next >

src/hotspot/cpu/aarch64/aarch64.ad

Print this page

 1628 
// Offset from the start of a dynamic Java call sequence to its return
// address: four 4-byte instructions (movz, movk, movk, bl) == 16 bytes.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1633 
// Offset from the start of a runtime-call sequence to its return address.
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   bl(addr)
  // or with far branches
  //   bl(trampoline_stub)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blr(rscratch1)
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    // Target is inside the code cache: a single (possibly trampolined) bl.
    return 1 * NativeInstruction::instruction_size;
  } else {
    // Far runtime callout: the six-instruction sequence described above.
    return 6 * NativeInstruction::instruction_size;
  }
}
 1652 
 1653 //=============================================================================
 1654 
 1655 #ifndef PRODUCT
// Debug listing for MachBreakpointNode (excluded from PRODUCT builds).
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
 1659 #endif
 1660 
// Emit a breakpoint: a single brk #0 instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
 1665 
 1666 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1667   return MachNode::size(ra_);

 1739     st->print("\n\t");
 1740     st->print("ldr  rscratch1, [guard]\n\t");
 1741     st->print("dmb ishld\n\t");
 1742     st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
 1743     st->print("cmp  rscratch1, rscratch2\n\t");
 1744     st->print("b.eq skip");
 1745     st->print("\n\t");
 1746     st->print("blr #nmethod_entry_barrier_stub\n\t");
 1747     st->print("b skip\n\t");
 1748     st->print("guard: int\n\t");
 1749     st->print("\n\t");
 1750     st->print("skip:\n\t");
 1751   }
 1752 }
 1753 #endif
 1754 
// Emit the method prolog: patchable nop, optional clinit barrier, stack
// bang, frame build, and (for methods) the nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // If the holder class is not yet fully initialized for this thread,
    // bail out to the handle_wrong_method stub.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // Re-establish the ptrue predicate register that vectorized C2 code
    // relies on.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
      // Dummy labels for just measuring the code size
      Label dummy_slow_path;
      Label dummy_continuation;
      Label dummy_guard;
      Label* slow_path = &dummy_slow_path;
      Label* continuation = &dummy_continuation;
      Label* guard = &dummy_guard;
      if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from actual stub when not emitting code for the purpose of measuring its size
        C2EntryBarrierStub* stub = Compile::current()->output()->entry_barrier_table()->add_entry_barrier();
        slow_path = &stub->slow_path();
        continuation = &stub->continuation();
        guard = &stub->guard();
      }
      // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
      bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
    }
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1822 
 1823 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
 1824 {
 1825   return MachNode::size(ra_); // too many variables; just compute it
 1826                               // the hard way
 1827 }
 1828 
 1829 int MachPrologNode::reloc() const
 1830 {
 1831   return 0;
 1832 }
 1833 
 1834 //=============================================================================
 1835 
 1836 #ifndef PRODUCT
 1837 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1838   Compile* C = ra_->C;
 1839   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1840 
 1841   st->print("# pop frame %d\n\t",framesize);
 1842 
 1843   if (framesize == 0) {
 1844     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1845   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
 1846     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
 1847     st->print("add  sp, sp, #%d\n\t", framesize);
 1848   } else {

 1852   }
 1853   if (VM_Version::use_rop_protection()) {
 1854     st->print("autia lr, rfp\n\t");
 1855     st->print("ldr zr, [lr]\n\t");
 1856   }
 1857 
 1858   if (do_polling() && C->is_method_compilation()) {
 1859     st->print("# test polling word\n\t");
 1860     st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
 1861     st->print("cmp  sp, rscratch1\n\t");
 1862     st->print("bhi #slow_path");
 1863   }
 1864 }
 1865 #endif
 1866 
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack area, and emit the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Register a real slow-path stub; the dummy label is only used
      // while measuring code size.
      code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1888 
 1889 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1890   // Variable size. Determine dynamically.
 1891   return MachNode::size(ra_);
 1892 }
 1893 
// Relocation count for the epilog.
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
 1898 
// Scheduling class: use the generic MachNode pipeline.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1902 
 1903 //=============================================================================
 1904 
// Figure out which register class each belongs in: rc_int, rc_float,
// rc_predicate or rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 1908 
 1909 static enum RC rc_class(OptoReg::Name reg) {
 1910 
 1911   if (reg == OptoReg::Bad) {
 1912     return rc_bad;
 1913   }

 2179 
 2180   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2181   int reg    = ra_->get_encode(this);
 2182 
 2183   // This add will handle any 24-bit signed offset. 24 bits allows an
 2184   // 8 megabyte stack frame.
 2185   __ add(as_Register(reg), sp, offset);
 2186 }
 2187 
// Size in bytes of the box-lock address computation.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    // Offset fits in an add immediate: one instruction.
    return NativeInstruction::instruction_size;
  } else {
    // Offset must first be materialized into a scratch register.
    return 2 * NativeInstruction::instruction_size;
  }
}
 2198 
 2199 //=============================================================================
















































 2200 

 2201 #ifndef PRODUCT
 2202 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2203 {
 2204   st->print_cr("# MachUEPNode");
 2205   if (UseCompressedClassPointers) {
 2206     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2207     if (CompressedKlassPointers::shift() != 0) {
 2208       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2209     }
 2210   } else {
 2211    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2212   }
 2213   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2214   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2215 }
 2216 #endif
 2217 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  // Compare the receiver's klass (loaded from j_rarg0) against the
  // inline-cache klass in rscratch2; cmp_klass handles the
  // compressed/uncompressed cases.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
 2231 
 2232 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 2233 {
 2234   return MachNode::size(ra_);
 2235 }
 2236 
 2237 // REQUIRED EMIT CODE
 2238 
 2239 //=============================================================================
 2240 
 2241 // Emit exception handler code.
 2242 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
 2243 {
 2244   // mov rscratch1 #exception_blob_entry_point
 2245   // br rscratch1
 2246   // Note that the code buffer's insts_mark is always relative to insts.
 2247   // That's why we must use the macroassembler to generate a handler.
 2248   C2_MacroAssembler _masm(&cbuf);
 2249   address base = __ start_a_stub(size_exception_handler());
 2250   if (base == NULL) {
 2251     ciEnv::current()->record_failure("CodeCache is full");
 2252     return 0;  // CodeBuffer::expand failed
 2253   }
 2254   int offset = __ offset();
 2255   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 2256   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

 3666     C2_MacroAssembler _masm(&cbuf);
 3667     int method_index = resolved_method_index(cbuf);
 3668     address call = __ ic_call((address)$meth$$method, method_index);
 3669     if (call == NULL) {
 3670       ciEnv::current()->record_failure("CodeCache is full");
 3671       return;
 3672     }
 3673     _masm.clear_inst_mark();
 3674     __ post_call_nop();
 3675     if (Compile::current()->max_vector_size() > 0) {
 3676       __ reinitialize_ptrue();
 3677     }
 3678   %}
 3679 
  // Code emitted after a Java call returns; currently only the optional
  // stack-depth verification.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3687 
 3688   enc_class aarch64_enc_java_to_runtime(method meth) %{
 3689     C2_MacroAssembler _masm(&cbuf);
 3690 
 3691     // some calls to generated routines (arraycopy code) are scheduled
 3692     // by C2 as runtime calls. if so we can call them using a br (they
 3693     // will be in a reachable segment) otherwise we have to use a blr
 3694     // which loads the absolute address into a register.
 3695     address entry = (address)$meth$$method;
 3696     CodeBlob *cb = CodeCache::find_blob(entry);
 3697     if (cb) {
 3698       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
 3699       if (call == NULL) {
 3700         ciEnv::current()->record_failure("CodeCache is full");
 3701         return;
 3702       }
 3703       __ post_call_nop();
 3704     } else {
 3705       Label retaddr;

 3760 
 3761     assert_different_registers(oop, box, tmp, disp_hdr);
 3762 
 3763     // Load markWord from object into displaced_header.
 3764     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
 3765 
 3766     if (DiagnoseSyncOnValueBasedClasses != 0) {
 3767       __ load_klass(tmp, oop);
 3768       __ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 3769       __ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 3770       __ br(Assembler::NE, cont);
 3771     }
 3772 
 3773     // Check for existing monitor
 3774     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
 3775 
 3776     if (!UseHeavyMonitors) {
 3777       // Set tmp to be (markWord of object | UNLOCK_VALUE).
 3778       __ orr(tmp, disp_hdr, markWord::unlocked_value);
 3779 





 3780       // Initialize the box. (Must happen before we update the object mark!)
 3781       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3782 
 3783       // Compare object markWord with an unlocked value (tmp) and if
 3784       // equal exchange the stack address of our box with object markWord.
 3785       // On failure disp_hdr contains the possibly locked markWord.
 3786       __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
 3787                  /*release*/ true, /*weak*/ false, disp_hdr);
 3788       __ br(Assembler::EQ, cont);
 3789 
 3790       assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 3791 
 3792       // If the compare-and-exchange succeeded, then we found an unlocked
 3793       // object, will have now locked it will continue at label cont
 3794 
 3795       // Check if the owner is self by comparing the value in the
 3796       // markWord of object (disp_hdr) with the stack pointer.
 3797       __ mov(rscratch1, sp);
 3798       __ sub(disp_hdr, disp_hdr, rscratch1);
 3799       __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));

// Load a 64-bit long constant into a register.
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  // Shared immediate-move encoding (see aarch64_enc_mov_imm).
  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7244 
 7245 // Load Pointer Constant
 7246 
// Load Pointer Constant
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // More expensive than a plain immediate move (pointer materialization).
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 7260 
 7261 // Load Null Pointer Constant
 7262 
// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}

 8434 %}
 8435 
 8436 // ============================================================================
 8437 // Cast/Convert Instructions
 8438 
// Reinterpret a long as a pointer; a register move at most.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    // No-op when source and destination were allocated the same register.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
 8453 















// Reinterpret a pointer as a long; a register move at most.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    // No-op when source and destination were allocated the same register.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
 8468 
 8469 // Convert oop into int for vectors alignment masking
 8470 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8471   match(Set dst (ConvL2I (CastP2X src)));
 8472 
 8473   ins_cost(INSN_COST);

14884 
14885   match(Set dst (MoveL2D src));
14886 
14887   effect(DEF dst, USE src);
14888 
14889   ins_cost(INSN_COST);
14890 
14891   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
14892 
14893   ins_encode %{
14894     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
14895   %}
14896 
14897   ins_pipe(fp_l2d);
14898 
14899 %}
14900 
14901 // ============================================================================
14902 // clearing of an array
14903 
// Zero a block of memory: cnt words starting at base.
// Kills cnt and base (fixed registers r11/r10) and the flags.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words may need to emit a stub call; NULL means the code
    // cache had no room, so fail the compile.
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14922 
















// Zero a constant-length block of memory; selected only when the word
// count is below BlockZeroingLowLimit (scaled from bytes to words).
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words may need to emit a stub call; NULL means the code
    // cache had no room, so fail the compile.
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14943 
14944 // ============================================================================
14945 // Overflow Math Instructions
14946 

16240 
16241 // Call Runtime Instruction
16242 
// Direct call to a runtime leaf routine (no Java frame bookkeeping).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16257 
16258 // Call Runtime Instruction
16259 


















// Direct call to a runtime leaf routine that does not touch
// floating-point state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16274 
16275 // Tail Call; Jump from runtime stub to Java code.
16276 // Also known as an 'interprocedural jump'.
16277 // Target of jump will eventually return to caller.
16278 // TailJump below removes the return address.
16279 instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_ptr)
16280 %{
16281   match(TailCall jump_target method_ptr);

 1628 
// Offset from the start of a dynamic Java call sequence to its return
// address: four 4-byte instructions (movz, movk, movk, bl) == 16 bytes.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1633 
// Offset from the start of a runtime-call sequence to its return address.
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   bl(addr)
  // or with far branches
  //   bl(trampoline_stub)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blr(rscratch1)
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    // Target is inside the code cache: a single (possibly trampolined) bl.
    return 1 * NativeInstruction::instruction_size;
  } else if (_entry_point == NULL) {
    // See CallLeafNoFPIndirect
    return 1 * NativeInstruction::instruction_size;
  } else {
    // Far runtime callout: the six-instruction sequence described above.
    return 6 * NativeInstruction::instruction_size;
  }
}
 1655 
 1656 //=============================================================================
 1657 
 1658 #ifndef PRODUCT
// Debug listing for MachBreakpointNode (excluded from PRODUCT builds).
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
 1662 #endif
 1663 
// Emit a breakpoint: a single brk #0 instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
 1668 
 1669 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1670   return MachNode::size(ra_);

 1742     st->print("\n\t");
 1743     st->print("ldr  rscratch1, [guard]\n\t");
 1744     st->print("dmb ishld\n\t");
 1745     st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
 1746     st->print("cmp  rscratch1, rscratch2\n\t");
 1747     st->print("b.eq skip");
 1748     st->print("\n\t");
 1749     st->print("blr #nmethod_entry_barrier_stub\n\t");
 1750     st->print("b skip\n\t");
 1751     st->print("guard: int\n\t");
 1752     st->print("\n\t");
 1753     st->print("skip:\n\t");
 1754   }
 1755 }
 1756 #endif
 1757 
// Emit the method prolog: patchable nop, frame build via verified_entry,
// nmethod entry barrier, and binding of the verified-entry label that
// MachVEPNode branches to after unpacking inline-type arguments.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // Build the frame; the second argument is the sp increment used for
  // stack repair (0 here: no extension for scalarized args at this entry).
  __ verified_entry(C, 0);

  if (C->stub_function() == NULL) {
    // nmethod entry barrier (not needed for stubs).
    __ entry_barrier();
  }

  if (!Compile::current()->output()->in_scratch_emit_size()) {
    // Publish the label MachVEPNode::emit branches to (skipped while
    // only measuring code size).
    __ bind(*_verified_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1789 






 1790 int MachPrologNode::reloc() const
 1791 {
 1792   return 0;
 1793 }
 1794 
 1795 //=============================================================================
 1796 
 1797 #ifndef PRODUCT
 1798 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1799   Compile* C = ra_->C;
 1800   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1801 
 1802   st->print("# pop frame %d\n\t",framesize);
 1803 
 1804   if (framesize == 0) {
 1805     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1806   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
 1807     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
 1808     st->print("add  sp, sp, #%d\n\t", framesize);
 1809   } else {

 1813   }
 1814   if (VM_Version::use_rop_protection()) {
 1815     st->print("autia lr, rfp\n\t");
 1816     st->print("ldr zr, [lr]\n\t");
 1817   }
 1818 
 1819   if (do_polling() && C->is_method_compilation()) {
 1820     st->print("# test polling word\n\t");
 1821     st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
 1822     st->print("cmp  sp, rscratch1\n\t");
 1823     st->print("bhi #slow_path");
 1824   }
 1825 }
 1826 #endif
 1827 
// Emit the method epilog: tear down the frame (with stack repair for
// frames extended for scalarized inline-type args), optionally check
// the reserved stack area, and emit the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize, C->needs_stack_repair());

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Register a real slow-path stub; the dummy label is only used
      // while measuring code size.
      code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}
 1849 





// Relocation count for the epilog.
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
 1854 
// Scheduling class: use the generic MachNode pipeline.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1858 
 1859 //=============================================================================
 1860 
// Figure out which register class each belongs in: rc_int, rc_float,
// rc_predicate or rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 1864 
 1865 static enum RC rc_class(OptoReg::Name reg) {
 1866 
 1867   if (reg == OptoReg::Bad) {
 1868     return rc_bad;
 1869   }

 2135 
 2136   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2137   int reg    = ra_->get_encode(this);
 2138 
 2139   // This add will handle any 24-bit signed offset. 24 bits allows an
 2140   // 8 megabyte stack frame.
 2141   __ add(as_Register(reg), sp, offset);
 2142 }
 2143 
// Size in bytes of the box-lock address computation.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    // Offset fits in an add immediate: one instruction.
    return NativeInstruction::instruction_size;
  } else {
    // Offset must first be materialized into a scratch register.
    return 2 * NativeInstruction::instruction_size;
  }
}
 2154 
//=============================================================================
 2156 #ifndef PRODUCT
// Debug listing for the inline-type entry point (not built in PRODUCT).
void MachVEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachVEPNode");
  if (!_verified) {
    st->print_cr("\t load_class");
  } else {
    st->print_cr("\t unpack_inline_arg");
  }
}
 2166 #endif
 2167 
// Emit the inline-type entry point. Unverified flavor: inline-cache
// check. Verified flavor: unpack scalarized inline-type args and branch
// to the verified entry bound in MachPrologNode::emit.
void MachVEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  C2_MacroAssembler _masm(&cbuf);

  if (!_verified) {
    // Inline cache check: fall through on a klass match, otherwise jump
    // to the ic-miss stub.
    Label skip;
    __ cmp_klass(j_rarg0, rscratch2, rscratch1);
    __ br(Assembler::EQ, skip);
      __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ bind(skip);

  } else {
    // insert a nop at the start of the prolog so we can patch in a
    // branch if we need to invalidate the method later
    __ nop();

    // TODO 8284443 Avoid creation of temporary frame
    if (ra_->C->stub_function() == NULL) {
      __ verified_entry(ra_->C, 0);
      __ entry_barrier();
      int framesize = ra_->C->output()->frame_slots() << LogBytesPerInt;
      __ remove_frame(framesize, false);
    }
    // Unpack inline type args passed as oop and then jump to
    // the verified entry point (skipping the unverified entry).
    int sp_inc = __ unpack_inline_args(ra_->C, _receiver_only);
    // Emit code for verified entry and save increment for stack repair on return
    __ verified_entry(ra_->C, sp_inc);
    if (Compile::current()->output()->in_scratch_emit_size()) {
      // While measuring code size, branch to a local dummy label.
      Label dummy_verified_entry;
      __ b(dummy_verified_entry);
    } else {
      __ b(*_verified_entry);
    }
  }
}
 2204 
 2205 //=============================================================================
 2206 #ifndef PRODUCT
 2207 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2208 {
 2209   st->print_cr("# MachUEPNode");
 2210   if (UseCompressedClassPointers) {
 2211     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2212     if (CompressedKlassPointers::shift() != 0) {
 2213       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2214     }
 2215   } else {
 2216    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2217   }
 2218   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2219   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2220 }
 2221 #endif
 2222 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);
  Label skip;

  // UseCompressedClassPointers logic are inside cmp_klass
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);

  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
 2238 





 2239 // REQUIRED EMIT CODE
 2240 
 2241 //=============================================================================
 2242 
 2243 // Emit exception handler code.
 2244 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
 2245 {
 2246   // mov rscratch1 #exception_blob_entry_point
 2247   // br rscratch1
 2248   // Note that the code buffer's insts_mark is always relative to insts.
 2249   // That's why we must use the macroassembler to generate a handler.
 2250   C2_MacroAssembler _masm(&cbuf);
 2251   address base = __ start_a_stub(size_exception_handler());
 2252   if (base == NULL) {
 2253     ciEnv::current()->record_failure("CodeCache is full");
 2254     return 0;  // CodeBuffer::expand failed
 2255   }
 2256   int offset = __ offset();
 2257   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 2258   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

 3668     C2_MacroAssembler _masm(&cbuf);
 3669     int method_index = resolved_method_index(cbuf);
 3670     address call = __ ic_call((address)$meth$$method, method_index);
 3671     if (call == NULL) {
 3672       ciEnv::current()->record_failure("CodeCache is full");
 3673       return;
 3674     }
 3675     _masm.clear_inst_mark();
 3676     __ post_call_nop();
 3677     if (Compile::current()->max_vector_size() > 0) {
 3678       __ reinitialize_ptrue();
 3679     }
 3680   %}
 3681 
  // Code emitted after a Java call returns: optional stack-depth
  // verification, plus fixups for inline types returned as fields.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
    if (tf()->returns_inline_type_as_fields() && !_method->is_method_handle_intrinsic()) {
      if (!_method->signature()->returns_null_free_inline_type()) {
        // The last return value is not set by the callee but used to pass IsInit information to compiled code.
        // Search for the corresponding projection, get the register and emit code that initialized it.
        uint con = (tf()->range_cc()->cnt() - 1);
        for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
          ProjNode* proj = fast_out(i)->as_Proj();
          if (proj->_con == con) {
            // Set IsInit if r0 is non-null (a non-null value is returned buffered or scalarized)
            OptoReg::Name optoReg = ra_->get_reg_first(proj);
            VMReg reg = OptoReg::as_VMReg(optoReg, ra_->_framesize, OptoReg::reg2stack(ra_->_matcher._new_SP));
            Register toReg = reg->is_reg() ? reg->as_Register() : rscratch1;
            __ cmp(r0, zr);
            __ cset(toReg, Assembler::NE);
            if (reg->is_stack()) {
              // Projection lives on the stack: spill the computed flag.
              int st_off = reg->reg2stack() * VMRegImpl::stack_slot_size;
              __ str(toReg, Address(sp, st_off));
            }
            break;
          }
        }
      }
      if (return_value_is_used()) {
        // An inline type is returned as fields in multiple registers.
        // R0 either contains an oop if the inline type is buffered or a pointer
        // to the corresponding InlineKlass with the lowest bit set to 1. Zero r0
        // if the lowest bit is set to allow C2 to use the oop after null checking.
        // r0 &= (r0 & 1) - 1
        __ andr(rscratch1, r0, 0x1);
        __ sub(rscratch1, rscratch1, 0x1);
        __ andr(r0, r0, rscratch1);
      }
    }
  %}
 3722 
 3723   enc_class aarch64_enc_java_to_runtime(method meth) %{
 3724     C2_MacroAssembler _masm(&cbuf);
 3725 
 3726     // some calls to generated routines (arraycopy code) are scheduled
 3727     // by C2 as runtime calls. if so we can call them using a br (they
 3728     // will be in a reachable segment) otherwise we have to use a blr
 3729     // which loads the absolute address into a register.
 3730     address entry = (address)$meth$$method;
 3731     CodeBlob *cb = CodeCache::find_blob(entry);
 3732     if (cb) {
 3733       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
 3734       if (call == NULL) {
 3735         ciEnv::current()->record_failure("CodeCache is full");
 3736         return;
 3737       }
 3738       __ post_call_nop();
 3739     } else {
 3740       Label retaddr;

 3795 
 3796     assert_different_registers(oop, box, tmp, disp_hdr);
 3797 
 3798     // Load markWord from object into displaced_header.
 3799     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
 3800 
 3801     if (DiagnoseSyncOnValueBasedClasses != 0) {
 3802       __ load_klass(tmp, oop);
 3803       __ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 3804       __ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 3805       __ br(Assembler::NE, cont);
 3806     }
 3807 
 3808     // Check for existing monitor
 3809     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
 3810 
 3811     if (!UseHeavyMonitors) {
 3812       // Set tmp to be (markWord of object | UNLOCK_VALUE).
 3813       __ orr(tmp, disp_hdr, markWord::unlocked_value);
 3814 
 3815       if (EnableValhalla) {
 3816         // Mask inline_type bit such that we go to the slow path if object is an inline type
 3817         __ andr(tmp, tmp, ~((int) markWord::inline_type_bit_in_place));
 3818       }
 3819 
 3820       // Initialize the box. (Must happen before we update the object mark!)
 3821       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 3822 
 3823       // Compare object markWord with an unlocked value (tmp) and if
 3824       // equal exchange the stack address of our box with object markWord.
 3825       // On failure disp_hdr contains the possibly locked markWord.
 3826       __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
 3827                  /*release*/ true, /*weak*/ false, disp_hdr);
 3828       __ br(Assembler::EQ, cont);
 3829 
 3830       assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 3831 
 3832       // If the compare-and-exchange succeeded, then we found an unlocked
      // object, will have now locked it, and will continue at label cont
 3834 
 3835       // Check if the owner is self by comparing the value in the
 3836       // markWord of object (disp_hdr) with the stack pointer.
 3837       __ mov(rscratch1, sp);
 3838       __ sub(disp_hdr, disp_hdr, rscratch1);
 3839       __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));

// Load Long Constant
// Materializes an arbitrary 64-bit immediate into a GP register via the
// shared aarch64_enc_mov_imm encoding (definition not in view here).
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7284 
 7285 // Load Pointer Constant
 7286 
// Materializes a pointer constant. Cost is INSN_COST * 4 since a full
// 64-bit pointer may need up to a four-instruction mov sequence
// (encoding supplied by aarch64_enc_mov_p, not in view here).
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 7300 
 7301 // Load Null Pointer Constant
 7302 
// Specialized form for the NULL pointer constant (immP0): a single
// instruction suffices, hence the lower cost than loadConP.
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}

 8474 %}
 8475 
 8476 // ============================================================================
 8477 // Cast/Convert Instructions
 8478 
 8479 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8480   match(Set dst (CastX2P src));
 8481 
 8482   ins_cost(INSN_COST);
 8483   format %{ "mov $dst, $src\t# long -> ptr" %}
 8484 
 8485   ins_encode %{
 8486     if ($dst$$reg != $src$$reg) {
 8487       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8488     }
 8489   %}
 8490 
 8491   ins_pipe(ialu_reg);
 8492 %}
 8493 
 8494 instruct castN2X(iRegLNoSp dst, iRegN src) %{
 8495   match(Set dst (CastP2X src));
 8496 
 8497   ins_cost(INSN_COST);
 8498   format %{ "mov $dst, $src\t# ptr -> long" %}
 8499 
 8500   ins_encode %{
 8501     if ($dst$$reg != $src$$reg) {
 8502       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8503     }
 8504   %}
 8505 
 8506   ins_pipe(ialu_reg);
 8507 %}
 8508 
 8509 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8510   match(Set dst (CastP2X src));
 8511 
 8512   ins_cost(INSN_COST);
 8513   format %{ "mov $dst, $src\t# ptr -> long" %}
 8514 
 8515   ins_encode %{
 8516     if ($dst$$reg != $src$$reg) {
 8517       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8518     }
 8519   %}
 8520 
 8521   ins_pipe(ialu_reg);
 8522 %}
 8523 
 8524 // Convert oop into int for vectors alignment masking
 8525 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8526   match(Set dst (ConvL2I (CastP2X src)));
 8527 
 8528   ins_cost(INSN_COST);

14939 
14940   match(Set dst (MoveL2D src));
14941 
14942   effect(DEF dst, USE src);
14943 
14944   ins_cost(INSN_COST);
14945 
14946   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
14947 
14948   ins_encode %{
14949     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
14950   %}
14951 
14952   ins_pipe(fp_l2d);
14953 
14954 %}
14955 
14956 // ============================================================================
14957 // clearing of an array
14958 
// Zero-fill an array: cnt words starting at base, fill value constrained
// to zero by the immL0 operand. Delegates to MacroAssembler::zero_words,
// which may call out to a stub; a NULL return means the stub could not be
// allocated, so compilation is bailed out with a CodeCache-full failure.
instruct clearArray_reg_reg_immL0(iRegL_R11 cnt, iRegP_R10 base, immL0 zero, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray (Binary cnt base) zero));
  // cnt/base are fixed registers (r11/r10) consumed by zero_words.
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14977 
// Fill an array with an arbitrary (non-constant-zero) word value.
// Only selected when the node is marked word_copy_only, i.e. the fill
// must proceed word-by-word; emitted inline via fill_words, so no
// CodeCache-full bailout is needed here.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, iRegL val, Universe dummy, rFlagsReg cr)
%{
  predicate(((ClearArrayNode*)n)->word_copy_only());
  match(Set dummy (ClearArray (Binary cnt base) val));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base, $val" %}

  ins_encode %{
    __ fill_words($base$$Register, $cnt$$Register, $val$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
14993 
// Zero-fill with a compile-time-constant word count. Chosen only when the
// count is below BlockZeroingLowLimit (expressed in words) -- small enough
// that the constant-length zero_words expansion beats the general path --
// and the node does not demand word-by-word copying.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord)
            && !((ClearArrayNode*)n)->word_copy_only());
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words may emit a call to a stub; NULL means stub allocation
    // failed, so record a CodeCache-full bailout.
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
15015 
15016 // ============================================================================
15017 // Overflow Math Instructions
15018 

16312 
16313 // Call Runtime Instruction
16314 
// Direct call to a runtime leaf routine (no Java frame anchoring needed
// beyond what aarch64_enc_java_to_runtime emits -- see that encoding for
// the stub-vs-runtime call sequence).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16329 
16330 // Call Runtime Instruction
16331 
16332 // entry point is null, target holds the address to call
// entry point is null, target holds the address to call
// Indirect leaf call (no FP arguments): the callee address is computed at
// runtime and supplied in a register, so a plain blr suffices.
// NOTE(review): unlike aarch64_enc_java_to_runtime, no post_call_nop() is
// emitted after this call -- confirm that indirect leaf-nofp calls do not
// need one for stack walking in this VM version.
instruct CallLeafNoFPIndirect(iRegP target)
%{
  // Selected only when the call node carries no static entry point.
  predicate(n->as_Call()->entry_point() == NULL);

  match(CallLeafNoFP target);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp indirect $target" %}

  ins_encode %{
    __ blr($target$$Register);
  %}

  ins_pipe(pipe_class_call);
%}
16349 
// Direct leaf call with no FP arguments; complement of
// CallLeafNoFPIndirect, selected when a static entry point is known.
instruct CallLeafNoFPDirect(method meth)
%{
  predicate(n->as_Call()->entry_point() != NULL);

  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16366 
16367 // Tail Call; Jump from runtime stub to Java code.
16368 // Also known as an 'interprocedural jump'.
16369 // Target of jump will eventually return to caller.
16370 // TailJump below removes the return address.
16371 instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_ptr)
16372 %{
16373   match(TailCall jump_target method_ptr);
< prev index next >