< prev index next >

src/hotspot/cpu/aarch64/aarch64.ad

Print this page

 1637 
      // Byte offset from the start of a dynamic Java call to its return
      // address: four 4-byte instructions (movz, movk, movk, bl).
 1638 int MachCallDynamicJavaNode::ret_addr_offset()
 1639 {
 1640   return 16; // movz, movk, movk, bl
 1641 }
 1642 
      // Byte offset from the start of a runtime call to its return address.
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
        // Target is inside the code cache: one bl (possibly via a trampoline).
 1656     return 1 * NativeInstruction::instruction_size;



 1657   } else {
        // Out-of-code-cache callout: the six-instruction sequence above.
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
 1664 #ifndef PRODUCT
      // Debug-only listing for a breakpoint node.
 1665 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1666   st->print("BREAKPOINT");
 1667 }
 1668 #endif
 1669 
      // Emit a breakpoint as a brk #0 trap instruction.
 1670 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1671   __ brk(0);
 1672 }
 1673 
      // Size in bytes of the emitted breakpoint; defer to the generic sizing.
 1674 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1675   return MachNode::size(ra_);
 1676 }

 1745   if (C->stub_function() == nullptr) {
 1746     st->print("\n\t");
 1747     st->print("ldr  rscratch1, [guard]\n\t");
 1748     st->print("dmb ishld\n\t");
 1749     st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
 1750     st->print("cmp  rscratch1, rscratch2\n\t");
 1751     st->print("b.eq skip");
 1752     st->print("\n\t");
 1753     st->print("blr #nmethod_entry_barrier_stub\n\t");
 1754     st->print("b skip\n\t");
 1755     st->print("guard: int\n\t");
 1756     st->print("\n\t");
 1757     st->print("skip:\n\t");
 1758   }
 1759 }
 1760 #endif
 1761 
      // Emit the method prolog: patchable nop, optional class-init barrier,
      // optional SVE predicate reinit, stack-bang check, frame build, and
      // (for non-stub compiles) the nmethod entry barrier.
 1762 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1763   Compile* C = ra_->C;
 1764 
 1765   // n.b. frame size includes space for return pc and rfp
 1766   const int framesize = C->output()->frame_size_in_bytes();
 1767 
 1768   // insert a nop at the start of the prolog so we can patch in a
 1769   // branch if we need to invalidate the method later
 1770   __ nop();
 1771 
 1772   if (C->clinit_barrier_on_entry()) {
 1773     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1774 
 1775     Label L_skip_barrier;
 1776 
        // Jump to the wrong-method stub unless the holder class is initialized.
 1777     __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
 1778     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 1779     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 1780     __ bind(L_skip_barrier);
 1781   }
 1782 
      // Restore the all-true SVE predicate register used by vector code.
 1783   if (C->max_vector_size() > 0) {
 1784     __ reinitialize_ptrue();
 1785   }
 1786 
 1787   int bangsize = C->output()->bang_size_in_bytes();
 1788   if (C->output()->need_stack_bang(bangsize))
 1789     __ generate_stack_overflow_check(bangsize);
 1790 
 1791   __ build_frame(framesize);
 1792 
 1793   if (C->stub_function() == nullptr) {
 1794     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1795     // Dummy labels for just measuring the code size
 1796     Label dummy_slow_path;
 1797     Label dummy_continuation;
 1798     Label dummy_guard;
 1799     Label* slow_path = &dummy_slow_path;
 1800     Label* continuation = &dummy_continuation;
 1801     Label* guard = &dummy_guard;
 1802     if (!Compile::current()->output()->in_scratch_emit_size()) {
 1803       // Use real labels from actual stub when not emitting code for the purpose of measuring its size
 1804       C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
 1805       Compile::current()->output()->add_stub(stub);
 1806       slow_path = &stub->entry();
 1807       continuation = &stub->continuation();
 1808       guard = &stub->guard();
 1809     }
 1810     // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
 1811     bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
 1812   }
 1813 
 1814   if (VerifyStackAtCalls) {
 1815     Unimplemented();
 1816   }
 1817 
      // The frame is fully built at this point; record the offset for the VM.
 1818   C->output()->set_frame_complete(__ offset());
 1819 
 1820   if (C->has_mach_constant_base_node()) {
 1821     // NOTE: We set the table base offset here because users might be
 1822     // emitted before MachConstantBaseNode.
 1823     ConstantTable& constant_table = C->output()->constant_table();
 1824     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1825   }
 1826 }
 1827 
      // Prolog size varies with barriers/stack-bang; measure via generic sizing.
 1828 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
 1829 {
 1830   return MachNode::size(ra_); // too many variables; just compute it
 1831                               // the hard way
 1832 }
 1833 
      // Number of relocatable values in the prolog: none.
 1834 int MachPrologNode::reloc() const
 1835 {
 1836   return 0;
 1837 }
 1838 
 1839 //=============================================================================
 1840 
 1841 #ifndef PRODUCT
      // Debug-only listing of the epilog: frame pop (form depends on frame
      // size), optional ROP-protection check, optional return safepoint poll.
 1842 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1843   Compile* C = ra_->C;
 1844   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1845 
 1846   st->print("# pop frame %d\n\t",framesize);
 1847 
 1848   if (framesize == 0) {
 1849     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1850   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
        // Small frame: restore lr/rfp from their slot, then pop in one add.
 1851     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
 1852     st->print("add  sp, sp, #%d\n\t", framesize);
 1853   } else {

 1856     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1857   }
 1858   if (VM_Version::use_rop_protection()) {
 1859     st->print("autiaz\n\t");
 1860     st->print("ldr  zr, [lr]\n\t");
 1861   }
 1862 
 1863   if (do_polling() && C->is_method_compilation()) {
 1864     st->print("# test polling word\n\t");
 1865     st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
 1866     st->print("cmp  sp, rscratch1\n\t");
 1867     st->print("bhi #slow_path");
 1868   }
 1869 }
 1870 #endif
 1871 
      // Emit the method epilog: tear down the frame, optionally check the
      // reserved stack area, and emit the return safepoint poll.
 1872 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1873   Compile* C = ra_->C;
 1874   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1875 
 1876   __ remove_frame(framesize);
 1877 
 1878   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1879     __ reserved_stack_check();
 1880   }
 1881 
 1882   if (do_polling() && C->is_method_compilation()) {
        // Use a real out-of-line poll stub unless we are only measuring size.
 1883     Label dummy_label;
 1884     Label* code_stub = &dummy_label;
 1885     if (!C->output()->in_scratch_emit_size()) {
 1886       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 1887       C->output()->add_stub(stub);
 1888       code_stub = &stub->entry();
 1889     }
 1890     __ relocate(relocInfo::poll_return_type);
 1891     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 1892   }
 1893 }
 1894 
 1895 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1896   // Variable size. Determine dynamically.
 1897   return MachNode::size(ra_);
 1898 }
 1899 
 1900 int MachEpilogNode::reloc() const {
 1901   // Return number of relocatable values contained in this instruction.
 1902   return 1; // 1 for polling page.
 1903 }
 1904 
      // Scheduling class for the epilog: the generic MachNode pipeline.
 1905 const Pipeline * MachEpilogNode::pipeline() const {
 1906   return MachNode::pipeline_class();
 1907 }
 1908 
 1909 //=============================================================================
 1910 
 1911 static enum RC rc_class(OptoReg::Name reg) {
 1912 
 1913   if (reg == OptoReg::Bad) {
 1914     return rc_bad;
 1915   }
 1916 
 1917   // we have 32 int registers * 2 halves
 1918   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1919 

      // Materialize the address of this node's stack slot (sp + offset) into
      // its assigned register.
 2175 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 2176   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2177   int reg    = ra_->get_encode(this);
 2178 
 2179   // This add will handle any 24-bit signed offset. 24 bits allows an
 2180   // 8 megabyte stack frame.
 2181   __ add(as_Register(reg), sp, offset);
 2182 }
 2183 
      // Size of the emitted add: one instruction if the offset fits an
      // add/sub immediate, otherwise two (see the emit above).
 2184 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2185   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2186   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2187 
 2188   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2189     return NativeInstruction::instruction_size;
 2190   } else {
 2191     return 2 * NativeInstruction::instruction_size;
 2192   }
 2193 }
 2194 
 2195 //=============================================================================











 2196 































 2197 #ifndef PRODUCT
      // Debug-only listing of the unverified entry point: pseudo-assembly for
      // the inline-cache klass check emitted by MachUEPNode::emit.
 2198 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2199 {
 2200   st->print_cr("# MachUEPNode");
 2201   if (UseCompressedClassPointers) {
 2202     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2203     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2204     st->print_cr("\tcmpw rscratch1, r10");
 2205   } else {
      // These are full-width klass loads, so do not label them "compressed".
 2206     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
 2207     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# klass");
 2208     st->print_cr("\tcmp rscratch1, r10");
 2209   }
 2210   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2211 }
 2212 #endif
 2213 
      // Emit the unverified entry point: the inline-cache check, aligned to
      // InteriorEntryAlignment.
 2214 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
 2215 {
 2216   __ ic_check(InteriorEntryAlignment);
 2217 }
 2218 
      // Variable size (alignment-dependent); defer to generic sizing.
 2219 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 2220 {
 2221   return MachNode::size(ra_);
 2222 }
 2223 
 2224 // REQUIRED EMIT CODE
 2225 
 2226 //=============================================================================
 2227 
 2228 // Emit exception handler code.
 2229 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
 2230 {
 2231   // mov rscratch1 #exception_blob_entry_point
 2232   // br rscratch1
 2233   // Note that the code buffer's insts_mark is always relative to insts.
 2234   // That's why we must use the macroassembler to generate a handler.
 2235   address base = __ start_a_stub(size_exception_handler());
 2236   if (base == nullptr) {
 2237     ciEnv::current()->record_failure("CodeCache is full");
 2238     return 0;  // CodeBuffer::expand failed
 2239   }
 2240   int offset = __ offset();
 2241   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 2242   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 2243   __ end_a_stub();

 3676   %}
 3677 
      // Encoding for a dynamic (inline-cache) Java call; fails the compile
      // if the code cache is full.
 3678   enc_class aarch64_enc_java_dynamic_call(method meth) %{
 3679     int method_index = resolved_method_index(masm);
 3680     address call = __ ic_call((address)$meth$$method, method_index);
 3681     if (call == nullptr) {
 3682       ciEnv::current()->record_failure("CodeCache is full");
 3683       return;
 3684     }
 3685     __ post_call_nop();
          // The callee may have clobbered the SVE all-true predicate; restore it.
 3686     if (Compile::current()->max_vector_size() > 0) {
 3687       __ reinitialize_ptrue();
 3688     }
 3689   %}
 3690 
      // Post-call encoding: only an (unimplemented) stack-depth check under
      // VerifyStackAtCalls.
 3691   enc_class aarch64_enc_call_epilog() %{
 3692     if (VerifyStackAtCalls) {
 3693       // Check that stack depth is unchanged: find majik cookie on stack
 3694       __ call_Unimplemented();
 3695     }































 3696   %}
 3697 
 3698   enc_class aarch64_enc_java_to_runtime(method meth) %{
 3699     // some calls to generated routines (arraycopy code) are scheduled
 3700     // by C2 as runtime calls. if so we can call them using a br (they
 3701     // will be in a reachable segment) otherwise we have to use a blr
 3702     // which loads the absolute address into a register.
 3703     address entry = (address)$meth$$method;
 3704     CodeBlob *cb = CodeCache::find_blob(entry);
 3705     if (cb) {
 3706       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
 3707       if (call == nullptr) {
 3708         ciEnv::current()->record_failure("CodeCache is full");
 3709         return;
 3710       }
 3711       __ post_call_nop();
 3712     } else {
 3713       Label retaddr;
 3714       // Make the anchor frame walkable
 3715       __ adr(rscratch2, retaddr);

      // Load a 64-bit immediate into a general register
      // (expansion chosen by aarch64_enc_mov_imm).
 6772 instruct loadConL(iRegLNoSp dst, immL src)
 6773 %{
 6774   match(Set dst src);
 6775 
 6776   ins_cost(INSN_COST);
 6777   format %{ "mov $dst, $src\t# long" %}
 6778 
 6779   ins_encode( aarch64_enc_mov_imm(dst, src) );
 6780 
 6781   ins_pipe(ialu_imm);
 6782 %}
 6783 
 6784 // Load Pointer Constant
 6785 
      // Load a pointer constant (via aarch64_enc_mov_p); costed at four
      // instructions for the worst-case materialization.
 6786 instruct loadConP(iRegPNoSp dst, immP con)
 6787 %{
 6788   match(Set dst con);
 6789 
 6790   ins_cost(INSN_COST * 4);
 6791   format %{
 6792     "mov  $dst, $con\t# ptr\n\t"
 6793   %}
 6794 
 6795   ins_encode(aarch64_enc_mov_p(dst, con));
 6796 
 6797   ins_pipe(ialu_imm);
 6798 %}
 6799 
 6800 // Load Null Pointer Constant
 6801 
      // Load the null pointer constant (via aarch64_enc_mov_p0).
 6802 instruct loadConP0(iRegPNoSp dst, immP0 con)
 6803 %{
 6804   match(Set dst con);
 6805 
 6806   ins_cost(INSN_COST);
 6807   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6808 
 6809   ins_encode(aarch64_enc_mov_p0(dst, con));
 6810 
 6811   ins_pipe(ialu_imm);
 6812 %}

 7968 %}
 7969 
 7970 // ============================================================================
 7971 // Cast/Convert Instructions
 7972 
      // Reinterpret a long as a pointer; register-to-register move, elided
      // when source and destination coincide.
 7973 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 7974   match(Set dst (CastX2P src));
 7975 
 7976   ins_cost(INSN_COST);
 7977   format %{ "mov $dst, $src\t# long -> ptr" %}
 7978 
 7979   ins_encode %{
 7980     if ($dst$$reg != $src$$reg) {
 7981       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 7982     }
 7983   %}
 7984 
 7985   ins_pipe(ialu_reg);
 7986 %}
 7987 






























      // Reinterpret a pointer as a long; register-to-register move, elided
      // when source and destination coincide.
 7988 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 7989   match(Set dst (CastP2X src));
 7990 
 7991   ins_cost(INSN_COST);
 7992   format %{ "mov $dst, $src\t# ptr -> long" %}
 7993 
 7994   ins_encode %{
 7995     if ($dst$$reg != $src$$reg) {
 7996       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 7997     }
 7998   %}
 7999 
 8000   ins_pipe(ialu_reg);
 8001 %}
 8002 
 8003 // Convert oop into int for vectors alignment masking
 8004 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8005   match(Set dst (ConvL2I (CastP2X src)));
 8006 
 8007   ins_cost(INSN_COST);

14793 
14794   match(Set dst (MoveL2D src));
14795 
14796   effect(DEF dst, USE src);
14797 
14798   ins_cost(INSN_COST);
14799 
14800   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
14801 
14802   ins_encode %{
14803     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
14804   %}
14805 
14806   ins_pipe(fp_l2d);
14807 
14808 %}
14809 
14810 // ============================================================================
14811 // clearing of an array
14812 
      // Zero an array given base (r10) and word count (r11); both inputs are
      // clobbered. Fails the compile if zero_words cannot get a stub.
14813 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14814 %{
14815   match(Set dummy (ClearArray cnt base));
14816   effect(USE_KILL cnt, USE_KILL base, KILL cr);
14817 
14818   ins_cost(4 * INSN_COST);
14819   format %{ "ClearArray $cnt, $base" %}
14820 
14821   ins_encode %{
14822     address tpc = __ zero_words($base$$Register, $cnt$$Register);
14823     if (tpc == nullptr) {
14824       ciEnv::current()->record_failure("CodeCache is full");
14825       return;
14826     }
14827   %}
14828 
14829   ins_pipe(pipe_class_memory);
14830 %}
14831 
















      // Zero an array with a constant word count, only when the count is
      // below the block-zeroing threshold (see predicate).
14832 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
14833 %{
14834   predicate((uint64_t)n->in(2)->get_long()
14835             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));

14836   match(Set dummy (ClearArray cnt base));
14837   effect(TEMP temp, USE_KILL base, KILL cr);
14838 
14839   ins_cost(4 * INSN_COST);
14840   format %{ "ClearArray $cnt, $base" %}
14841 
14842   ins_encode %{
14843     address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
14844     if (tpc == nullptr) {
14845       ciEnv::current()->record_failure("CodeCache is full");
14846       return;
14847     }
14848   %}
14849 
14850   ins_pipe(pipe_class_memory);
14851 %}
14852 
14853 // ============================================================================
14854 // Overflow Math Instructions
14855 

16166 %}
16167 
16168 // Call Runtime Instruction without safepoint and with vector arguments
      // Runtime leaf call (no safepoint) with vector arguments; uses the
      // java_to_runtime call encoding.
16169 instruct CallLeafDirectVector(method meth)
16170 %{
16171   match(CallLeafVector);
16172 
16173   effect(USE meth);
16174 
16175   ins_cost(CALL_COST);
16176 
16177   format %{ "CALL, runtime leaf vector $meth" %}
16178 
16179   ins_encode(aarch64_enc_java_to_runtime(meth));
16180 
16181   ins_pipe(pipe_class_call);
16182 %}
16183 
16184 // Call Runtime Instruction
16185 


















      // Runtime leaf call (no safepoint, no FP arguments); uses the
      // java_to_runtime call encoding.
16186 instruct CallLeafNoFPDirect(method meth)
16187 %{


16188   match(CallLeafNoFP);
16189 
16190   effect(USE meth);
16191 
16192   ins_cost(CALL_COST);
16193 
16194   format %{ "CALL, runtime leaf nofp $meth" %}
16195 
16196   ins_encode( aarch64_enc_java_to_runtime(meth) );
16197 
16198   ins_pipe(pipe_class_call);
16199 %}
16200 
16201 // Tail Call; Jump from runtime stub to Java code.
16202 // Also known as an 'interprocedural jump'.
16203 // Target of jump will eventually return to caller.
16204 // TailJump below removes the return address.
16205 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16206 // emitted just above the TailCall which has reset rfp to the caller state.
16207 instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)

 1637 
      // Byte offset from the start of a dynamic Java call to its return
      // address: four 4-byte instructions (movz, movk, movk, bl).
 1638 int MachCallDynamicJavaNode::ret_addr_offset()
 1639 {
 1640   return 16; // movz, movk, movk, bl
 1641 }
 1642 
      // Byte offset from the start of a runtime call to its return address.
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
        // Target is inside the code cache: one bl (possibly via a trampoline).
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else if (_entry_point == nullptr) {
 1658     // See CallLeafNoFPIndirect
 1659     return 1 * NativeInstruction::instruction_size;
 1660   } else {
        // Out-of-code-cache runtime callout: the six-instruction sequence above.
 1661     return 6 * NativeInstruction::instruction_size;
 1662   }
 1663 }
 1664 
 1665 //=============================================================================
 1666 
 1667 #ifndef PRODUCT
      // Debug-only listing for a breakpoint node.
 1668 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1669   st->print("BREAKPOINT");
 1670 }
 1671 #endif
 1672 
      // Emit a breakpoint as a brk #0 trap instruction.
 1673 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1674   __ brk(0);
 1675 }
 1676 
      // Size in bytes of the emitted breakpoint; defer to the generic sizing.
 1677 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1678   return MachNode::size(ra_);
 1679 }

 1748   if (C->stub_function() == nullptr) {
 1749     st->print("\n\t");
 1750     st->print("ldr  rscratch1, [guard]\n\t");
 1751     st->print("dmb ishld\n\t");
 1752     st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
 1753     st->print("cmp  rscratch1, rscratch2\n\t");
 1754     st->print("b.eq skip");
 1755     st->print("\n\t");
 1756     st->print("blr #nmethod_entry_barrier_stub\n\t");
 1757     st->print("b skip\n\t");
 1758     st->print("guard: int\n\t");
 1759     st->print("\n\t");
 1760     st->print("skip:\n\t");
 1761   }
 1762 }
 1763 #endif
 1764 
      // Emit the method prolog: patchable nop, verified entry (frame build),
      // nmethod entry barrier for non-stub compiles, then bind the verified
      // entry label targeted by MachVEPNode.
 1765 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1766   Compile* C = ra_->C;
 1767 



 1768   // insert a nop at the start of the prolog so we can patch in a
 1769   // branch if we need to invalidate the method later
 1770   __ nop();
 1771 
 1772   __ verified_entry(C, 0);



 1773 
 1774   if (C->stub_function() == nullptr) {
 1775     __ entry_barrier();






 1776   }
 1777 
      // Bind the real verified-entry label only when actually emitting code
      // (not while measuring code size in a scratch buffer).
 1778   if (!Compile::current()->output()->in_scratch_emit_size()) {
 1779     __ bind(*_verified_entry);























 1780   }
 1781 
 1782   if (VerifyStackAtCalls) {
 1783     Unimplemented();
 1784   }
 1785 
      // The frame is fully built at this point; record the offset for the VM.
 1786   C->output()->set_frame_complete(__ offset());
 1787 
 1788   if (C->has_mach_constant_base_node()) {
 1789     // NOTE: We set the table base offset here because users might be
 1790     // emitted before MachConstantBaseNode.
 1791     ConstantTable& constant_table = C->output()->constant_table();
 1792     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1793   }
 1794 }
 1795 






      // Number of relocatable values in the prolog: none.
 1796 int MachPrologNode::reloc() const
 1797 {
 1798   return 0;
 1799 }
 1800 
 1801 //=============================================================================
 1802 
 1803 #ifndef PRODUCT
      // Debug-only listing of the epilog: frame pop (form depends on frame
      // size), optional ROP-protection check, optional return safepoint poll.
 1804 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1805   Compile* C = ra_->C;
 1806   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1807 
 1808   st->print("# pop frame %d\n\t",framesize);
 1809 
 1810   if (framesize == 0) {
 1811     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1812   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
        // Small frame: restore lr/rfp from their slot, then pop in one add.
 1813     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
 1814     st->print("add  sp, sp, #%d\n\t", framesize);
 1815   } else {

 1818     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1819   }
 1820   if (VM_Version::use_rop_protection()) {
 1821     st->print("autiaz\n\t");
 1822     st->print("ldr  zr, [lr]\n\t");
 1823   }
 1824 
 1825   if (do_polling() && C->is_method_compilation()) {
 1826     st->print("# test polling word\n\t");
 1827     st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
 1828     st->print("cmp  sp, rscratch1\n\t");
 1829     st->print("bhi #slow_path");
 1830   }
 1831 }
 1832 #endif
 1833 
      // Emit the method epilog: tear down the frame (with stack repair when
      // needed), optionally check the reserved stack area, and emit the
      // return safepoint poll.
 1834 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1835   Compile* C = ra_->C;
 1836   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1837 
 1838   __ remove_frame(framesize, C->needs_stack_repair());
 1839 
 1840   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1841     __ reserved_stack_check();
 1842   }
 1843 
 1844   if (do_polling() && C->is_method_compilation()) {
        // Use a real out-of-line poll stub unless we are only measuring size.
 1845     Label dummy_label;
 1846     Label* code_stub = &dummy_label;
 1847     if (!C->output()->in_scratch_emit_size()) {
 1848       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 1849       C->output()->add_stub(stub);
 1850       code_stub = &stub->entry();
 1851     }
 1852     __ relocate(relocInfo::poll_return_type);
 1853     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 1854   }
 1855 }
 1856 





 1857 int MachEpilogNode::reloc() const {
 1858   // Return number of relocatable values contained in this instruction.
 1859   return 1; // 1 for polling page.
 1860 }
 1861 
      // Scheduling class for the epilog: the generic MachNode pipeline.
 1862 const Pipeline * MachEpilogNode::pipeline() const {
 1863   return MachNode::pipeline_class();
 1864 }
 1865 
 1866 //=============================================================================
 1867 
 1868 static enum RC rc_class(OptoReg::Name reg) {
 1869 
 1870   if (reg == OptoReg::Bad) {
 1871     return rc_bad;
 1872   }
 1873 
 1874   // we have 32 int registers * 2 halves
 1875   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1876 

      // Materialize the address of this node's stack slot (sp + offset) into
      // its assigned register.
 2132 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 2133   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2134   int reg    = ra_->get_encode(this);
 2135 
 2136   // This add will handle any 24-bit signed offset. 24 bits allows an
 2137   // 8 megabyte stack frame.
 2138   __ add(as_Register(reg), sp, offset);
 2139 }
 2140 
      // Size of the emitted add: one instruction if the offset fits an
      // add/sub immediate, otherwise two (see the emit above).
 2141 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2142   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2143   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2144 
 2145   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2146     return NativeInstruction::instruction_size;
 2147   } else {
 2148     return 2 * NativeInstruction::instruction_size;
 2149   }
 2150 }
 2151 
 2152 //=============================================================================
 2153 #ifndef PRODUCT
      // Debug-only listing for the value-type entry point: class check for
      // the unverified variant, inline-arg unpacking for the verified one.
 2154 void MachVEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2155 {
 2156   st->print_cr("# MachVEPNode");
 2157   if (!_verified) {
 2158     st->print_cr("\t load_class");
 2159   } else {
 2160     st->print_cr("\t unpack_inline_arg");
 2161   }
 2162 }
 2163 #endif
 2164 
      // Emit the value-type entry point. Unverified: just the inline-cache
      // check. Verified: unpack inline-type args passed as oops (building and
      // tearing down a temporary frame around the entry barrier when compiling
      // a real method), then branch to the verified entry bound in the prolog.
 2165 void MachVEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc* ra_) const
 2166 {
 2167   if (!_verified) {
 2168     __ ic_check(1);
 2169   } else {
 2170     // insert a nop at the start of the prolog so we can patch in a
 2171     // branch if we need to invalidate the method later
 2172     __ nop();
 2173 
 2174     // TODO 8284443 Avoid creation of temporary frame
 2175     if (ra_->C->stub_function() == nullptr) {
 2176       __ verified_entry(ra_->C, 0);
 2177       __ entry_barrier();
 2178       int framesize = ra_->C->output()->frame_slots() << LogBytesPerInt;
 2179       __ remove_frame(framesize, false);
 2180     }
 2181     // Unpack inline type args passed as oop and then jump to
 2182     // the verified entry point (skipping the unverified entry).
 2183     int sp_inc = __ unpack_inline_args(ra_->C, _receiver_only);
 2184     // Emit code for verified entry and save increment for stack repair on return
 2185     __ verified_entry(ra_->C, sp_inc);
        // When only measuring size, branch to a dummy label instead of the
        // real verified-entry label (which is not bound in scratch emits).
 2186     if (Compile::current()->output()->in_scratch_emit_size()) {
 2187       Label dummy_verified_entry;
 2188       __ b(dummy_verified_entry);
 2189     } else {
 2190       __ b(*_verified_entry);
 2191     }
 2192   }
 2193 }
 2194 
 2195 //=============================================================================
 2196 #ifndef PRODUCT
      // Debug-only listing of the unverified entry point: pseudo-assembly for
      // the inline-cache klass check emitted by MachUEPNode::emit.
 2197 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2198 {
 2199   st->print_cr("# MachUEPNode");
 2200   if (UseCompressedClassPointers) {
 2201     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2202     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2203     st->print_cr("\tcmpw rscratch1, r10");
 2204   } else {
      // These are full-width klass loads, so do not label them "compressed".
 2205     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
 2206     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# klass");
 2207     st->print_cr("\tcmp rscratch1, r10");
 2208   }
 2209   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2210 }
 2211 #endif
 2212 
      // Emit the unverified entry point: the inline-cache check, aligned to
      // InteriorEntryAlignment.
 2213 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
 2214 {
 2215   __ ic_check(InteriorEntryAlignment);
 2216 }
 2217 





 2218 // REQUIRED EMIT CODE
 2219 
 2220 //=============================================================================
 2221 
 2222 // Emit exception handler code.
 2223 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
 2224 {
 2225   // mov rscratch1 #exception_blob_entry_point
 2226   // br rscratch1
 2227   // Note that the code buffer's insts_mark is always relative to insts.
 2228   // That's why we must use the macroassembler to generate a handler.
 2229   address base = __ start_a_stub(size_exception_handler());
 2230   if (base == nullptr) {
 2231     ciEnv::current()->record_failure("CodeCache is full");
 2232     return 0;  // CodeBuffer::expand failed
 2233   }
 2234   int offset = __ offset();
 2235   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 2236   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 2237   __ end_a_stub();

 3670   %}
 3671 
      // Encoding for a dynamic (inline-cache) Java call; fails the compile
      // if the code cache is full.
 3672   enc_class aarch64_enc_java_dynamic_call(method meth) %{
 3673     int method_index = resolved_method_index(masm);
 3674     address call = __ ic_call((address)$meth$$method, method_index);
 3675     if (call == nullptr) {
 3676       ciEnv::current()->record_failure("CodeCache is full");
 3677       return;
 3678     }
 3679     __ post_call_nop();
          // The callee may have clobbered the SVE all-true predicate; restore it.
 3680     if (Compile::current()->max_vector_size() > 0) {
 3681       __ reinitialize_ptrue();
 3682     }
 3683   %}
 3684 
      // Post-call encoding. Besides the VerifyStackAtCalls check, handles
      // calls that return an inline type as fields: sets the IsInit
      // projection from r0's null-ness, and normalizes r0 when the callee
      // returned a tagged InlineKlass pointer instead of an oop.
 3685   enc_class aarch64_enc_call_epilog() %{
 3686     if (VerifyStackAtCalls) {
 3687       // Check that stack depth is unchanged: find majik cookie on stack
 3688       __ call_Unimplemented();
 3689     }
 3690     if (tf()->returns_inline_type_as_fields() && !_method->is_method_handle_intrinsic()) {
 3691       // The last return value is not set by the callee but used to pass IsInit information to compiled code.
 3692       // Search for the corresponding projection, get the register and emit code that initialized it.
 3693       uint con = (tf()->range_cc()->cnt() - 1);
 3694       for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 3695         ProjNode* proj = fast_out(i)->as_Proj();
 3696         if (proj->_con == con) {
 3697           // Set IsInit if r0 is non-null (a non-null value is returned buffered or scalarized)
 3698           OptoReg::Name optoReg = ra_->get_reg_first(proj);
 3699           VMReg reg = OptoReg::as_VMReg(optoReg, ra_->_framesize, OptoReg::reg2stack(ra_->_matcher._new_SP));
            // Spill through rscratch1 when the projection lives on the stack.
 3700           Register toReg = reg->is_reg() ? reg->as_Register() : rscratch1;
 3701           __ cmp(r0, zr);
 3702           __ cset(toReg, Assembler::NE);
 3703           if (reg->is_stack()) {
 3704             int st_off = reg->reg2stack() * VMRegImpl::stack_slot_size;
 3705             __ str(toReg, Address(sp, st_off));
 3706           }
 3707           break;
 3708         }
 3709       }
 3710       if (return_value_is_used()) {
 3711         // An inline type is returned as fields in multiple registers.
 3712         // R0 either contains an oop if the inline type is buffered or a pointer
 3713         // to the corresponding InlineKlass with the lowest bit set to 1. Zero r0
 3714         // if the lowest bit is set to allow C2 to use the oop after null checking.
 3715         // r0 &= (r0 & 1) - 1
 3716         __ andr(rscratch1, r0, 0x1);
 3717         __ sub(rscratch1, rscratch1, 0x1);
 3718         __ andr(r0, r0, rscratch1);
 3719       }
 3720     }
 3721   %}
 3722 
 3723   enc_class aarch64_enc_java_to_runtime(method meth) %{
 3724     // some calls to generated routines (arraycopy code) are scheduled
 3725     // by C2 as runtime calls. if so we can call them using a br (they
 3726     // will be in a reachable segment) otherwise we have to use a blr
 3727     // which loads the absolute address into a register.
 3728     address entry = (address)$meth$$method;
 3729     CodeBlob *cb = CodeCache::find_blob(entry);
 3730     if (cb) {
 3731       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
 3732       if (call == nullptr) {
 3733         ciEnv::current()->record_failure("CodeCache is full");
 3734         return;
 3735       }
 3736       __ post_call_nop();
 3737     } else {
 3738       Label retaddr;
 3739       // Make the anchor frame walkable
 3740       __ adr(rscratch2, retaddr);

// Load 64-bit Integer Constant
// Materializes an arbitrary long immediate; the actual instruction
// sequence is chosen by the shared aarch64_enc_mov_imm encoding
// (defined elsewhere in this file) based on the immediate's value.
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6808 
 6809 // Load Pointer Constant
 6810 
// Load an arbitrary pointer constant.  Costed at four instructions
// because materializing a full pointer may take a multi-instruction
// sequence (see the aarch64_enc_mov_p enc_class, defined elsewhere
// in this file).
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 6824 
 6825 // Load Null Pointer Constant
 6826 
// Load the null pointer constant.  The immP0 operand matches only the
// constant zero, so this is costed as a single cheap instruction
// (see the aarch64_enc_mov_p0 enc_class, defined elsewhere in this file).
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# nullptr ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}

 7993 %}
 7994 
 7995 // ============================================================================
 7996 // Cast/Convert Instructions
 7997 
 7998 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 7999   match(Set dst (CastX2P src));
 8000 
 8001   ins_cost(INSN_COST);
 8002   format %{ "mov $dst, $src\t# long -> ptr" %}
 8003 
 8004   ins_encode %{
 8005     if ($dst$$reg != $src$$reg) {
 8006       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8007     }
 8008   %}
 8009 
 8010   ins_pipe(ialu_reg);
 8011 %}
 8012 
 8013 instruct castI2N(iRegNNoSp dst, iRegI src) %{
 8014   match(Set dst (CastI2N src));
 8015 
 8016   ins_cost(INSN_COST);
 8017   format %{ "mov $dst, $src\t# int -> narrow ptr" %}
 8018 
 8019   ins_encode %{
 8020     if ($dst$$reg != $src$$reg) {
 8021       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8022     }
 8023   %}
 8024 
 8025   ins_pipe(ialu_reg);
 8026 %}
 8027 
 8028 instruct castN2X(iRegLNoSp dst, iRegN src) %{
 8029   match(Set dst (CastP2X src));
 8030 
 8031   ins_cost(INSN_COST);
 8032   format %{ "mov $dst, $src\t# ptr -> long" %}
 8033 
 8034   ins_encode %{
 8035     if ($dst$$reg != $src$$reg) {
 8036       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8037     }
 8038   %}
 8039 
 8040   ins_pipe(ialu_reg);
 8041 %}
 8042 
 8043 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8044   match(Set dst (CastP2X src));
 8045 
 8046   ins_cost(INSN_COST);
 8047   format %{ "mov $dst, $src\t# ptr -> long" %}
 8048 
 8049   ins_encode %{
 8050     if ($dst$$reg != $src$$reg) {
 8051       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8052     }
 8053   %}
 8054 
 8055   ins_pipe(ialu_reg);
 8056 %}
 8057 
 8058 // Convert oop into int for vectors alignment masking
 8059 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8060   match(Set dst (ConvL2I (CastP2X src)));
 8061 
 8062   ins_cost(INSN_COST);

14848 
14849   match(Set dst (MoveL2D src));
14850 
14851   effect(DEF dst, USE src);
14852 
14853   ins_cost(INSN_COST);
14854 
14855   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
14856 
14857   ins_encode %{
14858     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
14859   %}
14860 
14861   ins_pipe(fp_l2d);
14862 
14863 %}
14864 
14865 // ============================================================================
14866 // clearing of an array
14867 
14868 instruct clearArray_reg_reg_immL0(iRegL_R11 cnt, iRegP_R10 base, immL0 zero, Universe dummy, rFlagsReg cr)
14869 %{
14870   match(Set dummy (ClearArray (Binary cnt base) zero));
14871   effect(USE_KILL cnt, USE_KILL base, KILL cr);
14872 
14873   ins_cost(4 * INSN_COST);
14874   format %{ "ClearArray $cnt, $base" %}
14875 
14876   ins_encode %{
14877     address tpc = __ zero_words($base$$Register, $cnt$$Register);
14878     if (tpc == nullptr) {
14879       ciEnv::current()->record_failure("CodeCache is full");
14880       return;
14881     }
14882   %}
14883 
14884   ins_pipe(pipe_class_memory);
14885 %}
14886 
// Fill variant of ClearArray: stores the 64-bit pattern held in $val
// rather than zero.  Selected only when the node is restricted to
// whole-word copies (word_copy_only).
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, iRegL val, Universe dummy, rFlagsReg cr)
%{
  predicate(((ClearArrayNode*)n)->word_copy_only());
  match(Set dummy (ClearArray (Binary cnt base) val));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base, $val" %}

  ins_encode %{
    // Store $val into $cnt consecutive words starting at $base.
    __ fill_words($base$$Register, $cnt$$Register, $val$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
14902 
14903 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
14904 %{
14905   predicate((uint64_t)n->in(2)->get_long()
14906             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord)
14907             && !((ClearArrayNode*)n)->word_copy_only());
14908   match(Set dummy (ClearArray cnt base));
14909   effect(TEMP temp, USE_KILL base, KILL cr);
14910 
14911   ins_cost(4 * INSN_COST);
14912   format %{ "ClearArray $cnt, $base" %}
14913 
14914   ins_encode %{
14915     address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
14916     if (tpc == nullptr) {
14917       ciEnv::current()->record_failure("CodeCache is full");
14918       return;
14919     }
14920   %}
14921 
14922   ins_pipe(pipe_class_memory);
14923 %}
14924 
14925 // ============================================================================
14926 // Overflow Math Instructions
14927 

16238 %}
16239 
16240 // Call Runtime Instruction without safepoint and with vector arguments
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  // Shares the generic java-to-runtime call encoding with the other
  // leaf-call nodes; only the match rule (CallLeafVector) differs.
  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
16255 
16256 // Call Runtime Instruction
16257 
16258 // entry point is null, target holds the address to call
instruct CallLeafNoFPIndirect(iRegP target)
%{
  // Matches only when the call node carries no static entry point;
  // the target address is supplied at runtime in $target.
  predicate(n->as_Call()->entry_point() == nullptr);

  match(CallLeafNoFP target);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp indirect $target" %}

  ins_encode %{
    // Branch-and-link through the register holding the call target.
    __ blr($target$$Register);
  %}

  ins_pipe(pipe_class_call);
%}
16275 
instruct CallLeafNoFPDirect(method meth)
%{
  // Direct counterpart of CallLeafNoFPIndirect: matches only when the
  // call node has a known static entry point.
  predicate(n->as_Call()->entry_point() != nullptr);

  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  // Uses the shared java-to-runtime encoding, which picks a trampoline
  // call for in-code-cache targets and a far call otherwise.
  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16292 
16293 // Tail Call; Jump from runtime stub to Java code.
16294 // Also known as an 'interprocedural jump'.
16295 // Target of jump will eventually return to caller.
16296 // TailJump below removes the return address.
16297 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16298 // emitted just above the TailCall which has reset rfp to the caller state.
16299 instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
< prev index next >