1677
1678 int MachCallDynamicJavaNode::ret_addr_offset()
1679 {
1680 return 16; // movz, movk, movk, bl
1681 }
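// A sketch of the call site this offset assumes: ic_call materializes the
// inline-cache data as a 48-bit immediate before the branch-and-link,
//   movz rscratch2, #imm16_0
//   movk rscratch2, #imm16_1, lsl #16
//   movk rscratch2, #imm16_2, lsl #32
//   bl   <entry>
// Four 4-byte instructions, so the return address is 4 * 4 = 16 bytes in.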
1682
1683 int MachCallRuntimeNode::ret_addr_offset() {
1684 // for generated stubs the call will be
1685 // bl(addr)
1686 // or with far branches
1687 // bl(trampoline_stub)
1688 // for real runtime callouts it will be six instructions
1689 // see aarch64_enc_java_to_runtime
1690 // adr(rscratch2, retaddr)
1691 // str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
1692 // lea(rscratch1, RuntimeAddress(addr)) -- expands to movz; movk; movk
1693 // blr(rscratch1)
1694 CodeBlob *cb = CodeCache::find_blob(_entry_point);
1695 if (cb) {
1696 return 1 * NativeInstruction::instruction_size;
1697 } else {
1698 return 6 * NativeInstruction::instruction_size;
1699 }
1700 }
1701
1702 //=============================================================================
1703
1704 #ifndef PRODUCT
1705 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1706 st->print("BREAKPOINT");
1707 }
1708 #endif
1709
1710 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1711 __ brk(0);
1712 }
1713
1714 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
1715 return MachNode::size(ra_);
1716 }
1785 if (C->stub_function() == nullptr) {
1786 st->print("\n\t");
1787 st->print("ldr rscratch1, [guard]\n\t");
1788 st->print("dmb ishld\n\t");
1789 st->print("ldr rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
1790 st->print("cmp rscratch1, rscratch2\n\t");
1791 st->print("b.eq skip");
1792 st->print("\n\t");
1793 st->print("blr #nmethod_entry_barrier_stub\n\t");
1794 st->print("b skip\n\t");
1795 st->print("guard: int\n\t");
1796 st->print("\n\t");
1797 st->print("skip:\n\t");
1798 }
1799 }
1800 #endif
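// Rough sketch of the barrier semantics assumed by the format above: each
// nmethod embeds a guard word, and a thread may enter only when the guard
// matches its thread-local "disarmed" value. Arming the barrier (e.g. by
// the GC) makes the values differ, diverting entry into the out-of-line
// stub until the nmethod is fixed up and re-disarmed.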
1801
1802 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1803 Compile* C = ra_->C;
1804
1805 // n.b. frame size includes space for return pc and rfp
1806 const int framesize = C->output()->frame_size_in_bytes();
1807
1808 if (C->clinit_barrier_on_entry()) {
1809 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
1810
1811 Label L_skip_barrier;
1812
1813 __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
1814 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1815 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1816 __ bind(L_skip_barrier);
1817 }
1818
1819 if (C->max_vector_size() > 0) {
1820 __ reinitialize_ptrue();
1821 }
1822
1823 int bangsize = C->output()->bang_size_in_bytes();
1824 if (C->output()->need_stack_bang(bangsize))
1825 __ generate_stack_overflow_check(bangsize);
1826
1827 __ build_frame(framesize);
1828
1829 if (C->stub_function() == nullptr) {
1830 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1831 // Dummy labels for just measuring the code size
1832 Label dummy_slow_path;
1833 Label dummy_continuation;
1834 Label dummy_guard;
1835 Label* slow_path = &dummy_slow_path;
1836 Label* continuation = &dummy_continuation;
1837 Label* guard = &dummy_guard;
1838 if (!Compile::current()->output()->in_scratch_emit_size()) {
1839 // Use real labels from actual stub when not emitting code for the purpose of measuring its size
1840 C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
1841 Compile::current()->output()->add_stub(stub);
1842 slow_path = &stub->entry();
1843 continuation = &stub->continuation();
1844 guard = &stub->guard();
1845 }
1846 // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
1847 bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
1848 }
1849
1850 if (VerifyStackAtCalls) {
1851 Unimplemented();
1852 }
1853
1854 C->output()->set_frame_complete(__ offset());
1855
1856 if (C->has_mach_constant_base_node()) {
1857 // NOTE: We set the table base offset here because users of the
1858 // constant table might be emitted before MachConstantBaseNode.
1859 ConstantTable& constant_table = C->output()->constant_table();
1860 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1861 }
1862 }
1863
1864 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
1865 {
1866 return MachNode::size(ra_); // too many variables; just compute it
1867 // the hard way
1868 }
1869
1870 int MachPrologNode::reloc() const
1871 {
1872 return 0;
1873 }
1874
1875 //=============================================================================
1876
1877 #ifndef PRODUCT
1878 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1879 Compile* C = ra_->C;
1880 int framesize = C->output()->frame_slots() << LogBytesPerInt;
1881
1882 st->print("# pop frame %d\n\t",framesize);
1883
1884 if (framesize == 0) {
1885 st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1886 } else if (framesize < ((1 << 9) + 2 * wordSize)) {
1887 st->print("ldp lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
1888 st->print("add sp, sp, #%d\n\t", framesize);
1889 } else {
1890 st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize);
1891 st->print("add sp, sp, rscratch1\n\t");
1892 st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1893 }
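// The three-way split above mirrors what remove_frame() emits: ldp's scaled
// 7-bit signed immediate only reaches offsets in [-512, 504], so once the
// (16-byte aligned) frame reaches (1 << 9) + 2 * wordSize the offset form
// no longer fits and sp has to be adjusted through rscratch1 first.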
1894 if (VM_Version::use_rop_protection()) {
1895 st->print("autiaz\n\t");
1896 st->print("ldr zr, [lr]\n\t");
1897 }
1898
1899 if (do_polling() && C->is_method_compilation()) {
1900 st->print("# test polling word\n\t");
1901 st->print("ldr rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
1902 st->print("cmp sp, rscratch1\n\t");
1903 st->print("bhi #slow_path");
1904 }
1905 }
1906 #endif
1907
1908 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1909 Compile* C = ra_->C;
1910 int framesize = C->output()->frame_slots() << LogBytesPerInt;
1911
1912 __ remove_frame(framesize);
1913
1914 if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
1915 __ reserved_stack_check();
1916 }
1917
1918 if (do_polling() && C->is_method_compilation()) {
1919 Label dummy_label;
1920 Label* code_stub = &dummy_label;
1921 if (!C->output()->in_scratch_emit_size()) {
1922 C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
1923 C->output()->add_stub(stub);
1924 code_stub = &stub->entry();
1925 }
1926 __ relocate(relocInfo::poll_return_type);
1927 __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
1928 }
1929 }
1930
1931 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1932 // Variable size. Determine dynamically.
1933 return MachNode::size(ra_);
1934 }
1935
1936 int MachEpilogNode::reloc() const {
1937 // Return number of relocatable values contained in this instruction.
1938 return 1; // 1 for polling page.
1939 }
1940
1941 const Pipeline * MachEpilogNode::pipeline() const {
1942 return MachNode::pipeline_class();
1943 }
1944
1945 //=============================================================================
1946
1947 static enum RC rc_class(OptoReg::Name reg) {
1948
1949 if (reg == OptoReg::Bad) {
1950 return rc_bad;
1951 }
1952
1953 // we have 32 int registers * 2 halves
1954 int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
1955
2211 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
2212 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2213 int reg = ra_->get_encode(this);
2214
2215 // This add will handle any 24-bit signed offset. 24 bits allows an
2216 // 8 megabyte stack frame.
2217 __ add(as_Register(reg), sp, offset);
2218 }
2219
2220 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2221 // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2222 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2223
2224 if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
2225 return NativeInstruction::instruction_size;
2226 } else {
2227 return 2 * NativeInstruction::instruction_size;
2228 }
2229 }
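// A sketch of the two encodings size() accounts for (offsets hypothetical):
//   add x0, sp, #48              // offset fits an add/sub immediate
// versus, for an offset the immediate form cannot encode,
//   mov rscratch1, #0x12340      // materialize the offset in a scratch register
//   add x0, sp, rscratch1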
2230
2231 //=============================================================================
2232
2233 #ifndef PRODUCT
2234 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2235 {
2236 st->print_cr("# MachUEPNode");
2237 if (UseCompressedClassPointers) {
2238 st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2239 st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
2240 st->print_cr("\tcmpw rscratch1, r10");
2241 } else {
2242 st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
2243 st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# klass");
2244 st->print_cr("\tcmp rscratch1, r10");
2245 }
2246 st->print_cr("\tb.ne SharedRuntime::_ic_miss_stub");
2247 }
2248 #endif
2249
2250 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
2251 {
2252 __ ic_check(InteriorEntryAlignment);
2253 }
2254
2255 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
2256 {
2257 return MachNode::size(ra_);
2258 }
2259
2260 // REQUIRED EMIT CODE
2261
2262 //=============================================================================
2263
2264 // Emit exception handler code.
2265 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
2266 {
2267 // mov rscratch1 #exception_blob_entry_point
2268 // br rscratch1
2269 // Note that the code buffer's insts_mark is always relative to insts.
2270 // That's why we must use the macroassembler to generate a handler.
2271 address base = __ start_a_stub(size_exception_handler());
2272 if (base == nullptr) {
2273 ciEnv::current()->record_failure("CodeCache is full");
2274 return 0; // CodeBuffer::expand failed
2275 }
2276 int offset = __ offset();
2277 __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
2278 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
2279 __ end_a_stub();
3745 %}
3746
3747 enc_class aarch64_enc_java_dynamic_call(method meth) %{
3748 int method_index = resolved_method_index(masm);
3749 address call = __ ic_call((address)$meth$$method, method_index);
3750 if (call == nullptr) {
3751 ciEnv::current()->record_failure("CodeCache is full");
3752 return;
3753 }
3754 __ post_call_nop();
3755 if (Compile::current()->max_vector_size() > 0) {
3756 __ reinitialize_ptrue();
3757 }
3758 %}
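// C2's SVE code assumes p7 permanently holds an all-true predicate, and a
// callee is free to clobber predicate registers; reinitialize_ptrue simply
// re-materializes that predicate (a single sve_ptrue into p7) after any
// call emitted while vector code may be live.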
3759
3760 enc_class aarch64_enc_call_epilog() %{
3761 if (VerifyStackAtCalls) {
3762 // Check that stack depth is unchanged: find magic cookie on stack
3763 __ call_Unimplemented();
3764 }
3765 %}
3766
3767 enc_class aarch64_enc_java_to_runtime(method meth) %{
3768 // some calls to generated routines (arraycopy code) are scheduled
3769 // by C2 as runtime calls. if so we can call them using a bl (they
3770 // will be in a reachable segment), otherwise we have to use a blr
3771 // which loads the absolute address into a register.
3772 address entry = (address)$meth$$method;
3773 CodeBlob *cb = CodeCache::find_blob(entry);
3774 if (cb) {
3775 address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
3776 if (call == nullptr) {
3777 ciEnv::current()->record_failure("CodeCache is full");
3778 return;
3779 }
3780 __ post_call_nop();
3781 } else {
3782 Label retaddr;
3783 // Make the anchor frame walkable
3784 __ adr(rscratch2, retaddr);
6929 instruct loadConL(iRegLNoSp dst, immL src)
6930 %{
6931 match(Set dst src);
6932
6933 ins_cost(INSN_COST);
6934 format %{ "mov $dst, $src\t# long" %}
6935
6936 ins_encode( aarch64_enc_mov_imm(dst, src) );
6937
6938 ins_pipe(ialu_imm);
6939 %}
6940
6941 // Load Pointer Constant
6942
6943 instruct loadConP(iRegPNoSp dst, immP con)
6944 %{
6945 match(Set dst con);
6946
6947 ins_cost(INSN_COST * 4);
6948 format %{
6949 "mov $dst, $con\t# ptr\n\t"
6950 %}
6951
6952 ins_encode(aarch64_enc_mov_p(dst, con));
6953
6954 ins_pipe(ialu_imm);
6955 %}
6956
6957 // Load Null Pointer Constant
6958
6959 instruct loadConP0(iRegPNoSp dst, immP0 con)
6960 %{
6961 match(Set dst con);
6962
6963 ins_cost(INSN_COST);
6964 format %{ "mov $dst, $con\t# nullptr ptr" %}
6965
6966 ins_encode(aarch64_enc_mov_p0(dst, con));
6967
6968 ins_pipe(ialu_imm);
6969 %}
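// The cost split with loadConP above reflects the encodings: a null pointer
// is a single "mov $dst, zr", while a general pointer constant may need a
// full movz/movk/movk sequence (hence INSN_COST vs 4 * INSN_COST).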
8122 %}
8123
8124 // ============================================================================
8125 // Cast/Convert Instructions
8126
8127 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8128 match(Set dst (CastX2P src));
8129
8130 ins_cost(INSN_COST);
8131 format %{ "mov $dst, $src\t# long -> ptr" %}
8132
8133 ins_encode %{
8134 if ($dst$$reg != $src$$reg) {
8135 __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8136 }
8137 %}
8138
8139 ins_pipe(ialu_reg);
8140 %}
8141
8142 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8143 match(Set dst (CastP2X src));
8144
8145 ins_cost(INSN_COST);
8146 format %{ "mov $dst, $src\t# ptr -> long" %}
8147
8148 ins_encode %{
8149 if ($dst$$reg != $src$$reg) {
8150 __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8151 }
8152 %}
8153
8154 ins_pipe(ialu_reg);
8155 %}
8156
8157 // Convert oop into int for vectors alignment masking
8158 instruct convP2I(iRegINoSp dst, iRegP src) %{
8159 match(Set dst (ConvL2I (CastP2X src)));
8160
8161 ins_cost(INSN_COST);
15075
15076 match(Set dst (MoveL2D src));
15077
15078 effect(DEF dst, USE src);
15079
15080 ins_cost(INSN_COST);
15081
15082 format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
15083
15084 ins_encode %{
15085 __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
15086 %}
15087
15088 ins_pipe(fp_l2d);
15089
15090 %}
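// MoveL2D is a raw bit copy (the Double.longBitsToDouble shape), not a
// numeric conversion, so one fmovd from the general register file into the
// FP/SIMD file is sufficient.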
15091
15092 // ============================================================================
15093 // clearing of an array
15094
15095 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
15096 %{
15097 match(Set dummy (ClearArray cnt base));
15098 effect(USE_KILL cnt, USE_KILL base, KILL cr);
15099
15100 ins_cost(4 * INSN_COST);
15101 format %{ "ClearArray $cnt, $base" %}
15102
15103 ins_encode %{
15104 address tpc = __ zero_words($base$$Register, $cnt$$Register);
15105 if (tpc == nullptr) {
15106 ciEnv::current()->record_failure("CodeCache is full");
15107 return;
15108 }
15109 %}
15110
15111 ins_pipe(pipe_class_memory);
15112 %}
15113
15114 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
15115 %{
15116 predicate((uint64_t)n->in(2)->get_long()
15117 < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
15118 match(Set dummy (ClearArray cnt base));
15119 effect(TEMP temp, USE_KILL base, KILL cr);
15120
15121 ins_cost(4 * INSN_COST);
15122 format %{ "ClearArray $cnt, $base" %}
15123
15124 ins_encode %{
15125 address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
15126 if (tpc == nullptr) {
15127 ciEnv::current()->record_failure("CodeCache is full");
15128 return;
15129 }
15130 %}
15131
15132 ins_pipe(pipe_class_memory);
15133 %}
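// Note on the predicate above: the constant count is in words while
// BlockZeroingLowLimit is expressed in bytes, hence the >> LogBytesPerWord
// rescaling before the comparison.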
15134
15135 // ============================================================================
15136 // Overflow Math Instructions
15137
16414 %}
16415
16416 // Call Runtime Instruction without safepoint and with vector arguments
16417 instruct CallLeafDirectVector(method meth)
16418 %{
16419 match(CallLeafVector);
16420
16421 effect(USE meth);
16422
16423 ins_cost(CALL_COST);
16424
16425 format %{ "CALL, runtime leaf vector $meth" %}
16426
16427 ins_encode(aarch64_enc_java_to_runtime(meth));
16428
16429 ins_pipe(pipe_class_call);
16430 %}
16431
16432 // Call Runtime Instruction
16433
16434 instruct CallLeafNoFPDirect(method meth)
16435 %{
16436 match(CallLeafNoFP);
16437
16438 effect(USE meth);
16439
16440 ins_cost(CALL_COST);
16441
16442 format %{ "CALL, runtime leaf nofp $meth" %}
16443
16444 ins_encode( aarch64_enc_java_to_runtime(meth) );
16445
16446 ins_pipe(pipe_class_call);
16447 %}
16448
16449 // Tail Call; Jump from runtime stub to Java code.
16450 // Also known as an 'interprocedural jump'.
16451 // Target of jump will eventually return to caller.
16452 // TailJump below removes the return address.
16453 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16454 // emitted just above the TailCall which has reset rfp to the caller state.
16455 instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
1677
1678 int MachCallDynamicJavaNode::ret_addr_offset()
1679 {
1680 return 16; // movz, movk, movk, bl
1681 }
1682
1683 int MachCallRuntimeNode::ret_addr_offset() {
1684 // for generated stubs the call will be
1685 // bl(addr)
1686 // or with far branches
1687 // bl(trampoline_stub)
1688 // for real runtime callouts it will be six instructions
1689 // see aarch64_enc_java_to_runtime
1690 // adr(rscratch2, retaddr)
1691 // str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
1692 // lea(rscratch1, RuntimeAddress(addr)) -- expands to movz; movk; movk
1693 // blr(rscratch1)
1694 CodeBlob *cb = CodeCache::find_blob(_entry_point);
1695 if (cb) {
1696 return 1 * NativeInstruction::instruction_size;
1697 } else if (_entry_point == nullptr) {
1698 // See CallLeafNoFPIndirect
1699 return 1 * NativeInstruction::instruction_size;
1700 } else {
1701 return 6 * NativeInstruction::instruction_size;
1702 }
1703 }
1704
1705 //=============================================================================
1706
1707 #ifndef PRODUCT
1708 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1709 st->print("BREAKPOINT");
1710 }
1711 #endif
1712
1713 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1714 __ brk(0);
1715 }
1716
1717 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
1718 return MachNode::size(ra_);
1719 }
1788 if (C->stub_function() == nullptr) {
1789 st->print("\n\t");
1790 st->print("ldr rscratch1, [guard]\n\t");
1791 st->print("dmb ishld\n\t");
1792 st->print("ldr rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
1793 st->print("cmp rscratch1, rscratch2\n\t");
1794 st->print("b.eq skip");
1795 st->print("\n\t");
1796 st->print("blr #nmethod_entry_barrier_stub\n\t");
1797 st->print("b skip\n\t");
1798 st->print("guard: int\n\t");
1799 st->print("\n\t");
1800 st->print("skip:\n\t");
1801 }
1802 }
1803 #endif
1804
1805 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1806 Compile* C = ra_->C;
1807
1808
1809 __ verified_entry(C, 0);
1810
1811 if (C->stub_function() == nullptr) {
1812 __ entry_barrier();
1813 }
1814
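// MachVEPNode branches to *_verified_entry after unpacking scalarized
// inline-type arguments, so bind it here, past the entry barrier. During
// scratch emission (size measurement) no stub labels exist yet and the
// bind is skipped.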
1815 if (!Compile::current()->output()->in_scratch_emit_size()) {
1816 __ bind(*_verified_entry);
1817 }
1818
1819 if (VerifyStackAtCalls) {
1820 Unimplemented();
1821 }
1822
1823 C->output()->set_frame_complete(__ offset());
1824
1825 if (C->has_mach_constant_base_node()) {
1826 // NOTE: We set the table base offset here because users of the
1827 // constant table might be emitted before MachConstantBaseNode.
1828 ConstantTable& constant_table = C->output()->constant_table();
1829 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1830 }
1831 }
1832
1833 int MachPrologNode::reloc() const
1834 {
1835 return 0;
1836 }
1837
1838 //=============================================================================
1839
1840 #ifndef PRODUCT
1841 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1842 Compile* C = ra_->C;
1843 int framesize = C->output()->frame_slots() << LogBytesPerInt;
1844
1845 st->print("# pop frame %d\n\t",framesize);
1846
1847 if (framesize == 0) {
1848 st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1849 } else if (framesize < ((1 << 9) + 2 * wordSize)) {
1850 st->print("ldp lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
1851 st->print("add sp, sp, #%d\n\t", framesize);
1852 } else {
1853 st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize);
1854 st->print("add sp, sp, rscratch1\n\t");
1855 st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1856 }
1857 if (VM_Version::use_rop_protection()) {
1858 st->print("autiaz\n\t");
1859 st->print("ldr zr, [lr]\n\t");
1860 }
1861
1862 if (do_polling() && C->is_method_compilation()) {
1863 st->print("# test polling word\n\t");
1864 st->print("ldr rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
1865 st->print("cmp sp, rscratch1\n\t");
1866 st->print("bhi #slow_path");
1867 }
1868 }
1869 #endif
1870
1871 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1872 Compile* C = ra_->C;
1873 int framesize = C->output()->frame_slots() << LogBytesPerInt;
1874
1875 __ remove_frame(framesize, C->needs_stack_repair());
1876
1877 if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
1878 __ reserved_stack_check();
1879 }
1880
1881 if (do_polling() && C->is_method_compilation()) {
1882 Label dummy_label;
1883 Label* code_stub = &dummy_label;
1884 if (!C->output()->in_scratch_emit_size()) {
1885 C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
1886 C->output()->add_stub(stub);
1887 code_stub = &stub->entry();
1888 }
1889 __ relocate(relocInfo::poll_return_type);
1890 __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
1891 }
1892 }
1893
1894 int MachEpilogNode::reloc() const {
1895 // Return number of relocatable values contained in this instruction.
1896 return 1; // 1 for polling page.
1897 }
1898
1899 const Pipeline * MachEpilogNode::pipeline() const {
1900 return MachNode::pipeline_class();
1901 }
1902
1903 //=============================================================================
1904
1905 static enum RC rc_class(OptoReg::Name reg) {
1906
1907 if (reg == OptoReg::Bad) {
1908 return rc_bad;
1909 }
1910
1911 // we have 32 int registers * 2 halves
1912 int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
1913
2169 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
2170 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2171 int reg = ra_->get_encode(this);
2172
2173 // This add will handle any 24-bit signed offset. 24 bits allows an
2174 // 8 megabyte stack frame.
2175 __ add(as_Register(reg), sp, offset);
2176 }
2177
2178 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2179 // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2180 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2181
2182 if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
2183 return NativeInstruction::instruction_size;
2184 } else {
2185 return 2 * NativeInstruction::instruction_size;
2186 }
2187 }
2188
2189 //=============================================================================
2190 #ifndef PRODUCT
2191 void MachVEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2192 {
2193 st->print_cr("# MachVEPNode");
2194 if (!_verified) {
2195 st->print_cr("\t load_class");
2196 } else {
2197 st->print_cr("\t unpack_inline_arg");
2198 }
2199 }
2200 #endif
2201
2202 void MachVEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc* ra_) const
2203 {
2204 if (!_verified) {
2205 __ ic_check(1);
2206 } else {
2207 // TODO 8284443 Avoid creation of temporary frame
2208 if (ra_->C->stub_function() == nullptr) {
2209 __ verified_entry(ra_->C, 0);
2210 __ entry_barrier();
2211 int framesize = ra_->C->output()->frame_slots() << LogBytesPerInt;
2212 __ remove_frame(framesize, false);
2213 }
2214 // Unpack inline type args passed as oop and then jump to
2215 // the verified entry point (skipping the unverified entry).
2216 int sp_inc = __ unpack_inline_args(ra_->C, _receiver_only);
2217 // Emit code for verified entry and save increment for stack repair on return
2218 __ verified_entry(ra_->C, sp_inc);
2219 if (Compile::current()->output()->in_scratch_emit_size()) {
2220 Label dummy_verified_entry;
2221 __ b(dummy_verified_entry);
2222 } else {
2223 __ b(*_verified_entry);
2224 }
2225 }
2226 }
2227
2228 //=============================================================================
2229 #ifndef PRODUCT
2230 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2231 {
2232 st->print_cr("# MachUEPNode");
2233 if (UseCompressedClassPointers) {
2234 st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2235 st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
2236 st->print_cr("\tcmpw rscratch1, r10");
2237 } else {
2238 st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
2239 st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# klass");
2240 st->print_cr("\tcmp rscratch1, r10");
2241 }
2242 st->print_cr("\tb.ne SharedRuntime::_ic_miss_stub");
2243 }
2244 #endif
2245
2246 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
2247 {
2248 __ ic_check(InteriorEntryAlignment);
2249 }
2250
2251 // REQUIRED EMIT CODE
2252
2253 //=============================================================================
2254
2255 // Emit exception handler code.
2256 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
2257 {
2258 // mov rscratch1 #exception_blob_entry_point
2259 // br rscratch1
2260 // Note that the code buffer's insts_mark is always relative to insts.
2261 // That's why we must use the macroassembler to generate a handler.
2262 address base = __ start_a_stub(size_exception_handler());
2263 if (base == nullptr) {
2264 ciEnv::current()->record_failure("CodeCache is full");
2265 return 0; // CodeBuffer::expand failed
2266 }
2267 int offset = __ offset();
2268 __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
2269 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
2270 __ end_a_stub();
3736 %}
3737
3738 enc_class aarch64_enc_java_dynamic_call(method meth) %{
3739 int method_index = resolved_method_index(masm);
3740 address call = __ ic_call((address)$meth$$method, method_index);
3741 if (call == nullptr) {
3742 ciEnv::current()->record_failure("CodeCache is full");
3743 return;
3744 }
3745 __ post_call_nop();
3746 if (Compile::current()->max_vector_size() > 0) {
3747 __ reinitialize_ptrue();
3748 }
3749 %}
3750
3751 enc_class aarch64_enc_call_epilog() %{
3752 if (VerifyStackAtCalls) {
3753 // Check that stack depth is unchanged: find magic cookie on stack
3754 __ call_Unimplemented();
3755 }
3756 if (tf()->returns_inline_type_as_fields() && !_method->is_method_handle_intrinsic() && _method->return_type()->is_loaded()) {
3757 // The last return value is not set by the callee but is used to pass the null marker to compiled code.
3758 // Search for the corresponding projection, get the register and emit code that initializes it.
3759 uint con = (tf()->range_cc()->cnt() - 1);
3760 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
3761 ProjNode* proj = fast_out(i)->as_Proj();
3762 if (proj->_con == con) {
3763 // Set null marker if r0 is non-null (a non-null value is returned buffered or scalarized)
3764 OptoReg::Name optoReg = ra_->get_reg_first(proj);
3765 VMReg reg = OptoReg::as_VMReg(optoReg, ra_->_framesize, OptoReg::reg2stack(ra_->_matcher._new_SP));
3766 Register toReg = reg->is_reg() ? reg->as_Register() : rscratch1;
3767 __ cmp(r0, zr);
3768 __ cset(toReg, Assembler::NE);
3769 if (reg->is_stack()) {
3770 int st_off = reg->reg2stack() * VMRegImpl::stack_slot_size;
3771 __ str(toReg, Address(sp, st_off));
3772 }
3773 break;
3774 }
3775 }
3776 if (return_value_is_used()) {
3777 // An inline type is returned as fields in multiple registers.
3778 // R0 either contains an oop if the inline type is buffered or a pointer
3779 // to the corresponding InlineKlass with the lowest bit set to 1. Zero r0
3780 // if the lowest bit is set to allow C2 to use the oop after null checking.
3781 // r0 &= (r0 & 1) - 1
3782 __ andr(rscratch1, r0, 0x1);
3783 __ sub(rscratch1, rscratch1, 0x1);
3784 __ andr(r0, r0, rscratch1);
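// Worked example of r0 &= (r0 & 1) - 1:
//   tagged InlineKlass pointer: low bit 1 -> (1 - 1) = 0,  so r0 becomes 0
//   buffered oop or null:       low bit 0 -> (0 - 1) = -1, so r0 is unchanged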
3785 }
3786 }
3787 %}
3788
3789 enc_class aarch64_enc_java_to_runtime(method meth) %{
3791 // some calls to generated routines (arraycopy code) are scheduled
3792 // by C2 as runtime calls. if so we can call them using a bl (they
3793 // will be in a reachable segment), otherwise we have to use a blr
3794 // which loads the absolute address into a register.
3794 address entry = (address)$meth$$method;
3795 CodeBlob *cb = CodeCache::find_blob(entry);
3796 if (cb) {
3797 address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
3798 if (call == nullptr) {
3799 ciEnv::current()->record_failure("CodeCache is full");
3800 return;
3801 }
3802 __ post_call_nop();
3803 } else {
3804 Label retaddr;
3805 // Make the anchor frame walkable
3806 __ adr(rscratch2, retaddr);
6951 instruct loadConL(iRegLNoSp dst, immL src)
6952 %{
6953 match(Set dst src);
6954
6955 ins_cost(INSN_COST);
6956 format %{ "mov $dst, $src\t# long" %}
6957
6958 ins_encode( aarch64_enc_mov_imm(dst, src) );
6959
6960 ins_pipe(ialu_imm);
6961 %}
6962
6963 // Load Pointer Constant
6964
6965 instruct loadConP(iRegPNoSp dst, immP con)
6966 %{
6967 match(Set dst con);
6968
6969 ins_cost(INSN_COST * 4);
6970 format %{
6971 "mov $dst, $con\t# ptr"
6972 %}
6973
6974 ins_encode(aarch64_enc_mov_p(dst, con));
6975
6976 ins_pipe(ialu_imm);
6977 %}
6978
6979 // Load Null Pointer Constant
6980
6981 instruct loadConP0(iRegPNoSp dst, immP0 con)
6982 %{
6983 match(Set dst con);
6984
6985 ins_cost(INSN_COST);
6986 format %{ "mov $dst, $con\t# nullptr ptr" %}
6987
6988 ins_encode(aarch64_enc_mov_p0(dst, con));
6989
6990 ins_pipe(ialu_imm);
6991 %}
8144 %}
8145
8146 // ============================================================================
8147 // Cast/Convert Instructions
8148
8149 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8150 match(Set dst (CastX2P src));
8151
8152 ins_cost(INSN_COST);
8153 format %{ "mov $dst, $src\t# long -> ptr" %}
8154
8155 ins_encode %{
8156 if ($dst$$reg != $src$$reg) {
8157 __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8158 }
8159 %}
8160
8161 ins_pipe(ialu_reg);
8162 %}
8163
8164 instruct castI2N(iRegNNoSp dst, iRegI src) %{
8165 match(Set dst (CastI2N src));
8166
8167 ins_cost(INSN_COST);
8168 format %{ "mov $dst, $src\t# int -> narrow ptr" %}
8169
8170 ins_encode %{
8171 if ($dst$$reg != $src$$reg) {
8172 __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8173 }
8174 %}
8175
8176 ins_pipe(ialu_reg);
8177 %}
8178
8179 instruct castN2X(iRegLNoSp dst, iRegN src) %{
8180 match(Set dst (CastP2X src));
8181
8182 ins_cost(INSN_COST);
8183 format %{ "mov $dst, $src\t# narrow ptr -> long" %}
8184
8185 ins_encode %{
8186 if ($dst$$reg != $src$$reg) {
8187 __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8188 }
8189 %}
8190
8191 ins_pipe(ialu_reg);
8192 %}
8193
8194 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8195 match(Set dst (CastP2X src));
8196
8197 ins_cost(INSN_COST);
8198 format %{ "mov $dst, $src\t# ptr -> long" %}
8199
8200 ins_encode %{
8201 if ($dst$$reg != $src$$reg) {
8202 __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8203 }
8204 %}
8205
8206 ins_pipe(ialu_reg);
8207 %}
8208
8209 // Convert oop into int for vectors alignment masking
8210 instruct convP2I(iRegINoSp dst, iRegP src) %{
8211 match(Set dst (ConvL2I (CastP2X src)));
8212
8213 ins_cost(INSN_COST);
15127
15128 match(Set dst (MoveL2D src));
15129
15130 effect(DEF dst, USE src);
15131
15132 ins_cost(INSN_COST);
15133
15134 format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
15135
15136 ins_encode %{
15137 __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
15138 %}
15139
15140 ins_pipe(fp_l2d);
15141
15142 %}
15143
15144 // ============================================================================
15145 // clearing of an array
15146
15147 instruct clearArray_reg_reg_immL0(iRegL_R11 cnt, iRegP_R10 base, immL0 zero, Universe dummy, rFlagsReg cr)
15148 %{
15149 match(Set dummy (ClearArray (Binary cnt base) zero));
15150 effect(USE_KILL cnt, USE_KILL base, KILL cr);
15151
15152 ins_cost(4 * INSN_COST);
15153 format %{ "ClearArray $cnt, $base" %}
15154
15155 ins_encode %{
15156 address tpc = __ zero_words($base$$Register, $cnt$$Register);
15157 if (tpc == nullptr) {
15158 ciEnv::current()->record_failure("CodeCache is full");
15159 return;
15160 }
15161 %}
15162
15163 ins_pipe(pipe_class_memory);
15164 %}
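// zero_words may call out to the shared zero_blocks stub (which can use
// "dc zva" block zeroing); the nullptr return checked above signals that
// the trampoline for that call could not be allocated, i.e. the CodeCache
// is full.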
15165
15166 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, iRegL val, Universe dummy, rFlagsReg cr)
15167 %{
15168 predicate(((ClearArrayNode*)n)->word_copy_only());
15169 match(Set dummy (ClearArray (Binary cnt base) val));
15170 effect(USE_KILL cnt, USE_KILL base, KILL cr);
15171
15172 ins_cost(4 * INSN_COST);
15173 format %{ "ClearArray $cnt, $base, $val" %}
15174
15175 ins_encode %{
15176 __ fill_words($base$$Register, $cnt$$Register, $val$$Register);
15177 %}
15178
15179 ins_pipe(pipe_class_memory);
15180 %}
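// This variant fills with an explicit 64-bit pattern instead of zeroing:
// word_copy_only() is set when plain zero bytes are not a valid cleared
// state (the val operand supplies the per-word default bit pattern), so
// fill_words stores $val into every word.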
15181
15182 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
15183 %{
15184 predicate((uint64_t)n->in(2)->get_long()
15185 < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord)
15186 && !((ClearArrayNode*)n)->word_copy_only());
15187 match(Set dummy (ClearArray cnt base));
15188 effect(TEMP temp, USE_KILL base, KILL cr);
15189
15190 ins_cost(4 * INSN_COST);
15191 format %{ "ClearArray $cnt, $base" %}
15192
15193 ins_encode %{
15194 address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
15195 if (tpc == nullptr) {
15196 ciEnv::current()->record_failure("CodeCache is full");
15197 return;
15198 }
15199 %}
15200
15201 ins_pipe(pipe_class_memory);
15202 %}
15203
15204 // ============================================================================
15205 // Overflow Math Instructions
15206
16483 %}
16484
16485 // Call Runtime Instruction without safepoint and with vector arguments
16486 instruct CallLeafDirectVector(method meth)
16487 %{
16488 match(CallLeafVector);
16489
16490 effect(USE meth);
16491
16492 ins_cost(CALL_COST);
16493
16494 format %{ "CALL, runtime leaf vector $meth" %}
16495
16496 ins_encode(aarch64_enc_java_to_runtime(meth));
16497
16498 ins_pipe(pipe_class_call);
16499 %}
16500
16501 // Call Runtime Instruction
16502
16503 // entry point is null, target holds the address to call
16504 instruct CallLeafNoFPIndirect(iRegP target)
16505 %{
16506 predicate(n->as_Call()->entry_point() == nullptr);
16507
16508 match(CallLeafNoFP target);
16509
16510 ins_cost(CALL_COST);
16511
16512 format %{ "CALL, runtime leaf nofp indirect $target" %}
16513
16514 ins_encode %{
16515 __ blr($target$$Register);
16516 %}
16517
16518 ins_pipe(pipe_class_call);
16519 %}
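// This is the call shape MachCallRuntimeNode::ret_addr_offset() refers to:
// with a null entry point the call is a single blr, so the return address
// is one instruction past the start of the call.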
16520
16521 instruct CallLeafNoFPDirect(method meth)
16522 %{
16523 predicate(n->as_Call()->entry_point() != nullptr);
16524
16525 match(CallLeafNoFP);
16526
16527 effect(USE meth);
16528
16529 ins_cost(CALL_COST);
16530
16531 format %{ "CALL, runtime leaf nofp $meth" %}
16532
16533 ins_encode( aarch64_enc_java_to_runtime(meth) );
16534
16535 ins_pipe(pipe_class_call);
16536 %}
16537
16538 // Tail Call; Jump from runtime stub to Java code.
16539 // Also known as an 'interprocedural jump'.
16540 // Target of jump will eventually return to caller.
16541 // TailJump below removes the return address.
16542 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16543 // emitted just above the TailCall which has reset rfp to the caller state.
16544 instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)