12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include <sys/types.h>
27
28 #include "precompiled.hpp"
29 #include "asm/assembler.hpp"
30 #include "asm/assembler.inline.hpp"
31 #include "ci/ciEnv.hpp"
32 #include "code/compiledIC.hpp"
33 #include "compiler/compileTask.hpp"
34 #include "compiler/disassembler.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/barrierSetAssembler.hpp"
38 #include "gc/shared/cardTableBarrierSet.hpp"
39 #include "gc/shared/cardTable.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/tlab_globals.hpp"
42 #include "interpreter/bytecodeHistogram.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "jvm.h"
45 #include "memory/resourceArea.hpp"
46 #include "memory/universe.hpp"
47 #include "nativeInst_aarch64.hpp"
48 #include "oops/accessDecorators.hpp"
49 #include "oops/compressedKlass.inline.hpp"
50 #include "oops/compressedOops.inline.hpp"
51 #include "oops/klass.inline.hpp"
52 #include "runtime/continuation.hpp"
53 #include "runtime/icache.hpp"
54 #include "runtime/interfaceSupport.inline.hpp"
55 #include "runtime/javaThread.hpp"
56 #include "runtime/jniHandles.inline.hpp"
57 #include "runtime/sharedRuntime.hpp"
58 #include "runtime/stubRoutines.hpp"
59 #include "utilities/powerOfTwo.hpp"
60 #ifdef COMPILER1
61 #include "c1/c1_LIRAssembler.hpp"
62 #endif
63 #ifdef COMPILER2
64 #include "oops/oop.hpp"
65 #include "opto/compile.hpp"
66 #include "opto/node.hpp"
67 #include "opto/output.hpp"
68 #endif
69
70 #ifdef PRODUCT
71 #define BLOCK_COMMENT(str) /* nothing */
72 #else
73 #define BLOCK_COMMENT(str) block_comment(str)
74 #endif
75 #define STOP(str) stop(str);
76 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
77
78 #ifdef ASSERT
79 extern "C" void disnm(intptr_t p);
1153 }
1154
1155 void MacroAssembler::post_call_nop() {
1156 if (!Continuations::enabled()) {
1157 return;
1158 }
1159 InstructionMark im(this);
1160 relocate(post_call_nop_Relocation::spec());
1161 InlineSkippedInstructionsCounter skipCounter(this);
1162 nop();
1163 movk(zr, 0);
1164 movk(zr, 0);
1165 }
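// A post-call nop marks the return address of a compiled call so the
// continuation machinery can recognize (and later instrument) the call
// site. The pattern emitted above is three architectural no-ops:
//
//   nop            // recognizable anchor after the call
//   movk zr, #0    // writes to zr are discarded; the 16-bit immediate
//   movk zr, #0    //   fields act as patchable data slots
//
// The immediates start out as zero and can later be rewritten (see
// NativePostCallNop) to carry data without changing program behavior.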
1166
1167 // these are no-ops overridden by InterpreterMacroAssembler
1168
1169 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
1170
1171 void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
1172
1173 // Look up the method for a megamorphic invokeinterface call.
1174 // The target method is determined by <intf_klass, itable_index>.
1175 // The receiver klass is in recv_klass.
1176 // On success, the result will be in method_result, and execution falls through.
1177 // On failure, execution transfers to the given label.
1178 void MacroAssembler::lookup_interface_method(Register recv_klass,
1179 Register intf_klass,
1180 RegisterOrConstant itable_index,
1181 Register method_result,
1182 Register scan_temp,
1183 Label& L_no_such_interface,
1184 bool return_method) {
1185 assert_different_registers(recv_klass, intf_klass, scan_temp);
1186 assert_different_registers(method_result, intf_klass, scan_temp);
1187 assert(recv_klass != method_result || !return_method,
1188 "recv_klass can be destroyed when method isn't needed");
1189 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
1190 "caller must use same register for non-constant itable index as for method");
1191
1192 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
1606 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1607 subs(zr, scratch, InstanceKlass::fully_initialized);
1608 br(Assembler::EQ, *L_fast_path);
1609
1610 // Fast path check: current thread is initializer thread
1611 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1612 cmp(rthread, scratch);
1613
1614 if (L_slow_path == &L_fallthrough) {
1615 br(Assembler::EQ, *L_fast_path);
1616 bind(*L_slow_path);
1617 } else if (L_fast_path == &L_fallthrough) {
1618 br(Assembler::NE, *L_slow_path);
1619 bind(*L_fast_path);
1620 } else {
1621 Unimplemented();
1622 }
1623 }
1624
1625 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
1626 if (!VerifyOops) return;
1627
1628 // Pass register number to verify_oop_subroutine
1629 const char* b = nullptr;
1630 {
1631 ResourceMark rm;
1632 stringStream ss;
1633 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
1634 b = code_string(ss.as_string());
1635 }
1636 BLOCK_COMMENT("verify_oop {");
1637
1638 strip_return_address(); // This might happen within a stack frame.
1639 protect_return_address();
1640 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1641 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1642
1643 mov(r0, reg);
1644 movptr(rscratch1, (uintptr_t)(address)b);
1645
1646 // call indirectly to solve generation ordering problem
1647 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1648 ldr(rscratch2, Address(rscratch2));
1649 blr(rscratch2);
1650
1651 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1652 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1653 authenticate_return_address();
1654
1655 BLOCK_COMMENT("} verify_oop");
1656 }
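// This routine is normally reached via the verify_oop(reg) macro, which
// supplies the description string and __FILE__/__LINE__. An equivalent
// direct call, for illustration only:
//
//   __ _verify_oop(r0, "broken oop r0", __FILE__, __LINE__);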
1657
1658 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
1659 if (!VerifyOops) return;
1660
1661 const char* b = nullptr;
1662 {
1663 ResourceMark rm;
1664 stringStream ss;
1665 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
1666 b = code_string(ss.as_string());
1667 }
1668 BLOCK_COMMENT("verify_oop_addr {");
1669
1670 strip_return_address(); // This might happen within a stack frame.
1671 protect_return_address();
1672 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1673 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1674
1675 // addr may contain sp so we will have to adjust it based on the
1676 // pushes that we just did.
1677 if (addr.uses(sp)) {
1678 lea(r0, addr);
1679 ldr(r0, Address(r0, 4 * wordSize));
1737 call_VM_leaf_base(entry_point, 1);
1738 }
1739
1740 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1741 assert_different_registers(arg_1, c_rarg0);
1742 pass_arg0(this, arg_0);
1743 pass_arg1(this, arg_1);
1744 call_VM_leaf_base(entry_point, 2);
1745 }
1746
1747 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1748 Register arg_1, Register arg_2) {
1749 assert_different_registers(arg_1, c_rarg0);
1750 assert_different_registers(arg_2, c_rarg0, c_rarg1);
1751 pass_arg0(this, arg_0);
1752 pass_arg1(this, arg_1);
1753 pass_arg2(this, arg_2);
1754 call_VM_leaf_base(entry_point, 3);
1755 }
1756
1757 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1758 pass_arg0(this, arg_0);
1759 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1760 }
1761
1762 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1763
1764 assert_different_registers(arg_0, c_rarg1);
1765 pass_arg1(this, arg_1);
1766 pass_arg0(this, arg_0);
1767 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1768 }
1769
1770 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1771 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1772 assert_different_registers(arg_1, c_rarg2);
1773 pass_arg2(this, arg_2);
1774 pass_arg1(this, arg_1);
1775 pass_arg0(this, arg_0);
1776 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1782 assert_different_registers(arg_2, c_rarg3);
1783 pass_arg3(this, arg_3);
1784 pass_arg2(this, arg_2);
1785 pass_arg1(this, arg_1);
1786 pass_arg0(this, arg_0);
1787 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1788 }
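// The pass_arg* ordering, together with the assert_different_registers
// checks above, guarantees that moving one argument into its c_rarg
// register never clobbers an argument that has not been moved yet.
// Illustrative call (entry point chosen only as an example):
//
//   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
//                   rthread, rmethod);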
1789
1790 void MacroAssembler::null_check(Register reg, int offset) {
1791 if (needs_explicit_null_check(offset)) {
1792 // provoke OS null exception if reg is null by
1793 // accessing M[reg] w/o changing any registers
1794 // NOTE: this is plenty to provoke a segv
1795 ldr(zr, Address(reg));
1796 } else {
1797 // nothing to do, (later) access of M[reg + offset]
1798 // will provoke OS null exception if reg is null
1799 }
1800 }
1801
1802 // MacroAssembler protected routines needed to implement
1803 // public methods
1804
1805 void MacroAssembler::mov(Register r, Address dest) {
1806 code_section()->relocate(pc(), dest.rspec());
1807 uint64_t imm64 = (uint64_t)dest.target();
1808 movptr(r, imm64);
1809 }
1810
1811 // Move a constant pointer into r. In AArch64 mode the virtual
1812 // address space is 48 bits in size, so we only need three
1813 // instructions to create a patchable instruction sequence that can
1814 // reach anywhere.
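// Schematically, the emitted sequence for a 48-bit address is:
//
//   movz r, #imm16_0            // bits 15:0
//   movk r, #imm16_1, lsl #16   // bits 31:16
//   movk r, #imm16_2, lsl #32   // bits 47:32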
1815 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1816 #ifndef PRODUCT
1817 {
1818 char buffer[64];
1819 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
1820 block_comment(buffer);
1821 }
4460 adrp(rscratch1, src2, offset);
4461 ldr(rscratch1, Address(rscratch1, offset));
4462 cmp(src1, rscratch1);
4463 }
4464
4465 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4466 cmp(obj1, obj2);
4467 }
4468
4469 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4470 load_method_holder(rresult, rmethod);
4471 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4472 }
4473
4474 void MacroAssembler::load_method_holder(Register holder, Register method) {
4475 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
4476 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
4477 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
4478 }
4479
4480 void MacroAssembler::load_klass(Register dst, Register src) {
4481 if (UseCompressedClassPointers) {
4482 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4483 decode_klass_not_null(dst);
4484 } else {
4485 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4486 }
4487 }
4488
4489 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
4490 if (RestoreMXCSROnJNICalls) {
4491 Label OK;
4492 get_fpcr(tmp1);
4493 mov(tmp2, tmp1);
4494 // Set FPCR to the state we need. We do want Round to Nearest. We
4495 // don't want non-IEEE rounding modes or floating-point traps.
4496 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
4497 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12)
4498 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ
4499 eor(tmp2, tmp1, tmp2);
4535
4536 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4537 if (UseCompressedClassPointers) {
4538 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4539 if (CompressedKlassPointers::base() == nullptr) {
4540 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4541 return;
4542 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4543 && CompressedKlassPointers::shift() == 0) {
4544 // Only the bottom 32 bits matter
4545 cmpw(trial_klass, tmp);
4546 return;
4547 }
4548 decode_klass_not_null(tmp);
4549 } else {
4550 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4551 }
4552 cmp(trial_klass, tmp);
4553 }
4554
4555 void MacroAssembler::store_klass(Register dst, Register src) {
4556 // FIXME: Should this be a store release? Concurrent GCs assume the
4557 // klass length is valid if the klass field is not null.
4558 if (UseCompressedClassPointers) {
4559 encode_klass_not_null(src);
4560 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4561 } else {
4562 str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4563 }
4564 }
4565
4566 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4567 if (UseCompressedClassPointers) {
4568 // Store to klass gap in destination
4569 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4570 }
4571 }
4572
4573 // Algorithm must match CompressedOops::encode.
4574 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4859 if (as_raw) {
4860 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
4861 } else {
4862 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
4863 }
4864 }
4865
4866 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4867 Address dst, Register val,
4868 Register tmp1, Register tmp2, Register tmp3) {
4869 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4870 decorators = AccessInternal::decorator_fixup(decorators, type);
4871 bool as_raw = (decorators & AS_RAW) != 0;
4872 if (as_raw) {
4873 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
4874 } else {
4875 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
4876 }
4877 }
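// With AS_RAW set, the qualified BarrierSetAssembler:: calls above bypass
// any GC-specific overrides and perform a plain load or store; otherwise
// the virtual call dispatches to the barrier set in use (G1, ZGC, ...),
// which may wrap the access with pre/post barriers.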
4878
4879 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4880 Register tmp2, DecoratorSet decorators) {
4881 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
4882 }
4883
4884 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4885 Register tmp2, DecoratorSet decorators) {
4886 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
4887 }
4888
4889 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
4890 Register tmp2, Register tmp3, DecoratorSet decorators) {
4891 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
4892 }
4893
4894 // Used for storing nulls.
4895 void MacroAssembler::store_heap_oop_null(Address dst) {
4896 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
4897 }
4898
4935 oop_index = oop_recorder()->allocate_metadata_index(obj);
4936 } else {
4937 oop_index = oop_recorder()->find_index(obj);
4938 }
4939 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
4940 mov(dst, Address((address)obj, rspec));
4941 }
4942
4943 Address MacroAssembler::constant_oop_address(jobject obj) {
4944 #ifdef ASSERT
4945 {
4946 ThreadInVMfromUnknown tiv;
4947 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
4948 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
4949 }
4950 #endif
4951 int oop_index = oop_recorder()->find_index(obj);
4952 return Address((address)obj, oop_Relocation::spec(oop_index));
4953 }
4954
4955 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4956 void MacroAssembler::tlab_allocate(Register obj,
4957 Register var_size_in_bytes,
4958 int con_size_in_bytes,
4959 Register t1,
4960 Register t2,
4961 Label& slow_case) {
4962 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4963 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4964 }
4965
4966 void MacroAssembler::verify_tlab() {
4967 #ifdef ASSERT
4968 if (UseTLAB && VerifyOops) {
4969 Label next, ok;
4970
4971 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
4972
4973 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4974 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
4975 cmp(rscratch2, rscratch1);
4976 br(Assembler::HS, next);
4977 STOP("assert(top >= start)");
4978 should_not_reach_here();
4979
4980 bind(next);
4981 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
4982 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4983 cmp(rscratch2, rscratch1);
4984 br(Assembler::HS, ok);
4985 STOP("assert(top <= end)");
4986 should_not_reach_here();
4987
4988 bind(ok);
4989 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
4990 }
4991 #endif
4992 }
4993
4994 // Writes to successive stack pages until the given offset is reached, to
4995 // check for stack overflow plus shadow pages. This clobbers tmp.
4996 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
4997 assert_different_registers(tmp, size, rscratch1);
4998 mov(tmp, sp);
4999 // Bang stack for total size given plus shadow page size.
5000 // Bang one page at a time because large size can bang beyond yellow and
5001 // red zones.
5002 Label loop;
5003 mov(rscratch1, (int)os::vm_page_size());
5004 bind(loop);
5005 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5006 subsw(size, size, rscratch1);
5007 str(size, Address(tmp));
5008 br(Assembler::GT, loop);
5009
5010 // Bang down shadow pages too.
5011 // At this point, (tmp-0) is the last address touched, so don't
5012 // touch it again. (It was touched as (tmp-pagesize) but then tmp
5013 // was post-decremented.) Skip this address by starting at i=1, and
5099 }
5100
5101 void MacroAssembler::remove_frame(int framesize) {
5102 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5103 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5104 if (framesize < ((1 << 9) + 2 * wordSize)) {
5105 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5106 add(sp, sp, framesize);
5107 } else {
5108 if (framesize < ((1 << 12) + 2 * wordSize))
5109 add(sp, sp, framesize - 2 * wordSize);
5110 else {
5111 mov(rscratch1, framesize - 2 * wordSize);
5112 add(sp, sp, rscratch1);
5113 }
5114 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5115 }
5116 authenticate_return_address();
5117 }
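// The thresholds above mirror AArch64 encoding limits: ldp's scaled signed
// immediate only reaches about +/-512 bytes (hence 1 << 9), and an add
// immediate is 12 bits, i.e. up to 4 KiB (hence 1 << 12). Anything larger
// must be materialized in rscratch1 first.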
5118
5119
5120 // This method counts leading positive bytes (highest bit not set) in the provided byte array.
5121 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5122 // The simple and most common case, a small aligned array that is not at the
5123 // end of a memory page, is handled here. All other cases are in the stub.
5124 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5125 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
5126 assert_different_registers(ary1, len, result);
5127
5128 mov(result, len);
5129 cmpw(len, 0);
5130 br(LE, DONE);
5131 cmpw(len, 4 * wordSize);
5132 br(GE, STUB_LONG); // size > 32 then go to stub
5133
5134 int shift = 64 - exact_log2(os::vm_page_size());
5135 lsl(rscratch1, ary1, shift);
5136 mov(rscratch2, (size_t)(4 * wordSize) << shift);
5137 adds(rscratch2, rscratch1, rscratch2); // At end of page?
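// This is a page-cross test: after the lsl, the page offset of ary1 sits in
// the top bits of rscratch1, so adding the (equally shifted) maximum number
// of bytes to read sets the carry flag exactly when the read could run off
// the current page.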
5138 br(CS, STUB); // at the end of page then go to stub
6008 // On other systems, the helper is an ordinary C function.
6009 //
6010 void MacroAssembler::get_thread(Register dst) {
6011 RegSet saved_regs =
6012 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
6013 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
6014
6015 protect_return_address();
6016 push(saved_regs, sp);
6017
6018 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
6019 blr(lr);
6020 if (dst != c_rarg0) {
6021 mov(dst, c_rarg0);
6022 }
6023
6024 pop(saved_regs, sp);
6025 authenticate_return_address();
6026 }
6027
6028 void MacroAssembler::cache_wb(Address line) {
6029 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
6030 assert(line.index() == noreg, "index should be noreg");
6031 assert(line.offset() == 0, "offset should be 0");
6032 // would like to assert this
6033 // assert(line._ext.shift == 0, "shift should be zero");
6034 if (VM_Version::supports_dcpop()) {
6035 // writeback using clear virtual address to point of persistence
6036 dc(Assembler::CVAP, line.base());
6037 } else {
6038 // no need to generate anything as Unsafe.writebackMemory should
6039 // never invoke this stub
6040 }
6041 }
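// Illustrative use, assuming r10 holds a cache-line address:
//
//   __ cache_wb(Address(r10));   // emits "dc cvap" on cores with DCPOP
//
// On cores without DC CVAP the body is intentionally empty; per the comment
// above, Unsafe.writebackMemory is never expected to reach this stub there.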
6042
6043 void MacroAssembler::cache_wbsync(bool is_pre) {
6044 // we only need a barrier post sync
6045 if (!is_pre) {
6046 membar(Assembler::AnyAny);
6047 }
6381 }
6382
6383 // Implements lightweight-locking.
6384 // Branches to slow upon failure to lock the object, with ZF cleared.
6385 // Falls through upon success with ZF set.
6386 //
6387 // - obj: the object to be locked
6388 // - hdr: the header, already loaded from obj, will be destroyed
6389 // - t1, t2: temporary registers, will be destroyed
6390 void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
6391 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6392 assert_different_registers(obj, hdr, t1, t2, rscratch1);
6393
6394 // Check if we would have space on lock-stack for the object.
6395 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6396 cmpw(t1, (unsigned)LockStack::end_offset() - 1);
6397 br(Assembler::GT, slow);
6398
6399 // Load (object->mark() | 1) into hdr
6400 orr(hdr, hdr, markWord::unlocked_value);
6401 // Clear lock-bits, into t2
6402 eor(t2, hdr, markWord::unlocked_value);
6403 // Try to swing header from unlocked to locked
6404 // Clobbers rscratch1 when UseLSE is false
6405 cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
6406 /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
6407 br(Assembler::NE, slow);
6408
6409 // After successful lock, push object on lock-stack
6410 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6411 str(obj, Address(rthread, t1));
6412 addw(t1, t1, oopSize);
6413 strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6414 }
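// Lock-stack bookkeeping sketch (the lock-stack lives inside JavaThread and
// grows upward):
//
//   rthread + lock_stack_top_offset()   // byte offset of the current top
//   rthread + top                       // slot that receives obj on success
//
// On the fast path above the object header is CASed from (mark | unlocked)
// to (mark & ~unlocked) and obj is pushed; any failure, including a full
// lock-stack, branches to slow.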
6415
6416 // Implements lightweight-unlocking.
6417 // Branches to slow upon failure, with ZF cleared.
6418 // Falls through upon success, with ZF set.
6419 //
6420 // - obj: the object to be unlocked
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include <sys/types.h>
27
28 #include "precompiled.hpp"
29 #include "asm/assembler.hpp"
30 #include "asm/assembler.inline.hpp"
31 #include "ci/ciEnv.hpp"
32 #include "ci/ciInlineKlass.hpp"
33 #include "code/compiledIC.hpp"
34 #include "compiler/compileTask.hpp"
35 #include "compiler/disassembler.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/barrierSetAssembler.hpp"
39 #include "gc/shared/cardTableBarrierSet.hpp"
40 #include "gc/shared/cardTable.hpp"
41 #include "gc/shared/collectedHeap.hpp"
42 #include "gc/shared/tlab_globals.hpp"
43 #include "interpreter/bytecodeHistogram.hpp"
44 #include "interpreter/interpreter.hpp"
45 #include "jvm.h"
46 #include "memory/resourceArea.hpp"
47 #include "memory/universe.hpp"
48 #include "nativeInst_aarch64.hpp"
49 #include "oops/accessDecorators.hpp"
50 #include "oops/compressedKlass.inline.hpp"
51 #include "oops/compressedOops.inline.hpp"
52 #include "oops/klass.inline.hpp"
53 #include "oops/resolvedFieldEntry.hpp"
54 #include "runtime/continuation.hpp"
55 #include "runtime/icache.hpp"
56 #include "runtime/interfaceSupport.inline.hpp"
57 #include "runtime/javaThread.hpp"
58 #include "runtime/jniHandles.inline.hpp"
59 #include "runtime/sharedRuntime.hpp"
60 #include "runtime/signature_cc.hpp"
61 #include "runtime/stubRoutines.hpp"
62 #include "utilities/powerOfTwo.hpp"
63 #include "vmreg_aarch64.inline.hpp"
64 #ifdef COMPILER1
65 #include "c1/c1_LIRAssembler.hpp"
66 #endif
67 #ifdef COMPILER2
68 #include "oops/oop.hpp"
69 #include "opto/compile.hpp"
70 #include "opto/node.hpp"
71 #include "opto/output.hpp"
72 #endif
73
74 #ifdef PRODUCT
75 #define BLOCK_COMMENT(str) /* nothing */
76 #else
77 #define BLOCK_COMMENT(str) block_comment(str)
78 #endif
79 #define STOP(str) stop(str);
80 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
81
82 #ifdef ASSERT
83 extern "C" void disnm(intptr_t p);
1157 }
1158
1159 void MacroAssembler::post_call_nop() {
1160 if (!Continuations::enabled()) {
1161 return;
1162 }
1163 InstructionMark im(this);
1164 relocate(post_call_nop_Relocation::spec());
1165 InlineSkippedInstructionsCounter skipCounter(this);
1166 nop();
1167 movk(zr, 0);
1168 movk(zr, 0);
1169 }
1170
1171 // these are no-ops overridden by InterpreterMacroAssembler
1172
1173 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
1174
1175 void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
1176
1177 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
1178 #ifdef ASSERT
1179 {
1180 Label done_check;
1181 test_klass_is_inline_type(inline_klass, temp_reg, done_check);
1182 stop("get_default_value_oop from non inline type klass");
1183 bind(done_check);
1184 }
1185 #endif
1186 Register offset = temp_reg;
1187 // Getting the offset of the pre-allocated default value
1188 ldr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
1189 ldr(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
1190
1191 // Getting the mirror
1192 ldr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
1193 resolve_oop_handle(obj, inline_klass, temp_reg);
1194
1195 // Getting the pre-allocated default value from the mirror
1196 Address field(obj, offset);
1197 load_heap_oop(obj, field, inline_klass, rscratch2);
1198 }
1199
1200 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
1201 #ifdef ASSERT
1202 {
1203 Label done_check;
1204 test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
1205 stop("get_empty_value from non-empty inline klass");
1206 bind(done_check);
1207 }
1208 #endif
1209 get_default_value_oop(inline_klass, temp_reg, obj);
1210 }
1211
1212 // Look up the method for a megamorphic invokeinterface call.
1213 // The target method is determined by <intf_klass, itable_index>.
1214 // The receiver klass is in recv_klass.
1215 // On success, the result will be in method_result, and execution falls through.
1216 // On failure, execution transfers to the given label.
1217 void MacroAssembler::lookup_interface_method(Register recv_klass,
1218 Register intf_klass,
1219 RegisterOrConstant itable_index,
1220 Register method_result,
1221 Register scan_temp,
1222 Label& L_no_such_interface,
1223 bool return_method) {
1224 assert_different_registers(recv_klass, intf_klass, scan_temp);
1225 assert_different_registers(method_result, intf_klass, scan_temp);
1226 assert(recv_klass != method_result || !return_method,
1227 "recv_klass can be destroyed when method isn't needed");
1228 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
1229 "caller must use same register for non-constant itable index as for method");
1230
1231 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
1645 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1646 subs(zr, scratch, InstanceKlass::fully_initialized);
1647 br(Assembler::EQ, *L_fast_path);
1648
1649 // Fast path check: current thread is initializer thread
1650 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1651 cmp(rthread, scratch);
1652
1653 if (L_slow_path == &L_fallthrough) {
1654 br(Assembler::EQ, *L_fast_path);
1655 bind(*L_slow_path);
1656 } else if (L_fast_path == &L_fallthrough) {
1657 br(Assembler::NE, *L_slow_path);
1658 bind(*L_fast_path);
1659 } else {
1660 Unimplemented();
1661 }
1662 }
1663
1664 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
1665 if (!VerifyOops || VerifyAdapterSharing) {
1666 // The address of the code string below confuses VerifyAdapterSharing
1667 // because it may differ between otherwise equivalent adapters.
1668 return;
1669 }
1670
1671 // Pass register number to verify_oop_subroutine
1672 const char* b = nullptr;
1673 {
1674 ResourceMark rm;
1675 stringStream ss;
1676 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
1677 b = code_string(ss.as_string());
1678 }
1679 BLOCK_COMMENT("verify_oop {");
1680
1681 strip_return_address(); // This might happen within a stack frame.
1682 protect_return_address();
1683 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1684 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1685
1686 mov(r0, reg);
1687 movptr(rscratch1, (uintptr_t)(address)b);
1688
1689 // call indirectly to solve generation ordering problem
1690 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1691 ldr(rscratch2, Address(rscratch2));
1692 blr(rscratch2);
1693
1694 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1695 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1696 authenticate_return_address();
1697
1698 BLOCK_COMMENT("} verify_oop");
1699 }
1700
1701 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
1702 if (!VerifyOops || VerifyAdapterSharing) {
1703 // The address of the code string below confuses VerifyAdapterSharing
1704 // because it may differ between otherwise equivalent adapters.
1705 return;
1706 }
1707
1708 const char* b = nullptr;
1709 {
1710 ResourceMark rm;
1711 stringStream ss;
1712 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
1713 b = code_string(ss.as_string());
1714 }
1715 BLOCK_COMMENT("verify_oop_addr {");
1716
1717 strip_return_address(); // This might happen within a stack frame.
1718 protect_return_address();
1719 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1720 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1721
1722 // addr may contain sp so we will have to adjust it based on the
1723 // pushes that we just did.
1724 if (addr.uses(sp)) {
1725 lea(r0, addr);
1726 ldr(r0, Address(r0, 4 * wordSize));
1784 call_VM_leaf_base(entry_point, 1);
1785 }
1786
1787 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1788 assert_different_registers(arg_1, c_rarg0);
1789 pass_arg0(this, arg_0);
1790 pass_arg1(this, arg_1);
1791 call_VM_leaf_base(entry_point, 2);
1792 }
1793
1794 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1795 Register arg_1, Register arg_2) {
1796 assert_different_registers(arg_1, c_rarg0);
1797 assert_different_registers(arg_2, c_rarg0, c_rarg1);
1798 pass_arg0(this, arg_0);
1799 pass_arg1(this, arg_1);
1800 pass_arg2(this, arg_2);
1801 call_VM_leaf_base(entry_point, 3);
1802 }
1803
1804 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1805 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1806 }
1807
1808 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1809 pass_arg0(this, arg_0);
1810 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1811 }
1812
1813 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1814
1815 assert_different_registers(arg_0, c_rarg1);
1816 pass_arg1(this, arg_1);
1817 pass_arg0(this, arg_0);
1818 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1819 }
1820
1821 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1822 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1823 assert_different_registers(arg_1, c_rarg2);
1824 pass_arg2(this, arg_2);
1825 pass_arg1(this, arg_1);
1826 pass_arg0(this, arg_0);
1827 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1833 assert_different_registers(arg_2, c_rarg3);
1834 pass_arg3(this, arg_3);
1835 pass_arg2(this, arg_2);
1836 pass_arg1(this, arg_1);
1837 pass_arg0(this, arg_0);
1838 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1839 }
1840
1841 void MacroAssembler::null_check(Register reg, int offset) {
1842 if (needs_explicit_null_check(offset)) {
1843 // provoke OS null exception if reg is null by
1844 // accessing M[reg] w/o changing any registers
1845 // NOTE: this is plenty to provoke a segv
1846 ldr(zr, Address(reg));
1847 } else {
1848 // nothing to do, (later) access of M[reg + offset]
1849 // will provoke OS null exception if reg is null
1850 }
1851 }
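// Illustrative use (r1 holding a possibly-null oop): for a small field
// offset, needs_explicit_null_check() is false and no code is emitted; the
// later field access itself performs the implicit check:
//
//   __ null_check(r1, 8);          // emits nothing for a small offset
//   __ ldr(r0, Address(r1, 8));    // faults (SEGV -> NPE) if r1 is null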
1852
1853 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
1854 assert_different_registers(markword, rscratch2);
1855 andr(markword, markword, markWord::inline_type_mask_in_place);
1856 mov(rscratch2, markWord::inline_type_pattern);
1857 cmp(markword, rscratch2);
1858 br(Assembler::EQ, is_inline_type);
1859 }
1860
1861 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
1862 ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
1863 andr(temp_reg, temp_reg, JVM_ACC_IDENTITY);
1864 cbz(temp_reg, is_inline_type);
1865 }
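// Note the inverted sense: JVM_ACC_IDENTITY marks ordinary identity classes,
// so a zero result after the mask (hence cbz) means the klass is an inline
// (value) type.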
1866
1867 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
1868 assert_different_registers(tmp, rscratch1);
1869 cbz(object, not_inline_type);
1870 const int is_inline_type_mask = markWord::inline_type_pattern;
1871 ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
1872 mov(rscratch1, is_inline_type_mask);
1873 andr(tmp, tmp, rscratch1);
1874 cmp(tmp, rscratch1);
1875 br(Assembler::NE, not_inline_type);
1876 }
1877
1878 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
1879 #ifdef ASSERT
1880 {
1881 Label done_check;
1882 test_klass_is_inline_type(klass, temp_reg, done_check);
1883 stop("test_klass_is_empty_inline_type with non inline type klass");
1884 bind(done_check);
1885 }
1886 #endif
1887 ldrw(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
1888 andr(temp_reg, temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
1889 cbnz(temp_reg, is_empty_inline_type);
1890 }
1891
1892 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
1893 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1894 tbnz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, is_null_free_inline_type);
1895 }
1896
1897 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
1898 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1899 tbz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, not_null_free_inline_type);
1900 }
1901
1902 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
1903 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1904 tbnz(flags, ResolvedFieldEntry::is_flat_shift, is_flat);
1905 }
1906
1907 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
1908 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1909 tbnz(flags, ResolvedFieldEntry::has_null_marker_shift, has_null_marker);
1910 }
1911
1912 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
1913 Label test_mark_word;
1914 // load mark word
1915 ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
1916 // check displaced
1917 tst(temp_reg, markWord::unlocked_value);
1918 br(Assembler::NE, test_mark_word);
1919 // slow path use klass prototype
1920 load_prototype_header(temp_reg, oop);
1921
1922 bind(test_mark_word);
1923 andr(temp_reg, temp_reg, test_bit);
1924 if (jmp_set) {
1925 cbnz(temp_reg, jmp_label);
1926 } else {
1927 cbz(temp_reg, jmp_label);
1928 }
1929 }
1930
1931 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) {
1932 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
1933 }
1934
1935 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
1936 Label&is_non_flat_array) {
1937 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
1938 }
1939
1940 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
1941 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
1942 }
1943
1944 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
1945 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
1946 }
1947
1948 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
1949 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
1950 br(Assembler::NE, is_flat_array);
1951 }
1952
1953 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
1954 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
1955 br(Assembler::EQ, is_non_flat_array);
1956 }
1957
1958 // MacroAssembler protected routines needed to implement
1959 // public methods
1960
1961 void MacroAssembler::mov(Register r, Address dest) {
1962 code_section()->relocate(pc(), dest.rspec());
1963 uint64_t imm64 = (uint64_t)dest.target();
1964 movptr(r, imm64);
1965 }
1966
1967 // Move a constant pointer into r. In AArch64 mode the virtual
1968 // address space is 48 bits in size, so we only need three
1969 // instructions to create a patchable instruction sequence that can
1970 // reach anywhere.
1971 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1972 #ifndef PRODUCT
1973 {
1974 char buffer[64];
1975 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
1976 block_comment(buffer);
1977 }
4616 adrp(rscratch1, src2, offset);
4617 ldr(rscratch1, Address(rscratch1, offset));
4618 cmp(src1, rscratch1);
4619 }
4620
4621 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4622 cmp(obj1, obj2);
4623 }
4624
4625 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4626 load_method_holder(rresult, rmethod);
4627 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4628 }
4629
4630 void MacroAssembler::load_method_holder(Register holder, Register method) {
4631 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
4632 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
4633 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
4634 }
4635
4636 void MacroAssembler::load_metadata(Register dst, Register src) {
4637 if (UseCompressedClassPointers) {
4638 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4639 } else {
4640 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4641 }
4642 }
4643
4644 void MacroAssembler::load_klass(Register dst, Register src) {
4645 if (UseCompressedClassPointers) {
4646 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4647 decode_klass_not_null(dst);
4648 } else {
4649 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4650 }
4651 }
4652
4653 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
4654 if (RestoreMXCSROnJNICalls) {
4655 Label OK;
4656 get_fpcr(tmp1);
4657 mov(tmp2, tmp1);
4658 // Set FPCR to the state we need. We do want Round to Nearest. We
4659 // don't want non-IEEE rounding modes or floating-point traps.
4660 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
4661 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12)
4662 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ
4663 eor(tmp2, tmp1, tmp2);
4699
4700 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4701 if (UseCompressedClassPointers) {
4702 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4703 if (CompressedKlassPointers::base() == nullptr) {
4704 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4705 return;
4706 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4707 && CompressedKlassPointers::shift() == 0) {
4708 // Only the bottom 32 bits matter
4709 cmpw(trial_klass, tmp);
4710 return;
4711 }
4712 decode_klass_not_null(tmp);
4713 } else {
4714 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4715 }
4716 cmp(trial_klass, tmp);
4717 }
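// The fast paths above: with a null encoding base the narrow klass is just
// (Klass* >> shift), so trial_klass is compared against tmp shifted back up
// without a full decode; and when the base has no bits in the low 32 and
// the shift is zero, a 32-bit compare (cmpw) of the low halves suffices.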
4718
4719 void MacroAssembler::load_prototype_header(Register dst, Register src) {
4720 load_klass(dst, src);
4721 ldr(dst, Address(dst, Klass::prototype_header_offset()));
4722 }
4723
4724 void MacroAssembler::store_klass(Register dst, Register src) {
4725 // FIXME: Should this be a store release? Concurrent GCs assume the
4726 // klass length is valid if the klass field is not null.
4727 if (UseCompressedClassPointers) {
4728 encode_klass_not_null(src);
4729 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4730 } else {
4731 str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4732 }
4733 }
4734
4735 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4736 if (UseCompressedClassPointers) {
4737 // Store to klass gap in destination
4738 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4739 }
4740 }
4741
4742 // Algorithm must match CompressedOops::encode.
4743 void MacroAssembler::encode_heap_oop(Register d, Register s) {
5028 if (as_raw) {
5029 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
5030 } else {
5031 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
5032 }
5033 }
5034
5035 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
5036 Address dst, Register val,
5037 Register tmp1, Register tmp2, Register tmp3) {
5038 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5039 decorators = AccessInternal::decorator_fixup(decorators, type);
5040 bool as_raw = (decorators & AS_RAW) != 0;
5041 if (as_raw) {
5042 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5043 } else {
5044 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5045 }
5046 }
5047
5048 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
5049 Register inline_klass) {
5050 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5051 bs->value_copy(this, decorators, src, dst, inline_klass);
5052 }
5053
5054 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
5055 ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
5056 ldrw(offset, Address(offset, InlineKlass::first_field_offset_offset()));
5057 }
5058
5059 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
5060 // ((address) (void*) o) + vk->first_field_offset();
5061 Register offset = (data == oop) ? rscratch1 : data;
5062 first_field_offset(inline_klass, offset);
5063 if (data == oop) {
5064 add(data, data, offset);
5065 } else {
5066 lea(data, Address(oop, offset));
5067 }
5068 }
5069
5070 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5071 Register index, Register data) {
5072 assert_different_registers(array, array_klass, index);
5073 assert_different_registers(rscratch1, array, index);
5074
5075 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5076 ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset()));
5077
5078 // Klass::layout_helper_log2_element_size(lh)
5079 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5080 lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift);
5081 andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask);
5082 lslv(index, index, rscratch1);
5083
5084 add(data, array, index);
5085 add(data, data, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT));
5086 }
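// Worked example: with 16-byte elements the layout helper's log2 element
// size is 4, so for index == 3 the computed address is
//   array + (3 << 4) + arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)
// i.e. 48 bytes past the start of the element data. Note that the index
// register is clobbered by the lslv above.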
5087
5088 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5089 Register tmp2, DecoratorSet decorators) {
5090 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
5091 }
5092
5093 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5094 Register tmp2, DecoratorSet decorators) {
5095 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
5096 }
5097
5098 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5099 Register tmp2, Register tmp3, DecoratorSet decorators) {
5100 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5101 }
5102
5103 // Used for storing nulls.
5104 void MacroAssembler::store_heap_oop_null(Address dst) {
5105 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5106 }
5107
5144 oop_index = oop_recorder()->allocate_metadata_index(obj);
5145 } else {
5146 oop_index = oop_recorder()->find_index(obj);
5147 }
5148 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
5149 mov(dst, Address((address)obj, rspec));
5150 }
5151
5152 Address MacroAssembler::constant_oop_address(jobject obj) {
5153 #ifdef ASSERT
5154 {
5155 ThreadInVMfromUnknown tiv;
5156 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5157 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
5158 }
5159 #endif
5160 int oop_index = oop_recorder()->find_index(obj);
5161 return Address((address)obj, oop_Relocation::spec(oop_index));
5162 }
5163
5164 // Object / value buffer allocation...
5165 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
5166 Register t1, Register t2,
5167 bool clear_fields, Label& alloc_failed)
5168 {
5169 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
5170 Register layout_size = t1;
5171 assert(new_obj == r0, "needs to be r0");
5172 assert_different_registers(klass, new_obj, t1, t2);
5173
5174 // get instance_size in InstanceKlass (scaled to a count of bytes)
5175 ldrw(layout_size, Address(klass, Klass::layout_helper_offset()));
5176 // test to see if it has a finalizer or is malformed in some way
5177 tst(layout_size, Klass::_lh_instance_slow_path_bit);
5178 br(Assembler::NE, slow_case_no_pop);
5179
5180 // Allocate the instance:
5181 // If TLAB is enabled:
5182 // Try to allocate in the TLAB.
5183 // If fails, go to the slow path.
5184 // Initialize the allocation.
5185 // Exit.
5186 //
5187 // Go to slow path.
5188
5189 if (UseTLAB) {
5190 push(klass);
5191 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
5192 if (ZeroTLAB || (!clear_fields)) {
5193 // the fields have already been cleared
5194 b(initialize_header);
5195 } else {
5196 // initialize both the header and fields
5197 b(initialize_object);
5198 }
5199
5200 if (clear_fields) {
5201 // The object is initialized before the header. If the object size is
5202 // zero, go directly to the header initialization.
5203 bind(initialize_object);
5204 subs(layout_size, layout_size, sizeof(oopDesc));
5205 br(Assembler::EQ, initialize_header);
5206
5207 // Initialize topmost object field, divide size by 8, check if odd and
5208 // test if zero.
5209
5210 #ifdef ASSERT
5211 // make sure instance_size was a multiple of 8
5212 Label L;
5213 tst(layout_size, 7);
5214 br(Assembler::EQ, L);
5215 stop("object size is not multiple of 8 - adjust this code");
5216 bind(L);
5217 // must be > 0, no extra check needed here
5218 #endif
5219
5220 lsr(layout_size, layout_size, LogBytesPerLong);
5221
5222 // initialize remaining object fields: instance_size was a multiple of 8
5223 {
5224 Label loop;
5225 Register base = t2;
5226
5227 bind(loop);
5228 add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong);
5229 str(zr, Address(rscratch1, sizeof(oopDesc) - 1*oopSize));
5230 subs(layout_size, layout_size, 1);
5231 br(Assembler::NE, loop);
5232 }
5233 } // clear_fields
5234
5235 // initialize object header only.
5236 bind(initialize_header);
5237 pop(klass);
5238 Register mark_word = t2;
5239 ldr(mark_word, Address(klass, Klass::prototype_header_offset()));
5240 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes ()));
5241 store_klass_gap(new_obj, zr); // zero klass gap for compressed oops
5242 mov(t2, klass); // preserve klass
5243 store_klass(new_obj, t2); // src klass reg is potentially compressed
5244
5245 // TODO: Valhalla removed SharedRuntime::dtrace_object_alloc from here?
5246
5247 b(done);
5248 }
5249
5250 if (UseTLAB) {
5251 bind(slow_case);
5252 pop(klass);
5253 }
5254 bind(slow_case_no_pop);
5255 b(alloc_failed);
5256
5257 bind(done);
5258 }
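// Note: klass is pushed around the allocation above because tlab_allocate
// receives it as a scratch register (t1) and clobbers it; both the
// header-initialization path and the slow path pop it before use.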
5259
5260 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5261 void MacroAssembler::tlab_allocate(Register obj,
5262 Register var_size_in_bytes,
5263 int con_size_in_bytes,
5264 Register t1,
5265 Register t2,
5266 Label& slow_case) {
5267 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5268 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5269 }
5270
5271 void MacroAssembler::verify_tlab() {
5272 #ifdef ASSERT
5273 if (UseTLAB && VerifyOops) {
5274 Label next, ok;
5275
5276 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5277
5278 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5279 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5280 cmp(rscratch2, rscratch1);
5281 br(Assembler::HS, next);
5282 STOP("assert(top >= start)");
5283 should_not_reach_here();
5284
5285 bind(next);
5286 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5287 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5288 cmp(rscratch2, rscratch1);
5289 br(Assembler::HS, ok);
5290 STOP("assert(top <= end)");
5291 should_not_reach_here();
5292
5293 bind(ok);
5294 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5295 }
5296 #endif
5297 }
5298
5299 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
5300 ldr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
5301 #ifdef ASSERT
5302 {
5303 Label done;
5304 cbnz(inline_klass, done);
5305 stop("get_inline_type_field_klass contains no inline klass");
5306 bind(done);
5307 }
5308 #endif
5309 lea(inline_klass, Address(inline_klass, Array<InlineKlass*>::base_offset_in_bytes()));
5310 ldr(inline_klass, Address(inline_klass, index, Address::lsl(3)));
5311 }
5312
5313 // Writes to successive stack pages until the given offset is reached, to
5314 // check for stack overflow plus shadow pages. This clobbers tmp.
5315 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5316 assert_different_registers(tmp, size, rscratch1);
5317 mov(tmp, sp);
5318 // Bang stack for total size given plus shadow page size.
5319 // Bang one page at a time because large size can bang beyond yellow and
5320 // red zones.
5321 Label loop;
5322 mov(rscratch1, (int)os::vm_page_size());
5323 bind(loop);
5324 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5325 subsw(size, size, rscratch1);
5326 str(size, Address(tmp));
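// The stored value is irrelevant; the store itself is the probe that
// faults (or trips the guard page) if the stack cannot grow this far.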
5327 br(Assembler::GT, loop);
5328
5329 // Bang down shadow pages too.
5330 // At this point, (tmp-0) is the last address touched, so don't
5331 // touch it again. (It was touched as (tmp-pagesize) but then tmp
5332 // was post-decremented.) Skip this address by starting at i=1, and
5418 }
5419
5420 void MacroAssembler::remove_frame(int framesize) {
5421 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5422 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5423 if (framesize < ((1 << 9) + 2 * wordSize)) {
5424 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5425 add(sp, sp, framesize);
5426 } else {
5427 if (framesize < ((1 << 12) + 2 * wordSize))
5428 add(sp, sp, framesize - 2 * wordSize);
5429 else {
5430 mov(rscratch1, framesize - 2 * wordSize);
5431 add(sp, sp, rscratch1);
5432 }
5433 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5434 }
5435 authenticate_return_address();
5436 }
5437
5438 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
5439 if (needs_stack_repair) {
5440 // Remove the extension of the caller's frame used for inline type unpacking
5441 //
5442 // Right now the stack looks like this:
5443 //
5444 // | Arguments from caller |
5445 // |---------------------------| <-- caller's SP
5446 // | Saved LR #1 |
5447 // | Saved FP #1 |
5448 // |---------------------------|
5449 // | Extension space for |
5450 // | inline arg (un)packing |
5451 // |---------------------------| <-- start of this method's frame
5452 // | Saved LR #2 |
5453 // | Saved FP #2 |
5454 // |---------------------------| <-- FP
5455 // | sp_inc |
5456 // | method locals |
5457 // |---------------------------| <-- SP
5458 //
5459 // There are two copies of FP and LR on the stack. They will be identical
5460 // unless the caller has been deoptimized, in which case LR #1 will be patched
5461 // to point at the deopt blob, and LR #2 will still point into the old method.
5462 //
5463 // The sp_inc stack slot holds the total size of the frame including the
5464 // extension space minus two words for the saved FP and LR.
5465
5466 int sp_inc_offset = initial_framesize - 3 * wordSize; // Immediately below saved LR and FP
5467
5468 ldr(rscratch1, Address(sp, sp_inc_offset));
5469 add(sp, sp, rscratch1);
5470 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5471 } else {
5472 remove_frame(initial_framesize);
5473 }
5474 }
5475
5476 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
5477 int real_frame_size = frame_size + sp_inc;
5478 assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
5479 assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
5480 assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
5481
5482 int sp_inc_offset = frame_size - 3 * wordSize; // Immediately below saved LR and FP
5483
5484 // Subtract two words for the saved FP and LR as these will be popped
5485 // separately. See remove_frame above.
5486 mov(rscratch1, real_frame_size - 2*wordSize);
5487 str(rscratch1, Address(sp, sp_inc_offset));
5488 }
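// Worked example (wordSize == 8): with frame_size == 96 and sp_inc == 32,
// real_frame_size is 128 and the value 112 (128 - 16) is stored at
// sp + 72 (96 - 24). remove_frame() later adds 112 to sp and pops the
// remaining FP/LR pair, landing exactly on the caller's SP.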
5489
5490 // This method counts leading positive bytes (highest bit not set) in the provided byte array.
5491 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5492 // The simple and most common case, a small aligned array that is not at the
5493 // end of a memory page, is handled here. All other cases are in the stub.
5494 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5495 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
5496 assert_different_registers(ary1, len, result);
5497
5498 mov(result, len);
5499 cmpw(len, 0);
5500 br(LE, DONE);
5501 cmpw(len, 4 * wordSize);
5502 br(GE, STUB_LONG); // size > 32 then go to stub
5503
5504 int shift = 64 - exact_log2(os::vm_page_size());
5505 lsl(rscratch1, ary1, shift);
5506 mov(rscratch2, (size_t)(4 * wordSize) << shift);
5507 adds(rscratch2, rscratch1, rscratch2); // At end of page?
5508 br(CS, STUB); // at the end of page then go to stub
6378 // On other systems, the helper is an ordinary C function.
6379 //
6380 void MacroAssembler::get_thread(Register dst) {
6381 RegSet saved_regs =
6382 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
6383 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
6384
6385 protect_return_address();
6386 push(saved_regs, sp);
6387
6388 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
6389 blr(lr);
6390 if (dst != c_rarg0) {
6391 mov(dst, c_rarg0);
6392 }
6393
6394 pop(saved_regs, sp);
6395 authenticate_return_address();
6396 }
6397
6398 #ifdef COMPILER2
6399 // C2 compiled method's prolog code
6400 // Moved here from aarch64.ad to support the Valhalla code below
6401 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
6402 if (C->clinit_barrier_on_entry()) {
6403 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
6404
6405 Label L_skip_barrier;
6406
6407 mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
6408 clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
6409 far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
6410 bind(L_skip_barrier);
6411 }
6412
6413 if (C->max_vector_size() > 0) {
6414 reinitialize_ptrue();
6415 }
6416
6417 int bangsize = C->output()->bang_size_in_bytes();
6418 if (C->output()->need_stack_bang(bangsize))
6419 generate_stack_overflow_check(bangsize);
6420
6421 // n.b. frame size includes space for return pc and rfp
6422 const long framesize = C->output()->frame_size_in_bytes();
6423 build_frame(framesize);
6424
6425 if (C->needs_stack_repair()) {
6426 save_stack_increment(sp_inc, framesize);
6427 }
6428
6429 if (VerifyStackAtCalls) {
6430 Unimplemented();
6431 }
6432 }
6433 #endif // COMPILER2
6434
6435 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
6436 assert(InlineTypeReturnedAsFields, "Inline types should be returned as fields");
6437 // An inline type might be returned. If fields are in registers we
6438 // need to allocate an inline type instance and initialize it with
6439 // the value of the fields.
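  // On entry, bit 0 of r0 distinguishes the two cases: clear means r0 is
  // already a buffered oop, set means the fields are in registers (in the
  // interpreter case r0 then holds the tagged InlineKlass* of the return
  // type). On exit, r0 holds the buffered oop.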
6440 Label skip;
6441 // We only need a new buffered inline type if a buffered one was not already returned
6442 tbz(r0, 0, skip);
6443 int call_offset = -1;
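  // call_offset stays -1 unless the runtime fallback is reached from
  // compiled code; see the far_call in the slow path below.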
6444
6445 // Be careful not to clobber r1-r7, which hold the returned fields
6446 // Also do not use callee-saved registers, as these may be live in the interpreter
6447 Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
6448
6449 // The following code is similar to allocate_instance but has some slight differences:
6450 // e.g. the object size is never zero and is sometimes a constant, and storing the klass
6451 // pointer after allocation is unnecessary when vk != nullptr. allocate_instance cannot exploit these.
6452 Label slow_case;
6453 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6454 mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it when allocation fails
6455
6456 if (vk != nullptr) {
6457 // Called from C1, where the return type is statically known.
6458 movptr(klass, (intptr_t)vk->get_InlineKlass());
6459 jint obj_size = vk->layout_helper();
6460 assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6461 if (UseTLAB) {
6462 tlab_allocate(r0, noreg, obj_size, tmp1, tmp2, slow_case);
6463 } else {
6464 b(slow_case);
6465 }
6466 } else {
6467 // Called from the interpreter. r0 contains ((the InlineKlass* of the return type) | 0x01)
6468 andr(klass, r0, -2);
6469 ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
6470 if (UseTLAB) {
6471 tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
6472 } else {
6473 b(slow_case);
6474 }
6475 }
6476 if (UseTLAB) {
6477 // 2. Initialize buffered inline instance header
6478 Register buffer_obj = r0;
6479 mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
6480 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
6481 store_klass_gap(buffer_obj, zr);
6482 if (vk == nullptr) {
6483 // store_klass corrupts klass, so save it for later use (interpreter case only).
6484 mov(tmp1, klass);
6485 }
6486 store_klass(buffer_obj, klass);
6487 // 3. Initialize its fields with an inline class specific handler
6488 if (vk != nullptr) {
6489 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6490 } else {
6491 // tmp1 holds klass preserved above
6492 ldr(tmp1, Address(tmp1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
6493 ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
6494 blr(tmp1);
6495 }
6496
6497 membar(Assembler::StoreStore);
6498 b(skip);
6499 } else {
6500 // Must have already branched to slow_case above.
6501 DEBUG_ONLY(should_not_reach_here());
6502 }
6503 bind(slow_case);
6504 // We failed to allocate a new inline type, fall back to a runtime
6505 // call. Some oop fields may be live in registers but we can't tell
6506 // which. The runtime call will take care of preserving them across
6507 // a GC if one occurs.
6508 mov(r0, r0_preserved);
6509
6510 if (from_interpreter) {
6511 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
6512 } else {
6513 far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
6514 call_offset = offset();
6515 }
6516 membar(Assembler::StoreStore);
6517
6518 bind(skip);
6519 return call_offset;
6520 }
6521
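// The pack/unpack helpers below track a state per register/stack slot:
//   reg_readonly - still holds a source value that has not been consumed yet
//   reg_writable - its value has been consumed, so it may be overwritten
//   reg_written  - already holds its final destination value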
6522 // Move a value between registers/stack slots and update the reg_state
6523 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
6524 assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
6525 if (reg_state[to->value()] == reg_written) {
6526 return true; // Already written
6527 }
6528
6529 if (from != to && bt != T_VOID) {
6530 if (reg_state[to->value()] == reg_readonly) {
6531 return false; // Not yet writable
6532 }
6533 if (from->is_reg()) {
6534 if (to->is_reg()) {
6535 if (from->is_Register() && to->is_Register()) {
6536 mov(to->as_Register(), from->as_Register());
6537 } else if (from->is_FloatRegister() && to->is_FloatRegister()) {
6538 fmovd(to->as_FloatRegister(), from->as_FloatRegister());
6539 } else {
6540 ShouldNotReachHere();
6541 }
6542 } else {
6543 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
6544 Address to_addr = Address(sp, st_off);
6545 if (from->is_FloatRegister()) {
6546 if (bt == T_DOUBLE) {
6547 strd(from->as_FloatRegister(), to_addr);
6548 } else {
6549 assert(bt == T_FLOAT, "must be float");
6550 strs(from->as_FloatRegister(), to_addr);
6551 }
6552 } else {
6553 str(from->as_Register(), to_addr);
6554 }
6555 }
6556 } else {
6557 Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size);
6558 if (to->is_reg()) {
6559 if (to->is_FloatRegister()) {
6560 if (bt == T_DOUBLE) {
6561 ldrd(to->as_FloatRegister(), from_addr);
6562 } else {
6563 assert(bt == T_FLOAT, "must be float");
6564 ldrs(to->as_FloatRegister(), from_addr);
6565 }
6566 } else {
6567 ldr(to->as_Register(), from_addr);
6568 }
6569 } else {
6570 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
6571 ldr(rscratch1, from_addr);
6572 str(rscratch1, Address(sp, st_off));
6573 }
6574 }
6575 }
6576
6577 // Update register states
6578 reg_state[from->value()] = reg_writable;
6579 reg_state[to->value()] = reg_written;
6580 return true;
6581 }
6582
6583 // Calculate the extra stack space required for packing or unpacking inline
6584 // args and adjust the stack pointer
6585 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
6586 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
6587 sp_inc = align_up(sp_inc, StackAlignmentInBytes);
6588 assert(sp_inc > 0, "sanity");
6589
6590 // Save a copy of the FP and LR here for deoptimization patching and frame walking
6591 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
6592
6593 // Adjust the stack pointer. This will be repaired on return by MacroAssembler::remove_frame
6594 if (sp_inc < (1 << 9)) {
6595 sub(sp, sp, sp_inc); // Fits in an immediate
6596 } else {
6597 mov(rscratch1, sp_inc);
6598 sub(sp, sp, rscratch1);
6599 }
6600
6601 return sp_inc + 2 * wordSize; // Account for the FP/LR space
6602 }
6603
6604 // Read all fields from an inline type oop and store the values in registers/stack slots
6605 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
6606 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
6607 RegState reg_state[]) {
6608 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6609 assert(from->is_valid(), "source must be valid");
6610 bool progress = false;
6611 #ifdef ASSERT
6612 const int start_offset = offset();
6613 #endif
6614
6615 Label L_null, L_notNull;
6616 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
6617 Register tmp1 = r10;
6618 Register tmp2 = r11;
6619 Register fromReg = noreg;
6620 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
6621 bool done = true;
6622 bool mark_done = true;
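  // 'done' records whether every destination could be written in this pass;
  // 'mark_done' records whether the source may be released afterwards, which
  // is not the case while a destination other than 'from' itself is still
  // read-only (this helper will then be re-run with 'from' as the source).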
6623 VMReg toReg;
6624 BasicType bt;
6625 // Check if argument requires a null check
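  // (a signature entry with offset -1 is the synthetic IsInit slot that only
  // nullable inline type arguments have)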
6626 bool null_check = false;
6627 VMReg nullCheckReg;
6628 while (stream.next(nullCheckReg, bt)) {
6629 if (sig->at(stream.sig_index())._offset == -1) {
6630 null_check = true;
6631 break;
6632 }
6633 }
6634 stream.reset(sig_index, to_index);
6635 while (stream.next(toReg, bt)) {
6636 assert(toReg->is_valid(), "destination must be valid");
6637 int idx = (int)toReg->value();
6638 if (reg_state[idx] == reg_readonly) {
6639 if (idx != from->value()) {
6640 mark_done = false;
6641 }
6642 done = false;
6643 continue;
6644 } else if (reg_state[idx] == reg_written) {
6645 continue;
6646 }
6647 assert(reg_state[idx] == reg_writable, "must be writable");
6648 reg_state[idx] = reg_written;
6649 progress = true;
6650
6651 if (fromReg == noreg) {
6652 if (from->is_reg()) {
6653 fromReg = from->as_Register();
6654 } else {
6655 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
6656 ldr(tmp1, Address(sp, st_off));
6657 fromReg = tmp1;
6658 }
6659 if (null_check) {
6660 // Nullable inline type argument, emit null check
6661 cbz(fromReg, L_null);
6662 }
6663 }
6664 int off = sig->at(stream.sig_index())._offset;
6665 if (off == -1) {
6666 assert(null_check, "Missing null check");
6667 if (toReg->is_stack()) {
6668 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
6669 mov(tmp2, 1);
6670 str(tmp2, Address(sp, st_off));
6671 } else {
6672 mov(toReg->as_Register(), 1);
6673 }
6674 continue;
6675 }
6676 assert(off > 0, "offset in object should be positive");
6677 Address fromAddr = Address(fromReg, off);
6678 if (!toReg->is_FloatRegister()) {
6679 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
6680 if (is_reference_type(bt)) {
6681 load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
6682 } else {
6683 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
6684 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
6685 }
6686 if (toReg->is_stack()) {
6687 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
6688 str(dst, Address(sp, st_off));
6689 }
6690 } else if (bt == T_DOUBLE) {
6691 ldrd(toReg->as_FloatRegister(), fromAddr);
6692 } else {
6693 assert(bt == T_FLOAT, "must be float");
6694 ldrs(toReg->as_FloatRegister(), fromAddr);
6695 }
6696 }
6697 if (progress && null_check) {
6698 if (done) {
6699 b(L_notNull);
6700 bind(L_null);
6701 // Set IsInit field to zero to signal that the argument is null.
6702 // Also set all oop fields to zero to make the GC happy.
6703 stream.reset(sig_index, to_index);
6704 while (stream.next(toReg, bt)) {
6705 if (sig->at(stream.sig_index())._offset == -1 ||
6706 bt == T_OBJECT || bt == T_ARRAY) {
6707 if (toReg->is_stack()) {
6708 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
6709 str(zr, Address(sp, st_off));
6710 } else {
6711 mov(toReg->as_Register(), zr);
6712 }
6713 }
6714 }
6715 bind(L_notNull);
6716 } else {
6717 bind(L_null);
6718 }
6719 }
6720
6721 sig_index = stream.sig_index();
6722 to_index = stream.regs_index();
6723
6724 if (mark_done && reg_state[from->value()] != reg_written) {
6725 // This is okay because no one else will write to that slot
6726 reg_state[from->value()] = reg_writable;
6727 }
6728 from_index--;
6729 assert(progress || (start_offset == offset()), "should not emit code");
6730 return done;
6731 }
6732
6733 // Pack fields back into an inline type oop
6734 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
6735 VMRegPair* from, int from_count, int& from_index, VMReg to,
6736 RegState reg_state[], Register val_array) {
6737 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
6738 assert(to->is_valid(), "destination must be valid");
6739
6740 if (reg_state[to->value()] == reg_written) {
6741 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6742 return true; // Already written
6743 }
6744
6745 // The GC barrier expanded by store_heap_oop below may call into the
6746 // runtime so use callee-saved registers for any values that need to be
6747 // preserved. The GC barrier assembler should take care of saving the
6748 // Java argument registers.
6749 // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
6750 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
6751 Register val_obj_tmp = r21;
6752 Register from_reg_tmp = r22;
6753 Register tmp1 = r14;
6754 Register tmp2 = r13;
6755 Register tmp3 = r12;
6756 Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
6757
6758 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
6759
6760 if (reg_state[to->value()] == reg_readonly) {
6761 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
6762 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
6763 return false; // Not yet writable
6764 }
6765 val_obj = val_obj_tmp;
6766 }
6767
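  // Load the pre-allocated buffer for this inline argument
  // (element vtarg_index of val_array).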
6768 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
6769 load_heap_oop(val_obj, Address(val_array, index), tmp1, tmp2);
6770
6771 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
6772 VMReg fromReg;
6773 BasicType bt;
6774 Label L_null;
6775 while (stream.next(fromReg, bt)) {
6776 assert(fromReg->is_valid(), "source must be valid");
6777 reg_state[fromReg->value()] = reg_writable;
6778
6779 int off = sig->at(stream.sig_index())._offset;
6780 if (off == -1) {
6781 // Nullable inline type argument, emit null check
6782 Label L_notNull;
6783 if (fromReg->is_stack()) {
6784 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
6785 ldrb(tmp2, Address(sp, ld_off));
6786 cbnz(tmp2, L_notNull);
6787 } else {
6788 cbnz(fromReg->as_Register(), L_notNull);
6789 }
6790 mov(val_obj, 0);
6791 b(L_null);
6792 bind(L_notNull);
6793 continue;
6794 }
6795
6796 assert(off > 0, "offset in object should be positive");
6797 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
6798
6799 // Pack the scalarized field into the value object.
6800 Address dst(val_obj, off);
6801
6802 if (!fromReg->is_FloatRegister()) {
6803 Register src;
6804 if (fromReg->is_stack()) {
6805 src = from_reg_tmp;
6806 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
6807 load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
6808 } else {
6809 src = fromReg->as_Register();
6810 }
6811 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
6812 if (is_reference_type(bt)) {
6813 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
6814 } else {
6815 store_sized_value(dst, src, size_in_bytes);
6816 }
6817 } else if (bt == T_DOUBLE) {
6818 strd(fromReg->as_FloatRegister(), dst);
6819 } else {
6820 assert(bt == T_FLOAT, "must be float");
6821 strs(fromReg->as_FloatRegister(), dst);
6822 }
6823 }
6824 bind(L_null);
6825 sig_index = stream.sig_index();
6826 from_index = stream.regs_index();
6827
6828 assert(reg_state[to->value()] == reg_writable, "must have already been read");
6829 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
6830 assert(success, "to register must be writable");
6831
6832 return true;
6833 }
6834
6835 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6836 return (reg->is_FloatRegister()) ? v8->as_VMReg() : r14->as_VMReg();
6837 }
6838
6839 void MacroAssembler::cache_wb(Address line) {
6840 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
6841 assert(line.index() == noreg, "index should be noreg");
6842 assert(line.offset() == 0, "offset should be 0");
6843 // would like to assert this
6844 // assert(line._ext.shift == 0, "shift should be zero");
6845 if (VM_Version::supports_dcpop()) {
6846 // write back using DC CVAP (clean by virtual address to the point of persistence)
6847 dc(Assembler::CVAP, line.base());
6848 } else {
6849 // no need to generate anything as Unsafe.writebackMemory should
6850 // never invoke this stub
6851 }
6852 }
6853
6854 void MacroAssembler::cache_wbsync(bool is_pre) {
6855 // we only need a barrier post sync
6856 if (!is_pre) {
6857 membar(Assembler::AnyAny);
6858 }
7192 }
7193
7194 // Implements lightweight-locking.
7195 // Branches to slow upon failure to lock the object, with ZF cleared.
7196 // Falls through upon success with ZF set.
7197 //
7198 // - obj: the object to be locked
7199 // - hdr: the header, already loaded from obj, will be destroyed
7200 // - t1, t2: temporary registers, will be destroyed
7201 void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
7202 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
7203 assert_different_registers(obj, hdr, t1, t2, rscratch1);
7204
7205 // Check if we would have space on lock-stack for the object.
7206 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7207 cmpw(t1, (unsigned)LockStack::end_offset() - 1);
7208 br(Assembler::GT, slow);
7209
7210 // Load (object->mark() | 1) into hdr
7211 orr(hdr, hdr, markWord::unlocked_value);
7212 if (EnableValhalla) {
7213 // Mask inline_type bit such that we go to the slow path if object is an inline type
7214 andr(hdr, hdr, ~((int) markWord::inline_type_bit_in_place));
7215 }
7216
7217 // Clear the lock bits into t2, producing the locked mark word
7218 eor(t2, hdr, markWord::unlocked_value);
7219 // Try to swing header from unlocked to locked
7220 // Clobbers rscratch1 when UseLSE is false
7221 cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
7222 /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
7223 br(Assembler::NE, slow);
7224
7225 // After successful lock, push object on lock-stack
7226 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7227 str(obj, Address(rthread, t1));
7228 addw(t1, t1, oopSize);
7229 strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7230 }
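// Note: with lightweight locking the owning thread is identified solely by
// the entry on its lock-stack; the mark word itself only changes from
// 'unlocked' (lock bits 01) to 'locked' (lock bits 00).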
7231
7232 // Implements lightweight-unlocking.
7233 // Branches to slow upon failure, with ZF cleared.
7234 // Falls through upon success, with ZF set.
7235 //
7236 // - obj: the object to be unlocked