12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include <sys/types.h>
27
28 #include "precompiled.hpp"
29 #include "asm/assembler.hpp"
30 #include "asm/assembler.inline.hpp"
31 #include "ci/ciEnv.hpp"
32 #include "compiler/compileTask.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/barrierSetAssembler.hpp"
37 #include "gc/shared/cardTableBarrierSet.hpp"
38 #include "gc/shared/cardTable.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/tlab_globals.hpp"
41 #include "interpreter/bytecodeHistogram.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "jvm.h"
44 #include "memory/resourceArea.hpp"
45 #include "memory/universe.hpp"
46 #include "nativeInst_aarch64.hpp"
47 #include "oops/accessDecorators.hpp"
48 #include "oops/compressedKlass.inline.hpp"
49 #include "oops/compressedOops.inline.hpp"
50 #include "oops/klass.inline.hpp"
51 #include "runtime/continuation.hpp"
52 #include "runtime/icache.hpp"
53 #include "runtime/interfaceSupport.inline.hpp"
54 #include "runtime/javaThread.hpp"
55 #include "runtime/jniHandles.inline.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/stubRoutines.hpp"
58 #include "utilities/powerOfTwo.hpp"
59 #ifdef COMPILER1
60 #include "c1/c1_LIRAssembler.hpp"
61 #endif
62 #ifdef COMPILER2
63 #include "oops/oop.hpp"
64 #include "opto/compile.hpp"
65 #include "opto/node.hpp"
66 #include "opto/output.hpp"
67 #endif
68
69 #ifdef PRODUCT
70 #define BLOCK_COMMENT(str) /* nothing */
71 #else
72 #define BLOCK_COMMENT(str) block_comment(str)
73 #endif
74 #define STOP(str) stop(str);
75 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
76
77 #ifdef ASSERT
78 extern "C" void disnm(intptr_t p);
1104 }
1105
1106 void MacroAssembler::post_call_nop() {
1107 if (!Continuations::enabled()) {
1108 return;
1109 }
1110 InstructionMark im(this);
1111 relocate(post_call_nop_Relocation::spec());
1112 InlineSkippedInstructionsCounter skipCounter(this);
1113 nop();
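// The two movk writes to zr below are architectural no-ops; their 16-bit
// immediate fields leave room for NativePostCallNop to record frame metadata
// when the nop is patched later.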
1114 movk(zr, 0);
1115 movk(zr, 0);
1116 }
1117
1118 // these are no-ops overridden by InterpreterMacroAssembler
1119
1120 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
1121
1122 void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
1123
1124 // Look up the method for a megamorphic invokeinterface call.
1125 // The target method is determined by <intf_klass, itable_index>.
1126 // The receiver klass is in recv_klass.
1127 // On success, the result will be in method_result, and execution falls through.
1128 // On failure, execution transfers to the given label.
1129 void MacroAssembler::lookup_interface_method(Register recv_klass,
1130 Register intf_klass,
1131 RegisterOrConstant itable_index,
1132 Register method_result,
1133 Register scan_temp,
1134 Label& L_no_such_interface,
1135 bool return_method) {
1136 assert_different_registers(recv_klass, intf_klass, scan_temp);
1137 assert_different_registers(method_result, intf_klass, scan_temp);
1138 assert(recv_klass != method_result || !return_method,
1139 "recv_klass can be destroyed when method isn't needed");
1140 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
1141 "caller must use same register for non-constant itable index as for method");
1142
1143 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
1453 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1454 subs(zr, scratch, InstanceKlass::fully_initialized);
1455 br(Assembler::EQ, *L_fast_path);
1456
1457 // Fast path check: current thread is initializer thread
1458 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1459 cmp(rthread, scratch);
1460
1461 if (L_slow_path == &L_fallthrough) {
1462 br(Assembler::EQ, *L_fast_path);
1463 bind(*L_slow_path);
1464 } else if (L_fast_path == &L_fallthrough) {
1465 br(Assembler::NE, *L_slow_path);
1466 bind(*L_fast_path);
1467 } else {
1468 Unimplemented();
1469 }
1470 }
1471
1472 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
1473 if (!VerifyOops) return;
1474
1475 // Pass register number to verify_oop_subroutine
1476 const char* b = nullptr;
1477 {
1478 ResourceMark rm;
1479 stringStream ss;
1480 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
1481 b = code_string(ss.as_string());
1482 }
1483 BLOCK_COMMENT("verify_oop {");
1484
1485 strip_return_address(); // This might happen within a stack frame.
1486 protect_return_address();
1487 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1488 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1489
1490 mov(r0, reg);
1491 movptr(rscratch1, (uintptr_t)(address)b);
1492
1493 // call indirectly to solve generation ordering problem
1494 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1495 ldr(rscratch2, Address(rscratch2));
1496 blr(rscratch2);
1497
1498 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1499 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1500 authenticate_return_address();
1501
1502 BLOCK_COMMENT("} verify_oop");
1503 }
1504
1505 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
1506 if (!VerifyOops) return;
1507
1508 const char* b = nullptr;
1509 {
1510 ResourceMark rm;
1511 stringStream ss;
1512 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
1513 b = code_string(ss.as_string());
1514 }
1515 BLOCK_COMMENT("verify_oop_addr {");
1516
1517 strip_return_address(); // This might happen within a stack frame.
1518 protect_return_address();
1519 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1520 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1521
1522 // addr may contain sp so we will have to adjust it based on the
1523 // pushes that we just did.
1524 if (addr.uses(sp)) {
1525 lea(r0, addr);
1526 ldr(r0, Address(r0, 4 * wordSize));
1584 call_VM_leaf_base(entry_point, 1);
1585 }
1586
1587 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1588 assert_different_registers(arg_1, c_rarg0);
1589 pass_arg0(this, arg_0);
1590 pass_arg1(this, arg_1);
1591 call_VM_leaf_base(entry_point, 2);
1592 }
1593
1594 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1595 Register arg_1, Register arg_2) {
1596 assert_different_registers(arg_1, c_rarg0);
1597 assert_different_registers(arg_2, c_rarg0, c_rarg1);
1598 pass_arg0(this, arg_0);
1599 pass_arg1(this, arg_1);
1600 pass_arg2(this, arg_2);
1601 call_VM_leaf_base(entry_point, 3);
1602 }
1603
1604 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1605 pass_arg0(this, arg_0);
1606 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1607 }
1608
1609 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1610
1611 assert_different_registers(arg_0, c_rarg1);
1612 pass_arg1(this, arg_1);
1613 pass_arg0(this, arg_0);
1614 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1615 }
1616
1617 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1618 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1619 assert_different_registers(arg_1, c_rarg2);
1620 pass_arg2(this, arg_2);
1621 pass_arg1(this, arg_1);
1622 pass_arg0(this, arg_0);
1623 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1629 assert_different_registers(arg_2, c_rarg3);
1630 pass_arg3(this, arg_3);
1631 pass_arg2(this, arg_2);
1632 pass_arg1(this, arg_1);
1633 pass_arg0(this, arg_0);
1634 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1635 }
1636
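// An access at [reg + offset] only faults in the guard page at address zero
// when the offset is small; needs_explicit_null_check() flags offsets beyond
// that range, for which reg itself must be dereferenced.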
1637 void MacroAssembler::null_check(Register reg, int offset) {
1638 if (needs_explicit_null_check(offset)) {
1639 // provoke OS null exception if reg is null by
1640 // accessing M[reg] w/o changing any registers
1641 // NOTE: this is plenty to provoke a segv
1642 ldr(zr, Address(reg));
1643 } else {
1644 // nothing to do, (later) access of M[reg + offset]
1645 // will provoke OS null exception if reg is null
1646 }
1647 }
1648
1649 // MacroAssembler protected routines needed to implement
1650 // public methods
1651
1652 void MacroAssembler::mov(Register r, Address dest) {
1653 code_section()->relocate(pc(), dest.rspec());
1654 uint64_t imm64 = (uint64_t)dest.target();
1655 movptr(r, imm64);
1656 }
1657
1658 // Move a constant pointer into r. In AArch64 mode the virtual
1659 // address space is 48 bits in size, so we only need three
1660 // instructions to create a patchable instruction sequence that can
1661 // reach anywhere.
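// For example, movptr(r0, 0x123456789ab) emits roughly:
//   movz r0, #0x89ab
//   movk r0, #0x4567, lsl #16
//   movk r0, #0x0123, lsl #32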
1662 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1663 #ifndef PRODUCT
1664 {
1665 char buffer[64];
1666 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
1667 block_comment(buffer);
1668 }
4294 adrp(rscratch1, src2, offset);
4295 ldr(rscratch1, Address(rscratch1, offset));
4296 cmp(src1, rscratch1);
4297 }
4298
4299 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4300 cmp(obj1, obj2);
4301 }
4302
4303 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4304 load_method_holder(rresult, rmethod);
4305 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4306 }
4307
4308 void MacroAssembler::load_method_holder(Register holder, Register method) {
4309 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
4310 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
4311 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
4312 }
4313
4314 void MacroAssembler::load_klass(Register dst, Register src) {
4315 if (UseCompressedClassPointers) {
4316 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4317 decode_klass_not_null(dst);
4318 } else {
4319 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4320 }
4321 }
4322
4323 // ((OopHandle)result).resolve();
4324 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
4325 // OopHandle::resolve is an indirection.
4326 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
4327 }
4328
4329 // ((WeakHandle)result).resolve();
4330 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
4331 assert_different_registers(result, tmp1, tmp2);
4332 Label resolved;
4333
4352
4353 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4354 if (UseCompressedClassPointers) {
4355 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4356 if (CompressedKlassPointers::base() == nullptr) {
4357 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4358 return;
4359 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4360 && CompressedKlassPointers::shift() == 0) {
4361 // Only the bottom 32 bits matter
4362 cmpw(trial_klass, tmp);
4363 return;
4364 }
4365 decode_klass_not_null(tmp);
4366 } else {
4367 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4368 }
4369 cmp(trial_klass, tmp);
4370 }
4371
4372 void MacroAssembler::store_klass(Register dst, Register src) {
4373 // FIXME: Should this be a store release? Concurrent GCs assume the
4374 // klass length is valid if the klass field is not null.
4375 if (UseCompressedClassPointers) {
4376 encode_klass_not_null(src);
4377 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4378 } else {
4379 str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4380 }
4381 }
4382
4383 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4384 if (UseCompressedClassPointers) {
4385 // Store to klass gap in destination
4386 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4387 }
4388 }
4389
4390 // Algorithm must match CompressedOops::encode.
4391 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4676 if (as_raw) {
4677 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
4678 } else {
4679 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
4680 }
4681 }
4682
4683 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4684 Address dst, Register val,
4685 Register tmp1, Register tmp2, Register tmp3) {
4686 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4687 decorators = AccessInternal::decorator_fixup(decorators, type);
4688 bool as_raw = (decorators & AS_RAW) != 0;
4689 if (as_raw) {
4690 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
4691 } else {
4692 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
4693 }
4694 }
4695
4696 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4697 Register tmp2, DecoratorSet decorators) {
4698 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
4699 }
4700
4701 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4702 Register tmp2, DecoratorSet decorators) {
4703 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
4704 }
4705
4706 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
4707 Register tmp2, Register tmp3, DecoratorSet decorators) {
4708 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
4709 }
4710
4711 // Used for storing nulls.
4712 void MacroAssembler::store_heap_oop_null(Address dst) {
4713 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
4714 }
4715
4752 oop_index = oop_recorder()->allocate_metadata_index(obj);
4753 } else {
4754 oop_index = oop_recorder()->find_index(obj);
4755 }
4756 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
4757 mov(dst, Address((address)obj, rspec));
4758 }
4759
4760 Address MacroAssembler::constant_oop_address(jobject obj) {
4761 #ifdef ASSERT
4762 {
4763 ThreadInVMfromUnknown tiv;
4764 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
4765 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
4766 }
4767 #endif
4768 int oop_index = oop_recorder()->find_index(obj);
4769 return Address((address)obj, oop_Relocation::spec(oop_index));
4770 }
4771
4772 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4773 void MacroAssembler::tlab_allocate(Register obj,
4774 Register var_size_in_bytes,
4775 int con_size_in_bytes,
4776 Register t1,
4777 Register t2,
4778 Label& slow_case) {
4779 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4780 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4781 }
4782
4783 void MacroAssembler::verify_tlab() {
4784 #ifdef ASSERT
4785 if (UseTLAB && VerifyOops) {
4786 Label next, ok;
4787
4788 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
4789
4790 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4791 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
4792 cmp(rscratch2, rscratch1);
4793 br(Assembler::HS, next);
4794 STOP("assert(top >= start)");
4795 should_not_reach_here();
4796
4797 bind(next);
4798 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
4799 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4800 cmp(rscratch2, rscratch1);
4801 br(Assembler::HS, ok);
4802 STOP("assert(top <= end)");
4803 should_not_reach_here();
4804
4805 bind(ok);
4806 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
4807 }
4808 #endif
4809 }
4810
4811 // Writes to successive stack pages until the requested offset is reached,
4812 // to check for stack overflow plus shadow pages. This clobbers tmp.
4813 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
4814 assert_different_registers(tmp, size, rscratch1);
4815 mov(tmp, sp);
4816 // Bang stack for total size given plus shadow page size.
4817 // Bang one page at a time because large size can bang beyond yellow and
4818 // red zones.
4819 Label loop;
4820 mov(rscratch1, (int)os::vm_page_size());
4821 bind(loop);
4822 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
4823 subsw(size, size, rscratch1);
4824 str(size, Address(tmp));
4825 br(Assembler::GT, loop);
4826
4827 // Bang down shadow pages too.
4828 // At this point, (tmp-0) is the last address touched, so don't
4829 // touch it again. (It was touched as (tmp-pagesize) but then tmp
4830 // was post-decremented.) Skip this address by starting at i=1, and
4916 }
4917
4918 void MacroAssembler::remove_frame(int framesize) {
4919 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
4920 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
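// The two size thresholds below reflect encoding limits: ldp's scaled signed
// offset reaches just under 1 << 9 bytes, and add's unsigned immediate just
// under 1 << 12; anything larger goes through a scratch register.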
4921 if (framesize < ((1 << 9) + 2 * wordSize)) {
4922 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4923 add(sp, sp, framesize);
4924 } else {
4925 if (framesize < ((1 << 12) + 2 * wordSize))
4926 add(sp, sp, framesize - 2 * wordSize);
4927 else {
4928 mov(rscratch1, framesize - 2 * wordSize);
4929 add(sp, sp, rscratch1);
4930 }
4931 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
4932 }
4933 authenticate_return_address();
4934 }
4935
4936
4937 // This method counts leading positive bytes (highest bit not set) in the provided byte array
4938 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
4939 // The simple and most common case, a small aligned array not at the end of a
4940 // memory page, is handled here. All other cases are handled in the stub.
4941 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
4942 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
4943 assert_different_registers(ary1, len, result);
4944
4945 mov(result, len);
4946 cmpw(len, 0);
4947 br(LE, DONE);
4948 cmpw(len, 4 * wordSize);
4949 br(GE, STUB_LONG); // size > 32 then go to stub
4950
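// The shift below moves the in-page offset of ary1 into the top bits; adding
// (4 * wordSize) << shift then sets the carry exactly when a 32-byte read
// starting at ary1 would run past the end of the page.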
4951 int shift = 64 - exact_log2(os::vm_page_size());
4952 lsl(rscratch1, ary1, shift);
4953 mov(rscratch2, (size_t)(4 * wordSize) << shift);
4954 adds(rscratch2, rscratch1, rscratch2); // At end of page?
4955 br(CS, STUB); // at the end of page then go to stub
5830 // On other systems, the helper is a usual C function.
5831 //
5832 void MacroAssembler::get_thread(Register dst) {
5833 RegSet saved_regs =
5834 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
5835 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
5836
5837 protect_return_address();
5838 push(saved_regs, sp);
5839
5840 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5841 blr(lr);
5842 if (dst != c_rarg0) {
5843 mov(dst, c_rarg0);
5844 }
5845
5846 pop(saved_regs, sp);
5847 authenticate_return_address();
5848 }
5849
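// DC CVAP (ARMv8.2 DCPoP) cleans a cache line by virtual address to the point
// of persistence; when DCPoP is unsupported, Unsafe.writebackMemory never
// reaches this code, so nothing needs to be emitted.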
5850 void MacroAssembler::cache_wb(Address line) {
5851 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
5852 assert(line.index() == noreg, "index should be noreg");
5853 assert(line.offset() == 0, "offset should be 0");
5854 // would like to assert this
5855 // assert(line._ext.shift == 0, "shift should be zero");
5856 if (VM_Version::supports_dcpop()) {
5857 // writeback using clear virtual address to point of persistence
5858 dc(Assembler::CVAP, line.base());
5859 } else {
5860 // no need to generate anything as Unsafe.writebackMemory should
5861 // never invoke this stub
5862 }
5863 }
5864
5865 void MacroAssembler::cache_wbsync(bool is_pre) {
5866 // we only need a barrier post sync
5867 if (!is_pre) {
5868 membar(Assembler::AnyAny);
5869 }
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include <sys/types.h>
27
28 #include "precompiled.hpp"
29 #include "asm/assembler.hpp"
30 #include "asm/assembler.inline.hpp"
31 #include "ci/ciEnv.hpp"
32 #include "ci/ciInlineKlass.hpp"
33 #include "compiler/compileTask.hpp"
34 #include "compiler/disassembler.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/barrierSet.hpp"
37 #include "gc/shared/barrierSetAssembler.hpp"
38 #include "gc/shared/cardTableBarrierSet.hpp"
39 #include "gc/shared/cardTable.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/tlab_globals.hpp"
42 #include "interpreter/bytecodeHistogram.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "jvm.h"
45 #include "memory/resourceArea.hpp"
46 #include "memory/universe.hpp"
47 #include "nativeInst_aarch64.hpp"
48 #include "oops/accessDecorators.hpp"
49 #include "oops/compressedKlass.inline.hpp"
50 #include "oops/compressedOops.inline.hpp"
51 #include "oops/klass.inline.hpp"
52 #include "runtime/continuation.hpp"
53 #include "runtime/icache.hpp"
54 #include "runtime/interfaceSupport.inline.hpp"
55 #include "runtime/javaThread.hpp"
56 #include "runtime/jniHandles.inline.hpp"
57 #include "runtime/sharedRuntime.hpp"
58 #include "runtime/signature_cc.hpp"
59 #include "runtime/stubRoutines.hpp"
60 #include "utilities/powerOfTwo.hpp"
61 #include "vmreg_aarch64.inline.hpp"
62 #ifdef COMPILER1
63 #include "c1/c1_LIRAssembler.hpp"
64 #endif
65 #ifdef COMPILER2
66 #include "oops/oop.hpp"
67 #include "opto/compile.hpp"
68 #include "opto/node.hpp"
69 #include "opto/output.hpp"
70 #endif
71
72 #ifdef PRODUCT
73 #define BLOCK_COMMENT(str) /* nothing */
74 #else
75 #define BLOCK_COMMENT(str) block_comment(str)
76 #endif
77 #define STOP(str) stop(str);
78 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
79
80 #ifdef ASSERT
81 extern "C" void disnm(intptr_t p);
1107 }
1108
1109 void MacroAssembler::post_call_nop() {
1110 if (!Continuations::enabled()) {
1111 return;
1112 }
1113 InstructionMark im(this);
1114 relocate(post_call_nop_Relocation::spec());
1115 InlineSkippedInstructionsCounter skipCounter(this);
1116 nop();
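// The two movk writes to zr below are architectural no-ops; their 16-bit
// immediate fields leave room for NativePostCallNop to record frame metadata
// when the nop is patched later.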
1117 movk(zr, 0);
1118 movk(zr, 0);
1119 }
1120
1121 // these are no-ops overridden by InterpreterMacroAssembler
1122
1123 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
1124
1125 void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
1126
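// Loads the pre-allocated default instance of an inline klass: its offset is
// recorded in the InlineKlass fixed block, and the instance itself is reached
// through the klass mirror at that offset.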
1127 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
1128 #ifdef ASSERT
1129 {
1130 Label done_check;
1131 test_klass_is_inline_type(inline_klass, temp_reg, done_check);
1132 stop("get_default_value_oop from non inline type klass");
1133 bind(done_check);
1134 }
1135 #endif
1136 Register offset = temp_reg;
1137 // Getting the offset of the pre-allocated default value
1138 ldr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
1139 ldr(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
1140
1141 // Getting the mirror
1142 ldr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
1143 resolve_oop_handle(obj, inline_klass, temp_reg);
1144
1145 // Getting the pre-allocated default value from the mirror
1146 Address field(obj, offset);
1147 load_heap_oop(obj, field, inline_klass, rscratch2);
1148 }
1149
1150 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
1151 #ifdef ASSERT
1152 {
1153 Label done_check;
1154 test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
1155 stop("get_empty_value from non-empty inline klass");
1156 bind(done_check);
1157 }
1158 #endif
1159 get_default_value_oop(inline_klass, temp_reg, obj);
1160 }
1161
1162 // Look up the method for a megamorphic invokeinterface call.
1163 // The target method is determined by <intf_klass, itable_index>.
1164 // The receiver klass is in recv_klass.
1165 // On success, the result will be in method_result, and execution falls through.
1166 // On failure, execution transfers to the given label.
1167 void MacroAssembler::lookup_interface_method(Register recv_klass,
1168 Register intf_klass,
1169 RegisterOrConstant itable_index,
1170 Register method_result,
1171 Register scan_temp,
1172 Label& L_no_such_interface,
1173 bool return_method) {
1174 assert_different_registers(recv_klass, intf_klass, scan_temp);
1175 assert_different_registers(method_result, intf_klass, scan_temp);
1176 assert(recv_klass != method_result || !return_method,
1177 "recv_klass can be destroyed when method isn't needed");
1178 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
1179 "caller must use same register for non-constant itable index as for method");
1180
1181 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
1491 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1492 subs(zr, scratch, InstanceKlass::fully_initialized);
1493 br(Assembler::EQ, *L_fast_path);
1494
1495 // Fast path check: current thread is initializer thread
1496 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1497 cmp(rthread, scratch);
1498
1499 if (L_slow_path == &L_fallthrough) {
1500 br(Assembler::EQ, *L_fast_path);
1501 bind(*L_slow_path);
1502 } else if (L_fast_path == &L_fallthrough) {
1503 br(Assembler::NE, *L_slow_path);
1504 bind(*L_fast_path);
1505 } else {
1506 Unimplemented();
1507 }
1508 }
1509
1510 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
1511 if (!VerifyOops || VerifyAdapterSharing) {
1512 // The address of the code string below confuses VerifyAdapterSharing
1513 // because it can differ between otherwise equivalent adapters.
1514 return;
1515 }
1516
1517 // Pass register number to verify_oop_subroutine
1518 const char* b = nullptr;
1519 {
1520 ResourceMark rm;
1521 stringStream ss;
1522 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
1523 b = code_string(ss.as_string());
1524 }
1525 BLOCK_COMMENT("verify_oop {");
1526
1527 strip_return_address(); // This might happen within a stack frame.
1528 protect_return_address();
1529 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1530 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1531
1532 mov(r0, reg);
1533 movptr(rscratch1, (uintptr_t)(address)b);
1534
1535 // call indirectly to solve generation ordering problem
1536 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1537 ldr(rscratch2, Address(rscratch2));
1538 blr(rscratch2);
1539
1540 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1541 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1542 authenticate_return_address();
1543
1544 BLOCK_COMMENT("} verify_oop");
1545 }
1546
1547 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
1548 if (!VerifyOops || VerifyAdapterSharing) {
1549 // The address of the code string below confuses VerifyAdapterSharing
1550 // because it can differ between otherwise equivalent adapters.
1551 return;
1552 }
1553
1554 const char* b = nullptr;
1555 {
1556 ResourceMark rm;
1557 stringStream ss;
1558 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
1559 b = code_string(ss.as_string());
1560 }
1561 BLOCK_COMMENT("verify_oop_addr {");
1562
1563 strip_return_address(); // This might happen within a stack frame.
1564 protect_return_address();
1565 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1566 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1567
1568 // addr may contain sp so we will have to adjust it based on the
1569 // pushes that we just did.
1570 if (addr.uses(sp)) {
1571 lea(r0, addr);
1572 ldr(r0, Address(r0, 4 * wordSize));
1630 call_VM_leaf_base(entry_point, 1);
1631 }
1632
1633 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1634 assert_different_registers(arg_1, c_rarg0);
1635 pass_arg0(this, arg_0);
1636 pass_arg1(this, arg_1);
1637 call_VM_leaf_base(entry_point, 2);
1638 }
1639
1640 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1641 Register arg_1, Register arg_2) {
1642 assert_different_registers(arg_1, c_rarg0);
1643 assert_different_registers(arg_2, c_rarg0, c_rarg1);
1644 pass_arg0(this, arg_0);
1645 pass_arg1(this, arg_1);
1646 pass_arg2(this, arg_2);
1647 call_VM_leaf_base(entry_point, 3);
1648 }
1649
1650 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1651 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1652 }
1653
1654 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1655 pass_arg0(this, arg_0);
1656 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1657 }
1658
1659 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1660
1661 assert_different_registers(arg_0, c_rarg1);
1662 pass_arg1(this, arg_1);
1663 pass_arg0(this, arg_0);
1664 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1665 }
1666
1667 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1668 assert_different_registers(arg_0, c_rarg1, c_rarg2);
1669 assert_different_registers(arg_1, c_rarg2);
1670 pass_arg2(this, arg_2);
1671 pass_arg1(this, arg_1);
1672 pass_arg0(this, arg_0);
1673 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1679 assert_different_registers(arg_2, c_rarg3);
1680 pass_arg3(this, arg_3);
1681 pass_arg2(this, arg_2);
1682 pass_arg1(this, arg_1);
1683 pass_arg0(this, arg_0);
1684 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1685 }
1686
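// An access at [reg + offset] only faults in the guard page at address zero
// when the offset is small; needs_explicit_null_check() flags offsets beyond
// that range, for which reg itself must be dereferenced.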
1687 void MacroAssembler::null_check(Register reg, int offset) {
1688 if (needs_explicit_null_check(offset)) {
1689 // provoke OS null exception if reg is null by
1690 // accessing M[reg] w/o changing any registers
1691 // NOTE: this is plenty to provoke a segv
1692 ldr(zr, Address(reg));
1693 } else {
1694 // nothing to do, (later) access of M[reg + offset]
1695 // will provoke OS null exception if reg is null
1696 }
1697 }
1698
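// Inline-type instances are tagged in the low bits of the mark word, so
// comparing against markWord::inline_type_pattern identifies them without
// loading the klass.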
1699 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
1700 assert_different_registers(markword, rscratch2);
1701 andr(markword, markword, markWord::inline_type_mask_in_place);
1702 mov(rscratch2, markWord::inline_type_pattern);
1703 cmp(markword, rscratch2);
1704 br(Assembler::EQ, is_inline_type);
1705 }
1706
1707 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
1708 ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
1709 andr(temp_reg, temp_reg, JVM_ACC_VALUE);
1710 cbnz(temp_reg, is_inline_type);
1711 }
1712
1713 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
1714 assert_different_registers(tmp, rscratch1);
1715 cbz(object, not_inline_type);
1716 const int is_inline_type_mask = markWord::inline_type_pattern;
1717 ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
1718 mov(rscratch1, is_inline_type_mask);
1719 andr(tmp, tmp, rscratch1);
1720 cmp(tmp, rscratch1);
1721 br(Assembler::NE, not_inline_type);
1722 }
1723
1724 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
1725 #ifdef ASSERT
1726 {
1727 Label done_check;
1728 test_klass_is_inline_type(klass, temp_reg, done_check);
1729 stop("test_klass_is_empty_inline_type with non inline type klass");
1730 bind(done_check);
1731 }
1732 #endif
1733 ldrw(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
1734 andr(temp_reg, temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
1735 cbnz(temp_reg, is_empty_inline_type);
1736 }
1737
1738 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
1739 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1740 tbnz(flags, ConstantPoolCacheEntry::is_null_free_inline_type_shift, is_null_free_inline_type);
1741 }
1742
1743 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
1744 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1745 tbz(flags, ConstantPoolCacheEntry::is_null_free_inline_type_shift, not_null_free_inline_type);
1746 }
1747
1748 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
1749 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1750 tbnz(flags, ConstantPoolCacheEntry::is_flat_shift, is_flat);
1751 }
1752
1753 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
1754 Label test_mark_word;
1755 // load mark word
1756 ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
1757 // if the mark word is unlocked it already holds the bits we need
1758 tst(temp_reg, markWord::unlocked_value);
1759 br(Assembler::NE, test_mark_word);
1760 // slow path: the mark word may be displaced, so use the klass prototype header
1761 load_prototype_header(temp_reg, oop);
1762
1763 bind(test_mark_word);
1764 andr(temp_reg, temp_reg, test_bit);
1765 if (jmp_set) {
1766 cbnz(temp_reg, jmp_label);
1767 } else {
1768 cbz(temp_reg, jmp_label);
1769 }
1770 }
1771
1772 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) {
1773 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
1774 }
1775
1776 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
1777 Label& is_non_flat_array) {
1778 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
1779 }
1780
1781 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
1782 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
1783 }
1784
1785 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
1786 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
1787 }
1788
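// The *_layout variants below test the same array properties on a klass's
// layout helper (lh) rather than on an oop's mark word, for callers that
// already hold the layout helper in a register.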
1789 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
1790 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
1791 br(Assembler::NE, is_flat_array);
1792 }
1793
1794 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
1795 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
1796 br(Assembler::EQ, is_non_flat_array);
1797 }
1798
1799 void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
1800 tst(lh, Klass::_lh_null_free_array_bit_inplace);
1801 br(Assembler::NE, is_null_free_array);
1802 }
1803
1804 void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
1805 tst(lh, Klass::_lh_null_free_array_bit_inplace);
1806 br(Assembler::EQ, is_non_null_free_array);
1807 }
1808
1809 // MacroAssembler protected routines needed to implement
1810 // public methods
1811
1812 void MacroAssembler::mov(Register r, Address dest) {
1813 code_section()->relocate(pc(), dest.rspec());
1814 uint64_t imm64 = (uint64_t)dest.target();
1815 movptr(r, imm64);
1816 }
1817
1818 // Move a constant pointer into r. In AArch64 mode the virtual
1819 // address space is 48 bits in size, so we only need three
1820 // instructions to create a patchable instruction sequence that can
1821 // reach anywhere.
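// For example, movptr(r0, 0x123456789ab) emits roughly:
//   movz r0, #0x89ab
//   movk r0, #0x4567, lsl #16
//   movk r0, #0x0123, lsl #32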
1822 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1823 #ifndef PRODUCT
1824 {
1825 char buffer[64];
1826 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
1827 block_comment(buffer);
1828 }
4454 adrp(rscratch1, src2, offset);
4455 ldr(rscratch1, Address(rscratch1, offset));
4456 cmp(src1, rscratch1);
4457 }
4458
4459 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
4460 cmp(obj1, obj2);
4461 }
4462
4463 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
4464 load_method_holder(rresult, rmethod);
4465 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
4466 }
4467
4468 void MacroAssembler::load_method_holder(Register holder, Register method) {
4469 ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
4470 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
4471 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
4472 }
4473
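// Loads the raw klass word (still compressed when UseCompressedClassPointers
// is set); use load_klass below when a decoded Klass* is needed.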
4474 void MacroAssembler::load_metadata(Register dst, Register src) {
4475 if (UseCompressedClassPointers) {
4476 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4477 } else {
4478 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4479 }
4480 }
4481
4482 void MacroAssembler::load_klass(Register dst, Register src) {
4483 if (UseCompressedClassPointers) {
4484 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4485 decode_klass_not_null(dst);
4486 } else {
4487 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4488 }
4489 }
4490
4491 // ((OopHandle)result).resolve();
4492 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
4493 // OopHandle::resolve is an indirection.
4494 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
4495 }
4496
4497 // ((WeakHandle)result).resolve();
4498 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
4499 assert_different_registers(result, tmp1, tmp2);
4500 Label resolved;
4501
4520
4521 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4522 if (UseCompressedClassPointers) {
4523 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4524 if (CompressedKlassPointers::base() == nullptr) {
4525 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4526 return;
4527 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4528 && CompressedKlassPointers::shift() == 0) {
4529 // Only the bottom 32 bits matter
4530 cmpw(trial_klass, tmp);
4531 return;
4532 }
4533 decode_klass_not_null(tmp);
4534 } else {
4535 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4536 }
4537 cmp(trial_klass, tmp);
4538 }
4539
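// The prototype mark word stored in the klass carries the inline-type and
// flat/null-free array bits consulted by the test_* helpers above.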
4540 void MacroAssembler::load_prototype_header(Register dst, Register src) {
4541 load_klass(dst, src);
4542 ldr(dst, Address(dst, Klass::prototype_header_offset()));
4543 }
4544
4545 void MacroAssembler::store_klass(Register dst, Register src) {
4546 // FIXME: Should this be a store release? Concurrent GCs assume the
4547 // klass length is valid if the klass field is not null.
4548 if (UseCompressedClassPointers) {
4549 encode_klass_not_null(src);
4550 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4551 } else {
4552 str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4553 }
4554 }
4555
4556 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4557 if (UseCompressedClassPointers) {
4558 // Store to klass gap in destination
4559 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4560 }
4561 }
4562
4563 // Algorithm must match CompressedOops::encode.
4564 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4849 if (as_raw) {
4850 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
4851 } else {
4852 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
4853 }
4854 }
4855
4856 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4857 Address dst, Register val,
4858 Register tmp1, Register tmp2, Register tmp3) {
4859 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4860 decorators = AccessInternal::decorator_fixup(decorators, type);
4861 bool as_raw = (decorators & AS_RAW) != 0;
4862 if (as_raw) {
4863 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
4864 } else {
4865 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
4866 }
4867 }
4868
4869 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
4870 Register inline_klass) {
4871 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4872 bs->value_copy(this, decorators, src, dst, inline_klass);
4873 }
4874
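// A buffered inline type's field payload starts at a klass-dependent offset
// from the oop; the InlineKlass fixed block caches that offset, and
// data_for_oop below turns an oop into a pointer to its payload.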
4875 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
4876 ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
4877 ldrw(offset, Address(offset, InlineKlass::first_field_offset_offset()));
4878 }
4879
4880 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
4881 // ((address) (void*) o) + vk->first_field_offset();
4882 Register offset = (data == oop) ? rscratch1 : data;
4883 first_field_offset(inline_klass, offset);
4884 if (data == oop) {
4885 add(data, data, offset);
4886 } else {
4887 lea(data, Address(oop, offset));
4888 }
4889 }
4890
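// Computes the address of element 'index' of a flat value array:
// array + base_offset + (index << log2(element_size)), with the element size
// taken from the array klass's layout helper.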
4891 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
4892 Register index, Register data) {
4893 assert_different_registers(array, array_klass, index);
4894 assert_different_registers(rscratch1, array, index);
4895
4896 // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
4897 ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset()));
4898
4899 // Klass::layout_helper_log2_element_size(lh)
4900 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
4901 lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift);
4902 andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask);
4903 lslv(index, index, rscratch1);
4904
4905 add(data, array, index);
4906 add(data, data, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT));
4907 }
4908
4909 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4910 Register tmp2, DecoratorSet decorators) {
4911 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
4912 }
4913
4914 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4915 Register tmp2, DecoratorSet decorators) {
4916 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
4917 }
4918
4919 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
4920 Register tmp2, Register tmp3, DecoratorSet decorators) {
4921 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
4922 }
4923
4924 // Used for storing nulls.
4925 void MacroAssembler::store_heap_oop_null(Address dst) {
4926 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
4927 }
4928
4965 oop_index = oop_recorder()->allocate_metadata_index(obj);
4966 } else {
4967 oop_index = oop_recorder()->find_index(obj);
4968 }
4969 RelocationHolder rspec = metadata_Relocation::spec(oop_index);
4970 mov(dst, Address((address)obj, rspec));
4971 }
4972
4973 Address MacroAssembler::constant_oop_address(jobject obj) {
4974 #ifdef ASSERT
4975 {
4976 ThreadInVMfromUnknown tiv;
4977 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
4978 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
4979 }
4980 #endif
4981 int oop_index = oop_recorder()->find_index(obj);
4982 return Address((address)obj, oop_Relocation::spec(oop_index));
4983 }
4984
4985 // Object / value buffer allocation...
4986 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
4987 Register t1, Register t2,
4988 bool clear_fields, Label& alloc_failed)
4989 {
4990 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
4991 Register layout_size = t1;
4992 assert(new_obj == r0, "needs to be r0");
4993 assert_different_registers(klass, new_obj, t1, t2);
4994
4995 // get instance_size in InstanceKlass (scaled to a count of bytes)
4996 ldrw(layout_size, Address(klass, Klass::layout_helper_offset()));
4997 // test to see if it has a finalizer or is malformed in some way
4998 tst(layout_size, Klass::_lh_instance_slow_path_bit);
4999 br(Assembler::NE, slow_case_no_pop);
5000
5001 // Allocate the instance:
5002 // If TLAB is enabled:
5003 // Try to allocate in the TLAB.
5004 // If that fails, go to the slow path.
5005 // Initialize the allocation.
5006 // Exit.
5007 //
5008 // Go to slow path.
5009
5010 if (UseTLAB) {
5011 push(klass);
5012 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
5013 if (ZeroTLAB || (!clear_fields)) {
5014 // the fields have already been cleared
5015 b(initialize_header);
5016 } else {
5017 // initialize both the header and fields
5018 b(initialize_object);
5019 }
5020
5021 if (clear_fields) {
5022 // The fields are initialized before the header. If the size of the fields
5023 // (object size minus the header) is zero, go directly to header initialization.
5024 bind(initialize_object);
5025 subs(layout_size, layout_size, sizeof(oopDesc));
5026 br(Assembler::EQ, initialize_header);
5027
5028 // Divide the remaining size by 8 and zero the object fields in a loop of
5029 // 8-byte stores.
5030
5031 #ifdef ASSERT
5032 // make sure instance_size was multiple of 8
5033 Label L;
5034 tst(layout_size, 7);
5035 br(Assembler::EQ, L);
5036 stop("object size is not multiple of 8 - adjust this code");
5037 bind(L);
5038 // must be > 0, no extra check needed here
5039 #endif
5040
5041 lsr(layout_size, layout_size, LogBytesPerLong);
5042
5043 // initialize remaining object fields: instance_size was a multiple of 8
5044 {
5045 Label loop;
5046 Register base = t2;
5047
5048 bind(loop);
5049 add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong);
5050 str(zr, Address(rscratch1, sizeof(oopDesc) - 1*oopSize));
5051 subs(layout_size, layout_size, 1);
5052 br(Assembler::NE, loop);
5053 }
5054 } // clear_fields
5055
5056 // initialize object header only.
5057 bind(initialize_header);
5058 pop(klass);
5059 Register mark_word = t2;
5060 ldr(mark_word, Address(klass, Klass::prototype_header_offset()));
5061 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5062 store_klass_gap(new_obj, zr); // zero klass gap for compressed oops
5063 mov(t2, klass); // preserve klass
5064 store_klass(new_obj, t2); // src klass reg is potentially compressed
5065
5066 // TODO: Valhalla removed SharedRuntime::dtrace_object_alloc from here?
5067
5068 b(done);
5069 }
5070
5071 if (UseTLAB) {
5072 bind(slow_case);
5073 pop(klass);
5074 }
5075 bind(slow_case_no_pop);
5076 b(alloc_failed);
5077
5078 bind(done);
5079 }
5080
5081 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5082 void MacroAssembler::tlab_allocate(Register obj,
5083 Register var_size_in_bytes,
5084 int con_size_in_bytes,
5085 Register t1,
5086 Register t2,
5087 Label& slow_case) {
5088 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5089 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5090 }
5091
5092 void MacroAssembler::verify_tlab() {
5093 #ifdef ASSERT
5094 if (UseTLAB && VerifyOops) {
5095 Label next, ok;
5096
5097 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5098
5099 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5100 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5101 cmp(rscratch2, rscratch1);
5102 br(Assembler::HS, next);
5103 STOP("assert(top >= start)");
5104 should_not_reach_here();
5105
5106 bind(next);
5107 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5108 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5109 cmp(rscratch2, rscratch1);
5110 br(Assembler::HS, ok);
5111 STOP("assert(top <= end)");
5112 should_not_reach_here();
5113
5114 bind(ok);
5115 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5116 }
5117 #endif
5118 }
5119
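// Loads the InlineKlass* of declared field 'index' from the klass's
// inline_type_field_klasses array; entries are pointers, hence the lsl(3)
// scaling.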
5120 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
5121 ldr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
5122 #ifdef ASSERT
5123 {
5124 Label done;
5125 cbnz(inline_klass, done);
5126 stop("get_inline_type_field_klass contains no inline klass");
5127 bind(done);
5128 }
5129 #endif
5130 ldr(inline_klass, Address(inline_klass, index, Address::lsl(3)));
5131 }
5132
5133 // Writes to successive stack pages until the requested offset is reached,
5134 // to check for stack overflow plus shadow pages. This clobbers tmp.
5135 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5136 assert_different_registers(tmp, size, rscratch1);
5137 mov(tmp, sp);
5138 // Bang stack for total size given plus shadow page size.
5139 // Bang one page at a time because large size can bang beyond yellow and
5140 // red zones.
5141 Label loop;
5142 mov(rscratch1, (int)os::vm_page_size());
5143 bind(loop);
5144 lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5145 subsw(size, size, rscratch1);
5146 str(size, Address(tmp));
5147 br(Assembler::GT, loop);
5148
5149 // Bang down shadow pages too.
5150 // At this point, (tmp-0) is the last address touched, so don't
5151 // touch it again. (It was touched as (tmp-pagesize) but then tmp
5152 // was post-decremented.) Skip this address by starting at i=1, and
5238 }
5239
5240 void MacroAssembler::remove_frame(int framesize) {
5241 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5242 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
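// The two size thresholds below reflect encoding limits: ldp's scaled signed
// offset reaches just under 1 << 9 bytes, and add's unsigned immediate just
// under 1 << 12; anything larger goes through a scratch register.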
5243 if (framesize < ((1 << 9) + 2 * wordSize)) {
5244 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5245 add(sp, sp, framesize);
5246 } else {
5247 if (framesize < ((1 << 12) + 2 * wordSize))
5248 add(sp, sp, framesize - 2 * wordSize);
5249 else {
5250 mov(rscratch1, framesize - 2 * wordSize);
5251 add(sp, sp, rscratch1);
5252 }
5253 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5254 }
5255 authenticate_return_address();
5256 }
5257
5258 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
5259 if (needs_stack_repair) {
5260 // Remove the extension of the caller's frame used for inline type unpacking
5261 //
5262 // Right now the stack looks like this:
5263 //
5264 // | Arguments from caller |
5265 // |---------------------------| <-- caller's SP
5266 // | Saved LR #1 |
5267 // | Saved FP #1 |
5268 // |---------------------------|
5269 // | Extension space for |
5270 // | inline arg (un)packing |
5271 // |---------------------------| <-- start of this method's frame
5272 // | Saved LR #2 |
5273 // | Saved FP #2 |
5274 // |---------------------------| <-- FP
5275 // | sp_inc |
5276 // | method locals |
5277 // |---------------------------| <-- SP
5278 //
5279 // There are two copies of FP and LR on the stack. They will be identical
5280 // unless the caller has been deoptimized, in which case LR #1 will be patched
5281 // to point at the deopt blob, and LR #2 will still point into the old method.
5282 //
5283 // The sp_inc stack slot holds the total size of the frame including the
5284 // extension space minus two words for the saved FP and LR.
5285
5286 int sp_inc_offset = initial_framesize - 3 * wordSize; // Immediately below saved LR and FP
5287
5288 ldr(rscratch1, Address(sp, sp_inc_offset));
5289 add(sp, sp, rscratch1);
5290 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5291 } else {
5292 remove_frame(initial_framesize);
5293 }
5294 }
5295
5296 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
5297 int real_frame_size = frame_size + sp_inc;
5298 assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
5299 assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
5300 assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
5301
5302 int sp_inc_offset = frame_size - 3 * wordSize; // Immediately below saved LR and FP
5303
5304 // Subtract two words for the saved FP and LR as these will be popped
5305 // separately. See remove_frame above.
5306 mov(rscratch1, real_frame_size - 2*wordSize);
5307 str(rscratch1, Address(sp, sp_inc_offset));
5308 }
5309
5310 // This method counts leading positive bytes (highest bit not set) in the provided byte array
5311 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5312 // The simple and most common case, a small aligned array not at the end of a
5313 // memory page, is handled here. All other cases are handled in the stub.
5314 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5315 const uint64_t UPPER_BIT_MASK=0x8080808080808080;
5316 assert_different_registers(ary1, len, result);
5317
5318 mov(result, len);
5319 cmpw(len, 0);
5320 br(LE, DONE);
5321 cmpw(len, 4 * wordSize);
5322 br(GE, STUB_LONG); // size > 32 then go to stub
5323
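// The shift below moves the in-page offset of ary1 into the top bits; adding
// (4 * wordSize) << shift then sets the carry exactly when a 32-byte read
// starting at ary1 would run past the end of the page.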
5324 int shift = 64 - exact_log2(os::vm_page_size());
5325 lsl(rscratch1, ary1, shift);
5326 mov(rscratch2, (size_t)(4 * wordSize) << shift);
5327 adds(rscratch2, rscratch1, rscratch2); // At end of page?
5328 br(CS, STUB); // at the end of page then go to stub
6203 // On other systems, the helper is a usual C function.
6204 //
6205 void MacroAssembler::get_thread(Register dst) {
6206 RegSet saved_regs =
6207 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
6208 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
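// On Linux the helper is hand-written assembly that clobbers only r0 and r1;
// elsewhere it is an ordinary C function, so the full set of C-volatile
// registers r0..r17 has to be preserved.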
6209
6210 protect_return_address();
6211 push(saved_regs, sp);
6212
6213 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
6214 blr(lr);
6215 if (dst != c_rarg0) {
6216 mov(dst, c_rarg0);
6217 }
6218
6219 pop(saved_regs, sp);
6220 authenticate_return_address();
6221 }
6222
6223 #ifdef COMPILER2
6224 // C2 compiled method's prolog code
6225 // Moved here from aarch64.ad to support the Valhalla code below
6226 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
6227 if (C->clinit_barrier_on_entry()) {
6228 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
6229
6230 Label L_skip_barrier;
6231
6232 mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
6233 clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
6234 far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
6235 bind(L_skip_barrier);
6236 }
6237
6238 if (C->max_vector_size() > 0) {
6239 reinitialize_ptrue();
6240 }
6241
6242 int bangsize = C->output()->bang_size_in_bytes();
6243 if (C->output()->need_stack_bang(bangsize))
6244 generate_stack_overflow_check(bangsize);
6245
6246 // n.b. frame size includes space for return pc and rfp
6247 const long framesize = C->output()->frame_size_in_bytes();
6248 build_frame(framesize);
6249
6250 if (C->needs_stack_repair()) {
6251 save_stack_increment(sp_inc, framesize);
6252 }
6253
6254 if (VerifyStackAtCalls) {
6255 Unimplemented();
6256 }
6257 }
6258 #endif // COMPILER2
6259
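// On entry r0 holds either an already-buffered oop (low bit clear, nothing to
// do) or, when the fields are returned in registers, the InlineKlass* of the
// return type tagged with 1 in the low bit; hence the tbz on bit 0 below.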
6260 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
6261 assert(InlineTypeReturnedAsFields, "only used when inline types are returned as fields");
6262 // An inline type might be returned. If fields are in registers we
6263 // need to allocate an inline type instance and initialize it with
6264 // the values of the fields.
6265 Label skip;
6266 // We only need a new buffered inline type if a new one is not returned
6267 tbz(r0, 0, skip);
6268 int call_offset = -1;
6269
6270 // Be careful not to clobber r1-7 which hold returned fields
6271 // Also do not use callee-saved registers as these may be live in the interpreter
6272 Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
6273
6274 // The following code is similar to allocate_instance but has some slight differences,
6275 // e.g. the object size is never zero and is sometimes a compile-time constant, and
6276 // storing the klass pointer after allocation is unnecessary if vk != nullptr; allocate_instance is not aware of these.
6277 Label slow_case;
6278 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6279 mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it if allocation fails
6280
6281 if (vk != nullptr) {
6282 // Called from C1, where the return type is statically known.
6283 movptr(klass, (intptr_t)vk->get_InlineKlass());
6284 jint obj_size = vk->layout_helper();
6285 assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6286 if (UseTLAB) {
6287 tlab_allocate(r0, noreg, obj_size, tmp1, tmp2, slow_case);
6288 } else {
6289 b(slow_case);
6290 }
6291 } else {
6292 // Call from interpreter. R0 contains ((the InlineKlass* of the return type) | 0x01)
6293 andr(klass, r0, -2);
6294 ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
6295 if (UseTLAB) {
6296 tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
6297 } else {
6298 b(slow_case);
6299 }
6300 }
6301 if (UseTLAB) {
6302 // 2. Initialize buffered inline instance header
6303 Register buffer_obj = r0;
6304 mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
6305 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
6306 store_klass_gap(buffer_obj, zr);
6307 if (vk == nullptr) {
6308 // store_klass corrupts klass, so save it for later use (interpreter case only).
6309 mov(tmp1, klass);
6310 }
6311 store_klass(buffer_obj, klass);
6312 // 3. Initialize its fields with an inline class specific handler
6313 if (vk != nullptr) {
6314 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6315 } else {
6316 // tmp1 holds klass preserved above
6317 ldr(tmp1, Address(tmp1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
6318 ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
6319 blr(tmp1);
6320 }
6321
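    // Ensure the stores that initialized the buffer are visible before the
    // new oop can be observed by other threads.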
    membar(Assembler::StoreStore);
    b(skip);
  } else {
    // Must have already branched to slow_case above.
    DEBUG_ONLY(should_not_reach_here());
  }
  bind(slow_case);
  // We failed to allocate a new buffered inline type; fall back to a
  // runtime call. Some oop fields may be live in registers, but we can't
  // tell which, so the runtime call takes care of preserving them
  // across a GC if one occurs.
  mov(r0, r0_preserved);

  if (from_interpreter) {
    super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
  } else {
    far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
    call_offset = offset();
  }
  membar(Assembler::StoreStore);

  bind(skip);
  return call_offset;
}

// Move a value between registers/stack slots and update the reg_state
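// The destination must not be read-only (i.e. still holding a value that a
// later move has yet to consume). On success the source slot becomes
// writable and the destination is marked written.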
bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
  assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
  if (reg_state[to->value()] == reg_written) {
    return true; // Already written
  }

  if (from != to && bt != T_VOID) {
    if (reg_state[to->value()] == reg_readonly) {
      return false; // Not yet writable
    }
    if (from->is_reg()) {
      if (to->is_reg()) {
        if (from->is_Register() && to->is_Register()) {
          mov(to->as_Register(), from->as_Register());
        } else if (from->is_FloatRegister() && to->is_FloatRegister()) {
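          // fmovd copies the full 64 bits, which is correct for both float and double values.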
          fmovd(to->as_FloatRegister(), from->as_FloatRegister());
        } else {
          ShouldNotReachHere();
        }
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
        Address to_addr = Address(sp, st_off);
        if (from->is_FloatRegister()) {
          if (bt == T_DOUBLE) {
            strd(from->as_FloatRegister(), to_addr);
          } else {
            assert(bt == T_FLOAT, "must be float");
            strs(from->as_FloatRegister(), to_addr);
          }
        } else {
          str(from->as_Register(), to_addr);
        }
      }
    } else {
      Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size);
      if (to->is_reg()) {
        if (to->is_FloatRegister()) {
          if (bt == T_DOUBLE) {
            ldrd(to->as_FloatRegister(), from_addr);
          } else {
            assert(bt == T_FLOAT, "must be float");
            ldrs(to->as_FloatRegister(), from_addr);
          }
        } else {
          ldr(to->as_Register(), from_addr);
        }
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
        ldr(rscratch1, from_addr);
        str(rscratch1, Address(sp, st_off));
      }
    }
  }

  // Update register states
  reg_state[from->value()] = reg_writable;
  reg_state[to->value()] = reg_written;
  return true;
}

// Calculate the extra stack space required for packing or unpacking inline
// args and adjust the stack pointer
int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
  int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
  sp_inc = align_up(sp_inc, StackAlignmentInBytes);
  assert(sp_inc > 0, "sanity");

  // Save a copy of the FP and LR here for deoptimization patching and frame walking
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));

  // Adjust the stack pointer. This will be repaired on return by MacroAssembler::remove_frame
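  // (The 1 << 9 threshold below is conservative: AArch64 add/sub immediates
  // accept an unsigned 12-bit value, optionally shifted.)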
  if (sp_inc < (1 << 9)) {
    sub(sp, sp, sp_inc);   // Fits in an immediate
  } else {
    mov(rscratch1, sp_inc);
    sub(sp, sp, rscratch1);
  }

  return sp_inc + 2 * wordSize; // Account for the FP/LR space
}

// Read all fields from an inline type oop and store the values in registers/stack slots
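// Returns true once every destination has been written; false means some
// destinations were still read-only, so the caller must retry this argument
// after other moves have released them.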
bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                                          VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                                          RegState reg_state[]) {
  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
  assert(from->is_valid(), "source must be valid");
  bool progress = false;
#ifdef ASSERT
  const int start_offset = offset();
#endif

  Label L_null, L_notNull;
  // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
  Register tmp1 = r10;
  Register tmp2 = r11;
  Register fromReg = noreg;
  ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
  bool done = true;
  bool mark_done = true;
  VMReg toReg;
  BasicType bt;
  // Check if argument requires a null check
  bool null_check = false;
  VMReg nullCheckReg;
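  // A signature entry with offset -1 is the IsInit slot of a nullable inline
  // type argument; its presence means a null check must be emitted.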
  while (stream.next(nullCheckReg, bt)) {
    if (sig->at(stream.sig_index())._offset == -1) {
      null_check = true;
      break;
    }
  }
  stream.reset(sig_index, to_index);
  while (stream.next(toReg, bt)) {
    assert(toReg->is_valid(), "destination must be valid");
    int idx = (int)toReg->value();
    if (reg_state[idx] == reg_readonly) {
      if (idx != from->value()) {
        mark_done = false;
      }
      done = false;
      continue;
    } else if (reg_state[idx] == reg_written) {
      continue;
    }
    assert(reg_state[idx] == reg_writable, "must be writable");
    reg_state[idx] = reg_written;
    progress = true;

    if (fromReg == noreg) {
      if (from->is_reg()) {
        fromReg = from->as_Register();
      } else {
        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
        ldr(tmp1, Address(sp, st_off));
        fromReg = tmp1;
      }
      if (null_check) {
        // Nullable inline type argument, emit null check
        cbz(fromReg, L_null);
      }
    }
    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      assert(null_check, "missing null check");
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
        mov(tmp2, 1);
        str(tmp2, Address(sp, st_off));
      } else {
        mov(toReg->as_Register(), 1);
      }
      continue;
    }
    assert(off > 0, "offset in object should be positive");
    Address fromAddr = Address(fromReg, off);
    if (!toReg->is_FloatRegister()) {
      Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
      if (is_reference_type(bt)) {
        load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
      } else {
        bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
        load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
      }
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
        str(dst, Address(sp, st_off));
      }
    } else if (bt == T_DOUBLE) {
      ldrd(toReg->as_FloatRegister(), fromAddr);
    } else {
      assert(bt == T_FLOAT, "must be float");
      ldrs(toReg->as_FloatRegister(), fromAddr);
    }
  }
  if (progress && null_check) {
    if (done) {
      b(L_notNull);
      bind(L_null);
      // Set IsInit field to zero to signal that the argument is null.
      // Also set all oop fields to zero to make the GC happy.
      stream.reset(sig_index, to_index);
      while (stream.next(toReg, bt)) {
        if (sig->at(stream.sig_index())._offset == -1 ||
            bt == T_OBJECT || bt == T_ARRAY) {
          if (toReg->is_stack()) {
            int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
            str(zr, Address(sp, st_off));
          } else {
            mov(toReg->as_Register(), zr);
          }
        }
      }
      bind(L_notNull);
    } else {
      bind(L_null);
    }
  }

  sig_index = stream.sig_index();
  to_index = stream.regs_index();

  if (mark_done && reg_state[from->value()] != reg_written) {
    // This is okay because no one else will write to that slot
    reg_state[from->value()] = reg_writable;
  }
  from_index--;
  assert(progress || (start_offset == offset()), "should not emit code");
  return done;
}

// Pack fields back into an inline type oop
bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                                        VMRegPair* from, int from_count, int& from_index, VMReg to,
                                        RegState reg_state[], Register val_array) {
  assert(sig->at(sig_index)._bt == T_PRIMITIVE_OBJECT, "should be at end delimiter");
  assert(to->is_valid(), "destination must be valid");

  if (reg_state[to->value()] == reg_written) {
    skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
    return true; // Already written
  }

  // The GC barrier expanded by store_heap_oop below may call into the
  // runtime so use callee-saved registers for any values that need to be
  // preserved. The GC barrier assembler should take care of saving the
  // Java argument registers.
  // TODO 8284443 Isn't it an issue if the code below uses r14 as tmp when it contains a spilled value?
  // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
  Register val_obj_tmp = r21;
  Register from_reg_tmp = r22;
  Register tmp1 = r14;
  Register tmp2 = r13;
  Register tmp3 = r12;
  Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();

  assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);

  if (reg_state[to->value()] == reg_readonly) {
    if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
      skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
      return false; // Not yet writable
    }
    val_obj = val_obj_tmp;
  }

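  // Fetch the pre-allocated buffer for this argument from val_array before
  // filling in its fields.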
  int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_PRIMITIVE_OBJECT);
  load_heap_oop(val_obj, Address(val_array, index), tmp1, tmp2);

  ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
  VMReg fromReg;
  BasicType bt;
  Label L_null;
  while (stream.next(fromReg, bt)) {
    assert(fromReg->is_valid(), "source must be valid");
    reg_state[fromReg->value()] = reg_writable;

    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      // Nullable inline type argument, emit null check
      Label L_notNull;
      if (fromReg->is_stack()) {
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
        ldrb(tmp2, Address(sp, ld_off));
        cbnz(tmp2, L_notNull);
      } else {
        cbnz(fromReg->as_Register(), L_notNull);
      }
      mov(val_obj, 0);
      b(L_null);
      bind(L_notNull);
      continue;
    }

    assert(off > 0, "offset in object should be positive");
    size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;

    // Pack the scalarized field into the value object.
    Address dst(val_obj, off);

    if (!fromReg->is_FloatRegister()) {
      Register src;
      if (fromReg->is_stack()) {
        src = from_reg_tmp;
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
        load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
      } else {
        src = fromReg->as_Register();
      }
      assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
      if (is_reference_type(bt)) {
        store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        store_sized_value(dst, src, size_in_bytes);
      }
    } else if (bt == T_DOUBLE) {
      strd(fromReg->as_FloatRegister(), dst);
    } else {
      assert(bt == T_FLOAT, "must be float");
      strs(fromReg->as_FloatRegister(), dst);
    }
  }
  bind(L_null);
  sig_index = stream.sig_index();
  from_index = stream.regs_index();

  assert(reg_state[to->value()] == reg_writable, "must have already been read");
  bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
  assert(success, "to register must be writeable");

  return true;
}

VMReg MacroAssembler::spill_reg_for(VMReg reg) {
  return (reg->is_FloatRegister()) ? v8->as_VMReg() : r14->as_VMReg();
}

void MacroAssembler::cache_wb(Address line) {
  assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
  assert(line.index() == noreg, "index should be noreg");
  assert(line.offset() == 0, "offset should be 0");
  // would like to assert this
  // assert(line._ext.shift == 0, "shift should be zero");
  if (VM_Version::supports_dcpop()) {
    // writeback using clean virtual address to point of persistence
    dc(Assembler::CVAP, line.base());
  } else {
    // no need to generate anything as Unsafe.writebackMemory should
    // never invoke this stub
  }
}

void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier after the sync, not before it
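  // AnyAny is a full barrier, ordering the preceding cache-line write-backs
  // with respect to all subsequent memory accesses.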
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}