15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_x86.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/stubRoutines.hpp"
45 #include "utilities/powerOfTwo.hpp"
46 #include "vmreg_x86.inline.hpp"
47
48
49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
50 // instructions, to allow sign-masking or sign-bit flipping. They allow
51 // fast versions of NegF/NegD and AbsF/AbsD.
52
53 // Note: 'double' and 'long long' have 32-bit alignment on x86.
54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
55 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
56 // for the 128-bit operands of SSE instructions.
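// Worked example (hypothetical value): 0x1238 & ~0xF == 0x1230, the previous 16-byte
// boundary. Rounding down is safe only because the callers are assumed to over-allocate
// the backing buffer, so both 64-bit halves still land inside it.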
57 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
58 // Store the value to a 128-bit operand.
59 operand[0] = lo;
445 __ bind(*stub->continuation());
446 }
447
448 if (compilation()->env()->dtrace_method_probes()) {
449 #ifdef _LP64
450 __ mov(rdi, r15_thread);
451 __ mov_metadata(rsi, method()->constant_encoding());
452 #else
453 __ get_thread(rax);
454 __ movptr(Address(rsp, 0), rax);
455 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
456 #endif
457 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
458 }
459
460 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
461 __ mov(rax, rbx); // Restore the exception
462 }
463
464 // remove the activation and dispatch to the unwind handler
465 __ remove_frame(initial_frame_size_in_bytes());
466 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
467
468 // Emit the slow path assembly
469 if (stub != nullptr) {
470 stub->emit_code(this);
471 }
472
473 return offset;
474 }
475
476
477 int LIR_Assembler::emit_deopt_handler() {
478 // generate code for deopt handler
479 address handler_base = __ start_a_stub(deopt_handler_size());
480 if (handler_base == nullptr) {
481 // not enough space left for the handler
482 bailout("deopt handler overflow");
483 return -1;
484 }
485
486 int offset = code_offset();
487 InternalAddress here(__ pc());
488
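// The handler pushes its own address and jumps to the deopt blob's unpack entry, so the
// blob sees that address as the return pc of the deoptimization site (sketch of the intent;
// the exact contract is defined by SharedRuntime::deopt_blob()).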
489 __ pushptr(here.addr(), rscratch1);
490 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
491 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
492 __ end_a_stub();
493
494 return offset;
495 }
496
497 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
498 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
499 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
500 assert(result->fpu() == 0, "result must already be on TOS");
501 }
502
503 // Pop the stack before the safepoint code
504 __ remove_frame(initial_frame_size_in_bytes());
505
506 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
507 __ reserved_stack_check();
508 }
509
510 // Note: we do not need to round double result; float result has the right precision
511 // the poll sets the condition code, but no data registers
512
513 #ifdef _LP64
514 const Register thread = r15_thread;
515 #else
516 const Register thread = rbx;
517 __ get_thread(thread);
518 #endif
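// Record the offset of the return poll so the C1SafepointPollStub can compute the poll's pc,
// mark the site with a poll_return relocation, then test the thread-local polling word;
// if the poll is armed, safepoint_poll branches to the stub's entry.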
519 code_stub->set_safepoint_offset(__ offset());
520 __ relocate(relocInfo::poll_return_type);
521 __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
522 __ ret(0);
523 }
524
525
526 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
527 guarantee(info != nullptr, "Shouldn't be null");
528 int offset = __ offset();
529 #ifdef _LP64
530 const Register poll_addr = rscratch1;
531 __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
532 #else
533 assert(tmp->is_cpu_register(), "needed");
534 const Register poll_addr = tmp->as_register();
535 __ get_thread(poll_addr);
536 __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
537 #endif
538 add_debug_info_for_branch(info);
539 __ relocate(relocInfo::poll_type);
540 address pre_pc = __ pc();
541 __ testl(rax, Address(poll_addr, 0));
542 address post_pc = __ pc();
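// The poll is expected to encode to exactly 2 bytes (plus one REX prefix byte on 64-bit,
// where poll_addr is rscratch1); the guarantee below documents that assumption, presumably
// so the runtime can map a trapping pc back to this poll site.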
543 guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
544 return offset;
545 }
1580 add_debug_info_for_null_check_here(op->stub()->info());
1581 __ cmpb(Address(op->klass()->as_register(),
1582 InstanceKlass::init_state_offset()),
1583 InstanceKlass::fully_initialized);
1584 __ jcc(Assembler::notEqual, *op->stub()->entry());
1585 }
1586 __ allocate_object(op->obj()->as_register(),
1587 op->tmp1()->as_register(),
1588 op->tmp2()->as_register(),
1589 op->header_size(),
1590 op->object_size(),
1591 op->klass()->as_register(),
1592 *op->stub()->entry());
1593 __ bind(*op->stub()->continuation());
1594 }
1595
1596 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1597 Register len = op->len()->as_register();
1598 LP64_ONLY( __ movslq(len, len); )
1599
1600 if (UseSlowPath ||
1601 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1602 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1603 __ jmp(*op->stub()->entry());
1604 } else {
1605 Register tmp1 = op->tmp1()->as_register();
1606 Register tmp2 = op->tmp2()->as_register();
1607 Register tmp3 = op->tmp3()->as_register();
1608 if (len == tmp1) {
1609 tmp1 = tmp3;
1610 } else if (len == tmp2) {
1611 tmp2 = tmp3;
1612 } else if (len == tmp3) {
1613 // everything is ok
1614 } else {
1615 __ mov(tmp3, len);
1616 }
1617 __ allocate_array(op->obj()->as_register(),
1618 len,
1619 tmp1,
1620 tmp2,
1679 assert(data != nullptr, "need data for type check");
1680 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1681 }
1682 Label* success_target = success;
1683 Label* failure_target = failure;
1684
1685 if (obj == k_RInfo) {
1686 k_RInfo = dst;
1687 } else if (obj == klass_RInfo) {
1688 klass_RInfo = dst;
1689 }
1690 if (k->is_loaded() && !UseCompressedClassPointers) {
1691 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1692 } else {
1693 Rtmp1 = op->tmp3()->as_register();
1694 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1695 }
1696
1697 assert_different_registers(obj, k_RInfo, klass_RInfo);
1698
1699 __ testptr(obj, obj);
1700 if (op->should_profile()) {
1701 Label not_null;
1702 Register mdo = klass_RInfo;
1703 __ mov_metadata(mdo, md->constant_encoding());
1704 __ jccb(Assembler::notEqual, not_null);
1705 // Object is null; update MDO and exit
1706 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1707 int header_bits = BitData::null_seen_byte_constant();
1708 __ orb(data_addr, header_bits);
1709 __ jmp(*obj_is_null);
1710 __ bind(not_null);
1711
1712 Label update_done;
1713 Register recv = k_RInfo;
1714 __ load_klass(recv, obj, tmp_load_klass);
1715 type_profile_helper(mdo, md, data, recv, &update_done);
1716
1717 Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1718 __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
1719
1720 __ bind(update_done);
1721 } else {
1722 __ jcc(Assembler::equal, *obj_is_null);
1723 }
1724
1725 if (!k->is_loaded()) {
1726 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1727 } else {
1728 #ifdef _LP64
1729 __ mov_metadata(k_RInfo, k->constant_encoding());
1730 #endif // _LP64
1731 }
1732 __ verify_oop(obj);
1733
1734 if (op->fast_check()) {
1735 // get object class
1736 // not a safepoint as obj null check happens earlier
1737 #ifdef _LP64
1738 if (UseCompressedClassPointers) {
1739 __ load_klass(Rtmp1, obj, tmp_load_klass);
1740 __ cmpptr(k_RInfo, Rtmp1);
1741 } else {
1742 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1894 __ mov(dst, obj);
1895 }
1896 } else
1897 if (code == lir_instanceof) {
1898 Register obj = op->object()->as_register();
1899 Register dst = op->result_opr()->as_register();
1900 Label success, failure, done;
1901 emit_typecheck_helper(op, &success, &failure, &failure);
1902 __ bind(failure);
1903 __ xorptr(dst, dst);
1904 __ jmpb(done);
1905 __ bind(success);
1906 __ movptr(dst, 1);
1907 __ bind(done);
1908 } else {
1909 ShouldNotReachHere();
1910 }
1911
1912 }
1913
1914
1915 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1916 if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
1917 assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1918 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1919 assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1920 assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1921 Register addr = op->addr()->as_register();
1922 __ lock();
1923 NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1924
1925 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1926 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1927 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1928 Register newval = op->new_value()->as_register();
1929 Register cmpval = op->cmp_value()->as_register();
1930 assert(cmpval == rax, "wrong register");
1931 assert(newval != noreg, "new val must be register");
1932 assert(cmpval != newval, "cmp and new values must be in different registers");
1933 assert(cmpval != addr, "cmp and addr must be in different registers");
1954 __ cmpxchgl(newval, Address(addr, 0));
1955 }
1956 #ifdef _LP64
1957 } else if (op->code() == lir_cas_long) {
1958 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1959 Register newval = op->new_value()->as_register_lo();
1960 Register cmpval = op->cmp_value()->as_register_lo();
1961 assert(cmpval == rax, "wrong register");
1962 assert(newval != noreg, "new val must be register");
1963 assert(cmpval != newval, "cmp and new values must be in different registers");
1964 assert(cmpval != addr, "cmp and addr must be in different registers");
1965 assert(newval != addr, "new value and addr must be in different registers");
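// lock cmpxchg compares rax (cmpval) with the memory operand; on a match it stores newval,
// otherwise it loads the current memory value into rax. ZF is set iff the exchange happened.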
1966 __ lock();
1967 __ cmpxchgq(newval, Address(addr, 0));
1968 #endif // _LP64
1969 } else {
1970 Unimplemented();
1971 }
1972 }
1973
1974 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1975 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1976 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
1977
1978 Assembler::Condition acond, ncond;
1979 switch (condition) {
1980 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
1981 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
1982 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
1983 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
1984 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
1985 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
1986 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
1987 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
1988 default: acond = Assembler::equal; ncond = Assembler::notEqual;
1989 ShouldNotReachHere();
1990 }
1991
1992 if (opr1->is_cpu_register()) {
1993 reg2reg(opr1, result);
2831 int offset = __ offset();
2832 switch (code) {
2833 case lir_static_call:
2834 case lir_optvirtual_call:
2835 case lir_dynamic_call:
2836 offset += NativeCall::displacement_offset;
2837 break;
2838 case lir_icvirtual_call:
2839 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2840 break;
2841 default: ShouldNotReachHere();
2842 }
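// Pad so that the 32-bit displacement of the upcoming call ends up word aligned; the
// matching asserts in call()/ic_call() below check this, presumably so the call target
// can be patched atomically.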
2843 __ align(BytesPerWord, offset);
2844 }
2845
2846
2847 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2848 assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2849 "must be aligned");
2850 __ call(AddressLiteral(op->addr(), rtype));
2851 add_call_info(code_offset(), op->info());
2852 __ post_call_nop();
2853 }
2854
2855
2856 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2857 __ ic_call(op->addr());
2858 add_call_info(code_offset(), op->info());
2859 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2860 "must be aligned");
2861 __ post_call_nop();
2862 }
2863
2864
2865 void LIR_Assembler::emit_static_call_stub() {
2866 address call_pc = __ pc();
2867 address stub = __ start_a_stub(call_stub_size());
2868 if (stub == nullptr) {
2869 bailout("static call stub overflow");
2870 return;
2871 }
2872
2873 int start = __ offset();
2874
2875 // make sure that the displacement word of the call ends up word aligned
2876 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2877 __ relocate(static_stub_Relocation::spec(call_pc));
2878 __ mov_metadata(rbx, (Metadata*)nullptr);
3019 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3020 }
3021
3022
3023 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3024 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3025 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3026 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3027 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
3028 }
3029
3030
3031 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3032 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3033 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3034 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3035 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
3036 }
3037
3038
3039 // This code replaces a call to arraycopy; no exceptions may be thrown
3040 // in this code; they must be thrown in the System.arraycopy
3041 // activation frame. We could save some checks if this were not the case.
3042 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3043 ciArrayKlass* default_type = op->expected_type();
3044 Register src = op->src()->as_register();
3045 Register dst = op->dst()->as_register();
3046 Register src_pos = op->src_pos()->as_register();
3047 Register dst_pos = op->dst_pos()->as_register();
3048 Register length = op->length()->as_register();
3049 Register tmp = op->tmp()->as_register();
3050 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3051
3052 CodeStub* stub = op->stub();
3053 int flags = op->flags();
3054 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
3055 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3056
3057 // if we don't know anything, just go through the generic arraycopy
3058 if (default_type == nullptr) {
3059 // save outgoing arguments on stack in case call to System.arraycopy is needed
3060 // HACK ALERT. This code used to push the parameters in a hardwired fashion
3061 // for interpreter calling conventions. Now we have to do it in the new style conventions.
3062 // For the moment, until C1 gets the new register allocator, I just force all the
3063 // args to the right place (except the register args) and then on the back side
3064 // reload the register args properly if we go the slow path. Yuck
3065
3066 // These are proper for the calling convention
3067 store_parameter(length, 2);
3068 store_parameter(dst_pos, 1);
3069 store_parameter(dst, 0);
3070
3071 // these are just temporary placements until we need to reload
3072 store_parameter(src_pos, 3);
3073 store_parameter(src, 4);
3074 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3075
3076 address copyfunc_addr = StubRoutines::generic_arraycopy();
3130 __ mov(tmp, rax);
3131 __ xorl(tmp, -1);
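// On a partial copy the generic stub is assumed to return ~(elements already copied) in
// rax; flipping the bits recovers that count so length/positions can be adjusted before
// retrying via the slow path below.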
3132
3133 // Reload values from the stack so they are where the stub
3134 // expects them.
3135 __ movptr (dst, Address(rsp, 0*BytesPerWord));
3136 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3137 __ movptr (length, Address(rsp, 2*BytesPerWord));
3138 __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3139 __ movptr (src, Address(rsp, 4*BytesPerWord));
3140
3141 __ subl(length, tmp);
3142 __ addl(src_pos, tmp);
3143 __ addl(dst_pos, tmp);
3144 __ jmp(*stub->entry());
3145
3146 __ bind(*stub->continuation());
3147 return;
3148 }
3149
3150 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3151
3152 int elem_size = type2aelembytes(basic_type);
3153 Address::ScaleFactor scale;
3154
3155 switch (elem_size) {
3156 case 1 :
3157 scale = Address::times_1;
3158 break;
3159 case 2 :
3160 scale = Address::times_2;
3161 break;
3162 case 4 :
3163 scale = Address::times_4;
3164 break;
3165 case 8 :
3166 scale = Address::times_8;
3167 break;
3168 default:
3169 scale = Address::no_scale;
3759 // first time here. Set profile type.
3760 __ movptr(mdo_addr, tmp);
3761 #ifdef ASSERT
3762 __ andptr(tmp, TypeEntries::type_klass_mask);
3763 __ verify_klass_ptr(tmp);
3764 #endif
3765 } else {
3766 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3767 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3768
3769 __ testptr(mdo_addr, TypeEntries::type_unknown);
3770 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3771
3772 __ orptr(mdo_addr, TypeEntries::type_unknown);
3773 }
3774 }
3775 }
3776 __ bind(next);
3777 }
3778
3779 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3780 Unimplemented();
3781 }
3782
3783
3784 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3785 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3786 }
3787
3788
3789 void LIR_Assembler::align_backward_branch_target() {
3790 __ align(BytesPerWord);
3791 }
3792
3793
3794 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3795 if (left->is_single_cpu()) {
3796 __ negl(left->as_register());
3797 move_regs(left->as_register(), dest->as_register());
3798
4022 }
4023
4024 void LIR_Assembler::membar_storeload() {
4025 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4026 }
4027
4028 void LIR_Assembler::on_spin_wait() {
4029 __ pause ();
4030 }
4031
4032 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4033 assert(result_reg->is_register(), "check");
4034 #ifdef _LP64
4035 // __ get_thread(result_reg->as_register_lo());
4036 __ mov(result_reg->as_register(), r15_thread);
4037 #else
4038 __ get_thread(result_reg->as_register());
4039 #endif // _LP64
4040 }
4041
4042
4043 void LIR_Assembler::peephole(LIR_List*) {
4044 // do nothing for now
4045 }
4046
4047 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4048 assert(data == dest, "xchg/xadd uses only 2 operands");
4049
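// lock xadd atomically adds the register to memory and leaves the old memory value in the
// register; xchg with a memory operand is implicitly locked, so no lock prefix is needed.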
4050 if (data->type() == T_INT) {
4051 if (code == lir_xadd) {
4052 __ lock();
4053 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4054 } else {
4055 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4056 }
4057 } else if (data->is_oop()) {
4058 assert (code == lir_xchg, "xadd for oops");
4059 Register obj = data->as_register();
4060 #ifdef _LP64
4061 if (UseCompressedOops) {
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gc_globals.hpp"
40 #include "nativeInst_x86.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "oops/objArrayKlass.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/powerOfTwo.hpp"
48 #include "vmreg_x86.inline.hpp"
49
50
51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
52 // instructions, to allow sign-masking or sign-bit flipping. They allow
53 // fast versions of NegF/NegD and AbsF/AbsD.
54
55 // Note: 'double' and 'long long' have 32-bit alignment on x86.
56 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
57 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
58 // for the 128-bit operands of SSE instructions.
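// Worked example (hypothetical value): 0x1238 & ~0xF == 0x1230, the previous 16-byte
// boundary. Rounding down is safe only because the callers are assumed to over-allocate
// the backing buffer, so both 64-bit halves still land inside it.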
59 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
60 // Store the value to a 128-bit operand.
61 operand[0] = lo;
447 __ bind(*stub->continuation());
448 }
449
450 if (compilation()->env()->dtrace_method_probes()) {
451 #ifdef _LP64
452 __ mov(rdi, r15_thread);
453 __ mov_metadata(rsi, method()->constant_encoding());
454 #else
455 __ get_thread(rax);
456 __ movptr(Address(rsp, 0), rax);
457 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
458 #endif
459 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
460 }
461
462 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
463 __ mov(rax, rbx); // Restore the exception
464 }
465
466 // remove the activation and dispatch to the unwind handler
467 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
468 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
469
470 // Emit the slow path assembly
471 if (stub != nullptr) {
472 stub->emit_code(this);
473 }
474
475 return offset;
476 }
477
478
479 int LIR_Assembler::emit_deopt_handler() {
480 // generate code for deopt handler
481 address handler_base = __ start_a_stub(deopt_handler_size());
482 if (handler_base == nullptr) {
483 // not enough space left for the handler
484 bailout("deopt handler overflow");
485 return -1;
486 }
487
488 int offset = code_offset();
489 InternalAddress here(__ pc());
490
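// The handler pushes its own address and jumps to the deopt blob's unpack entry, so the
// blob sees that address as the return pc of the deoptimization site (sketch of the intent;
// the exact contract is defined by SharedRuntime::deopt_blob()).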
491 __ pushptr(here.addr(), rscratch1);
492 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
493 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
494 __ end_a_stub();
495
496 return offset;
497 }
498
499 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
500 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
501 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
502 assert(result->fpu() == 0, "result must already be on TOS");
503 }
504 if (InlineTypeReturnedAsFields) {
505 #ifndef _LP64
506 Unimplemented();
507 #endif
508 // Check if we are returning a non-null inline type and load its fields into registers
509 ciType* return_type = compilation()->method()->return_type();
510 if (return_type->is_inlinetype()) {
511 ciInlineKlass* vk = return_type->as_inline_klass();
512 if (vk->can_be_returned_as_fields()) {
513 address unpack_handler = vk->unpack_handler();
514 assert(unpack_handler != nullptr, "must be");
515 __ call(RuntimeAddress(unpack_handler));
516 }
517 } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
518 Label skip;
519 __ test_oop_is_not_inline_type(rax, rscratch1, skip);
520
521 // Load fields from a buffered value with an inline class specific handler
522 __ load_klass(rdi, rax, rscratch1);
523 __ movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
524 __ movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
525 // Unpack handler can be null if inline type is not scalarizable in returns
526 __ testptr(rdi, rdi);
527 __ jcc(Assembler::zero, skip);
528 __ call(rdi);
529
530 __ bind(skip);
531 }
532 // At this point, rax points to the value object (for interpreter or C1 caller).
533 // The fields of the object are copied into registers (for C2 caller).
534 }
535
536 // Pop the stack before the safepoint code
537 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
538
539 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
540 __ reserved_stack_check();
541 }
542
543 // Note: we do not need to round double result; float result has the right precision
544 // the poll sets the condition code, but no data registers
545
546 #ifdef _LP64
547 const Register thread = r15_thread;
548 #else
549 const Register thread = rbx;
550 __ get_thread(thread);
551 #endif
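// Record the offset of the return poll so the C1SafepointPollStub can compute the poll's pc,
// mark the site with a poll_return relocation, then test the thread-local polling word;
// if the poll is armed, safepoint_poll branches to the stub's entry.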
552 code_stub->set_safepoint_offset(__ offset());
553 __ relocate(relocInfo::poll_return_type);
554 __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
555 __ ret(0);
556 }
557
558
559 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
560 return (__ store_inline_type_fields_to_buf(vk, false));
561 }
562
563 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
564 guarantee(info != nullptr, "Shouldn't be null");
565 int offset = __ offset();
566 #ifdef _LP64
567 const Register poll_addr = rscratch1;
568 __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
569 #else
570 assert(tmp->is_cpu_register(), "needed");
571 const Register poll_addr = tmp->as_register();
572 __ get_thread(poll_addr);
573 __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
574 #endif
575 add_debug_info_for_branch(info);
576 __ relocate(relocInfo::poll_type);
577 address pre_pc = __ pc();
578 __ testl(rax, Address(poll_addr, 0));
579 address post_pc = __ pc();
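// The poll is expected to encode to exactly 2 bytes (plus one REX prefix byte on 64-bit,
// where poll_addr is rscratch1); the guarantee below documents that assumption, presumably
// so the runtime can map a trapping pc back to this poll site.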
580 guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
581 return offset;
582 }
1617 add_debug_info_for_null_check_here(op->stub()->info());
1618 __ cmpb(Address(op->klass()->as_register(),
1619 InstanceKlass::init_state_offset()),
1620 InstanceKlass::fully_initialized);
1621 __ jcc(Assembler::notEqual, *op->stub()->entry());
1622 }
1623 __ allocate_object(op->obj()->as_register(),
1624 op->tmp1()->as_register(),
1625 op->tmp2()->as_register(),
1626 op->header_size(),
1627 op->object_size(),
1628 op->klass()->as_register(),
1629 *op->stub()->entry());
1630 __ bind(*op->stub()->continuation());
1631 }
1632
1633 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1634 Register len = op->len()->as_register();
1635 LP64_ONLY( __ movslq(len, len); )
1636
1637 if (UseSlowPath || op->is_null_free() ||
1638 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1639 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1640 __ jmp(*op->stub()->entry());
1641 } else {
1642 Register tmp1 = op->tmp1()->as_register();
1643 Register tmp2 = op->tmp2()->as_register();
1644 Register tmp3 = op->tmp3()->as_register();
1645 if (len == tmp1) {
1646 tmp1 = tmp3;
1647 } else if (len == tmp2) {
1648 tmp2 = tmp3;
1649 } else if (len == tmp3) {
1650 // everything is ok
1651 } else {
1652 __ mov(tmp3, len);
1653 }
1654 __ allocate_array(op->obj()->as_register(),
1655 len,
1656 tmp1,
1657 tmp2,
1716 assert(data != nullptr, "need data for type check");
1717 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1718 }
1719 Label* success_target = success;
1720 Label* failure_target = failure;
1721
1722 if (obj == k_RInfo) {
1723 k_RInfo = dst;
1724 } else if (obj == klass_RInfo) {
1725 klass_RInfo = dst;
1726 }
1727 if (k->is_loaded() && !UseCompressedClassPointers) {
1728 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1729 } else {
1730 Rtmp1 = op->tmp3()->as_register();
1731 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1732 }
1733
1734 assert_different_registers(obj, k_RInfo, klass_RInfo);
1735
1736 if (op->need_null_check()) {
1737 __ testptr(obj, obj);
1738 if (op->should_profile()) {
1739 Label not_null;
1740 Register mdo = klass_RInfo;
1741 __ mov_metadata(mdo, md->constant_encoding());
1742 __ jccb(Assembler::notEqual, not_null);
1743 // Object is null; update MDO and exit
1744 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1745 int header_bits = BitData::null_seen_byte_constant();
1746 __ orb(data_addr, header_bits);
1747 __ jmp(*obj_is_null);
1748 __ bind(not_null);
1749
1750 Label update_done;
1751 Register recv = k_RInfo;
1752 __ load_klass(recv, obj, tmp_load_klass);
1753 type_profile_helper(mdo, md, data, recv, &update_done);
1754
1755 Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1756 __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
1757
1758 __ bind(update_done);
1759 } else {
1760 __ jcc(Assembler::equal, *obj_is_null);
1761 }
1762 }
1763
1764 if (!k->is_loaded()) {
1765 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1766 } else {
1767 #ifdef _LP64
1768 __ mov_metadata(k_RInfo, k->constant_encoding());
1769 #endif // _LP64
1770 }
1771 __ verify_oop(obj);
1772
1773 if (op->fast_check()) {
1774 // get object class
1775 // not a safepoint as obj null check happens earlier
1776 #ifdef _LP64
1777 if (UseCompressedClassPointers) {
1778 __ load_klass(Rtmp1, obj, tmp_load_klass);
1779 __ cmpptr(k_RInfo, Rtmp1);
1780 } else {
1781 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1933 __ mov(dst, obj);
1934 }
1935 } else
1936 if (code == lir_instanceof) {
1937 Register obj = op->object()->as_register();
1938 Register dst = op->result_opr()->as_register();
1939 Label success, failure, done;
1940 emit_typecheck_helper(op, &success, &failure, &failure);
1941 __ bind(failure);
1942 __ xorptr(dst, dst);
1943 __ jmpb(done);
1944 __ bind(success);
1945 __ movptr(dst, 1);
1946 __ bind(done);
1947 } else {
1948 ShouldNotReachHere();
1949 }
1950
1951 }
1952
1953 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1954 // We are loading/storing from/to an array that *may* be a flat array (the
1955 // declared type is Object[], abstract[], interface[] or VT.ref[]).
1956 // If this array is a flat array, take the slow path.
1957 __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1958 if (!op->value()->is_illegal()) {
1959 // The array is not a flat array, but it might be null-free. If we are storing
1960 // a null into a null-free array, take the slow path (which will throw NPE).
1961 Label skip;
1962 __ cmpptr(op->value()->as_register(), NULL_WORD);
1963 __ jcc(Assembler::notEqual, skip);
1964 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1965 __ bind(skip);
1966 }
1967 }
1968
1969 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1970 // We are storing into an array that *may* be null-free (the declared type is
1971 // Object[], abstract[], interface[] or VT.ref[]).
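// The null-free bit lives in the mark word. When the object is not in the unlocked state
// (the real mark may be displaced by locking), fall back to the klass's prototype header
// before testing the null_free_array bit.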
1972 Label test_mark_word;
1973 Register tmp = op->tmp()->as_register();
1974 __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1975 __ testl(tmp, markWord::unlocked_value);
1976 __ jccb(Assembler::notZero, test_mark_word);
1977 __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
1978 __ bind(test_mark_word);
1979 __ testl(tmp, markWord::null_free_array_bit_in_place);
1980 }
1981
1982 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1983 Label L_oops_equal;
1984 Label L_oops_not_equal;
1985 Label L_end;
1986
1987 Register left = op->left()->as_register();
1988 Register right = op->right()->as_register();
1989
1990 __ cmpptr(left, right);
1991 __ jcc(Assembler::equal, L_oops_equal);
1992
1993 // (1) Null check -- if one of the operands is null, the other must not be null (because
1994 // the two references are not equal), so they are not substitutable.
1995 // FIXME: do null check only if the operand is nullable
1996 __ testptr(left, right);
1997 __ jcc(Assembler::zero, L_oops_not_equal);
1998
1999 ciKlass* left_klass = op->left_klass();
2000 ciKlass* right_klass = op->right_klass();
2001
2002 // (2) Inline type check -- if either of the operands is not an inline type,
2003 // they are not substitutable. We do this only if we are not sure that the
2004 // operands are inline types.
2005 if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
2006 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2007 Register tmp1 = op->tmp1()->as_register();
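// Start from the inline-type bit pattern and AND in both mark words: the result still
// equals the pattern only if every pattern bit is set in both marks, i.e. both operands
// are inline-type instances.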
2008 __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2009 __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2010 __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2011 __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2012 __ jcc(Assembler::notEqual, L_oops_not_equal);
2013 }
2014
2015 // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2016 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
2017 // No need to load klass -- the operands are statically known to be the same inline klass.
2018 __ jmp(*op->stub()->entry());
2019 } else {
2020 Register left_klass_op = op->left_klass_op()->as_register();
2021 Register right_klass_op = op->right_klass_op()->as_register();
2022
2023 if (UseCompressedClassPointers) {
2024 __ movl(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
2025 __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2026 __ cmpl(left_klass_op, right_klass_op);
2027 } else {
2028 __ movptr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
2029 __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2030 __ cmpptr(left_klass_op, right_klass_op);
2031 }
2032
2033 __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
2034 // fall through to L_oops_not_equal
2035 }
2036
2037 __ bind(L_oops_not_equal);
2038 move(op->not_equal_result(), op->result_opr());
2039 __ jmp(L_end);
2040
2041 __ bind(L_oops_equal);
2042 move(op->equal_result(), op->result_opr());
2043 __ jmp(L_end);
2044
2045 // We've returned from the stub. RAX contains 0x0 IFF the two
2046 // operands are not substitutable. (Don't compare against 0x1 in case the
2047 // C compiler is naughty)
2048 __ bind(*op->stub()->continuation());
2049 __ cmpl(rax, 0);
2050 __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
2051 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
2052 // fall-through
2053 __ bind(L_end);
2054 }
2055
2056 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2057 if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
2058 assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
2059 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
2060 assert(op->new_value()->as_register_lo() == rbx, "wrong register");
2061 assert(op->new_value()->as_register_hi() == rcx, "wrong register");
2062 Register addr = op->addr()->as_register();
2063 __ lock();
2064 NOT_LP64(__ cmpxchg8(Address(addr, 0)));
2065
2066 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
2067 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
2068 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2069 Register newval = op->new_value()->as_register();
2070 Register cmpval = op->cmp_value()->as_register();
2071 assert(cmpval == rax, "wrong register");
2072 assert(newval != noreg, "new val must be register");
2073 assert(cmpval != newval, "cmp and new values must be in different registers");
2074 assert(cmpval != addr, "cmp and addr must be in different registers");
2095 __ cmpxchgl(newval, Address(addr, 0));
2096 }
2097 #ifdef _LP64
2098 } else if (op->code() == lir_cas_long) {
2099 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2100 Register newval = op->new_value()->as_register_lo();
2101 Register cmpval = op->cmp_value()->as_register_lo();
2102 assert(cmpval == rax, "wrong register");
2103 assert(newval != noreg, "new val must be register");
2104 assert(cmpval != newval, "cmp and new values must be in different registers");
2105 assert(cmpval != addr, "cmp and addr must be in different registers");
2106 assert(newval != addr, "new value and addr must be in different registers");
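// lock cmpxchg compares rax (cmpval) with the memory operand; on a match it stores newval,
// otherwise it loads the current memory value into rax. ZF is set iff the exchange happened.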
2107 __ lock();
2108 __ cmpxchgq(newval, Address(addr, 0));
2109 #endif // _LP64
2110 } else {
2111 Unimplemented();
2112 }
2113 }
2114
2115 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
2116 assert(dst->is_cpu_register(), "must be");
2117 assert(dst->type() == src->type(), "must be");
2118
2119 if (src->is_cpu_register()) {
2120 reg2reg(src, dst);
2121 } else if (src->is_stack()) {
2122 stack2reg(src, dst, dst->type());
2123 } else if (src->is_constant()) {
2124 const2reg(src, dst, lir_patch_none, nullptr);
2125 } else {
2126 ShouldNotReachHere();
2127 }
2128 }
2129
2130 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
2131 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
2132 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
2133
2134 Assembler::Condition acond, ncond;
2135 switch (condition) {
2136 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2137 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2138 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2139 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
2140 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
2141 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
2142 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
2143 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
2144 default: acond = Assembler::equal; ncond = Assembler::notEqual;
2145 ShouldNotReachHere();
2146 }
2147
2148 if (opr1->is_cpu_register()) {
2149 reg2reg(opr1, result);
2987 int offset = __ offset();
2988 switch (code) {
2989 case lir_static_call:
2990 case lir_optvirtual_call:
2991 case lir_dynamic_call:
2992 offset += NativeCall::displacement_offset;
2993 break;
2994 case lir_icvirtual_call:
2995 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2996 break;
2997 default: ShouldNotReachHere();
2998 }
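// Pad so that the 32-bit displacement of the upcoming call ends up word aligned; the
// matching asserts in call()/ic_call() below check this, presumably so the call target
// can be patched atomically.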
2999 __ align(BytesPerWord, offset);
3000 }
3001
3002
3003 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
3004 assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
3005 "must be aligned");
3006 __ call(AddressLiteral(op->addr(), rtype));
3007 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3008 __ post_call_nop();
3009 }
3010
3011
3012 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
3013 __ ic_call(op->addr());
3014 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3015 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
3016 "must be aligned");
3017 __ post_call_nop();
3018 }
3019
3020
3021 void LIR_Assembler::emit_static_call_stub() {
3022 address call_pc = __ pc();
3023 address stub = __ start_a_stub(call_stub_size());
3024 if (stub == nullptr) {
3025 bailout("static call stub overflow");
3026 return;
3027 }
3028
3029 int start = __ offset();
3030
3031 // make sure that the displacement word of the call ends up word aligned
3032 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
3033 __ relocate(static_stub_Relocation::spec(call_pc));
3034 __ mov_metadata(rbx, (Metadata*)nullptr);
3175 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3176 }
3177
3178
3179 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3180 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3181 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3182 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3183 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
3184 }
3185
3186
3187 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3188 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3189 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3190 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3191 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
3192 }
3193
3194
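// Branches to the slow path when a source array might be flat or a destination array
// might be null-free (per the tests below), optionally null-checking the oop first.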
3195 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
3196 if (null_check) {
3197 __ testptr(obj, obj);
3198 __ jcc(Assembler::zero, *slow_path->entry());
3199 }
3200 if (is_dest) {
3201 __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
3202 } else {
3203 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
3204 }
3205 }
3206
3207
3208 // This code replaces a call to arraycopy; no exceptions may be thrown
3209 // in this code; they must be thrown in the System.arraycopy
3210 // activation frame. We could save some checks if this were not the case.
3211 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3212 ciArrayKlass* default_type = op->expected_type();
3213 Register src = op->src()->as_register();
3214 Register dst = op->dst()->as_register();
3215 Register src_pos = op->src_pos()->as_register();
3216 Register dst_pos = op->dst_pos()->as_register();
3217 Register length = op->length()->as_register();
3218 Register tmp = op->tmp()->as_register();
3219 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3220
3221 CodeStub* stub = op->stub();
3222 int flags = op->flags();
3223 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
3224 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3225
3226 if (flags & LIR_OpArrayCopy::always_slow_path) {
3227 __ jmp(*stub->entry());
3228 __ bind(*stub->continuation());
3229 return;
3230 }
3231
3232 // if we don't know anything, just go through the generic arraycopy
3233 if (default_type == nullptr) {
3234 // save outgoing arguments on stack in case call to System.arraycopy is needed
3235 // HACK ALERT. This code used to push the parameters in a hardwired fashion
3236 // for interpreter calling conventions. Now we have to do it in the new style conventions.
3237 // For the moment, until C1 gets the new register allocator, I just force all the
3238 // args to the right place (except the register args) and then on the back side
3239 // reload the register args properly if we go the slow path. Yuck
3240
3241 // These are proper for the calling convention
3242 store_parameter(length, 2);
3243 store_parameter(dst_pos, 1);
3244 store_parameter(dst, 0);
3245
3246 // these are just temporary placements until we need to reload
3247 store_parameter(src_pos, 3);
3248 store_parameter(src, 4);
3249 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3250
3251 address copyfunc_addr = StubRoutines::generic_arraycopy();
3305 __ mov(tmp, rax);
3306 __ xorl(tmp, -1);
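// On a partial copy the generic stub is assumed to return ~(elements already copied) in
// rax; flipping the bits recovers that count so length/positions can be adjusted before
// retrying via the slow path below.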
3307
3308 // Reload values from the stack so they are where the stub
3309 // expects them.
3310 __ movptr (dst, Address(rsp, 0*BytesPerWord));
3311 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3312 __ movptr (length, Address(rsp, 2*BytesPerWord));
3313 __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3314 __ movptr (src, Address(rsp, 4*BytesPerWord));
3315
3316 __ subl(length, tmp);
3317 __ addl(src_pos, tmp);
3318 __ addl(dst_pos, tmp);
3319 __ jmp(*stub->entry());
3320
3321 __ bind(*stub->continuation());
3322 return;
3323 }
3324
3325 // Handle inline type arrays
3326 if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
3327 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
3328 }
3329 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
3330 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
3331 }
3332
3333 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3334
3335 int elem_size = type2aelembytes(basic_type);
3336 Address::ScaleFactor scale;
3337
3338 switch (elem_size) {
3339 case 1 :
3340 scale = Address::times_1;
3341 break;
3342 case 2 :
3343 scale = Address::times_2;
3344 break;
3345 case 4 :
3346 scale = Address::times_4;
3347 break;
3348 case 8 :
3349 scale = Address::times_8;
3350 break;
3351 default:
3352 scale = Address::no_scale;
3942 // first time here. Set profile type.
3943 __ movptr(mdo_addr, tmp);
3944 #ifdef ASSERT
3945 __ andptr(tmp, TypeEntries::type_klass_mask);
3946 __ verify_klass_ptr(tmp);
3947 #endif
3948 } else {
3949 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3950 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3951
3952 __ testptr(mdo_addr, TypeEntries::type_unknown);
3953 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3954
3955 __ orptr(mdo_addr, TypeEntries::type_unknown);
3956 }
3957 }
3958 }
3959 __ bind(next);
3960 }
3961
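// Profiles whether an inline-type value was observed at this point: null values (unless
// statically known non-null) and non-inline-type oops skip the update; otherwise the given
// flag bit is set in the MDO slot.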
3962 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3963 Register obj = op->obj()->as_register();
3964 Register tmp = op->tmp()->as_pointer_register();
3965 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3966 bool not_null = op->not_null();
3967 int flag = op->flag();
3968
3969 Label not_inline_type;
3970 if (!not_null) {
3971 __ testptr(obj, obj);
3972 __ jccb(Assembler::zero, not_inline_type);
3973 }
3974
3975 __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3976
3977 __ orb(mdo_addr, flag);
3978
3979 __ bind(not_inline_type);
3980 }
3981
3982 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3983 Unimplemented();
3984 }
3985
3986
3987 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3988 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3989 }
3990
3991
3992 void LIR_Assembler::align_backward_branch_target() {
3993 __ align(BytesPerWord);
3994 }
3995
3996
3997 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3998 if (left->is_single_cpu()) {
3999 __ negl(left->as_register());
4000 move_regs(left->as_register(), dest->as_register());
4001
4225 }
4226
4227 void LIR_Assembler::membar_storeload() {
4228 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4229 }
4230
4231 void LIR_Assembler::on_spin_wait() {
4232 __ pause ();
4233 }
4234
4235 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4236 assert(result_reg->is_register(), "check");
4237 #ifdef _LP64
4238 // __ get_thread(result_reg->as_register_lo());
4239 __ mov(result_reg->as_register(), r15_thread);
4240 #else
4241 __ get_thread(result_reg->as_register());
4242 #endif // _LP64
4243 }
4244
4245 void LIR_Assembler::check_orig_pc() {
4246 __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD);
4247 }
4248
4249 void LIR_Assembler::peephole(LIR_List*) {
4250 // do nothing for now
4251 }
4252
4253 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4254 assert(data == dest, "xchg/xadd uses only 2 operands");
4255
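// lock xadd atomically adds the register to memory and leaves the old memory value in the
// register; xchg with a memory operand is implicitly locked, so no lock prefix is needed.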
4256 if (data->type() == T_INT) {
4257 if (code == lir_xadd) {
4258 __ lock();
4259 __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4260 } else {
4261 __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4262 }
4263 } else if (data->is_oop()) {
4264 assert (code == lir_xchg, "xadd for oops");
4265 Register obj = data->as_register();
4266 #ifdef _LP64
4267 if (UseCompressedOops) {
|