src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"

  35 #include "ci/ciInstance.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_x86.hpp"

  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_x86.inline.hpp"
  47 
  48 
  49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  50 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  51 // fast versions of NegF/NegD and AbsF/AbsD.
  52 
  53 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  55   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  56   // for the 128-bit operands of SSE instructions.
  57   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  58   // Store the value into a 128-bit operand.
  59   operand[0] = lo;
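
For orientation, the following is a minimal standalone sketch (plain C++, not HotSpot code; names are illustrative) of the round-down trick double_quadword relies on: the backing pool is assumed to be over-allocated, so masking an interior address with ~0xF always yields a 16-byte aligned slot that still lies inside the buffer.

#include <cassert>
#include <cstdint>

// Over-allocate by two extra 64-bit slots so an aligned pair always fits.
static int64_t pool[2 + 2];

static int64_t* aligned_quadword(int64_t lo, int64_t hi) {
  // Round an interior address down to a 16-byte boundary, as double_quadword does.
  int64_t* operand = (int64_t*)(((uintptr_t)&pool[2]) & ~(uintptr_t)0xF);
  assert(((uintptr_t)operand & 0xF) == 0 && operand >= pool);
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}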

 445     __ bind(*stub->continuation());
 446   }
 447 
 448   if (compilation()->env()->dtrace_method_probes()) {
 449 #ifdef _LP64
 450     __ mov(rdi, r15_thread);
 451     __ mov_metadata(rsi, method()->constant_encoding());
 452 #else
 453     __ get_thread(rax);
 454     __ movptr(Address(rsp, 0), rax);
 455     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
 456 #endif
 457     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 458   }
 459 
 460   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 461     __ mov(rax, rbx);  // Restore the exception
 462   }
 463 
 464   // remove the activation and dispatch to the unwind handler
 465   __ remove_frame(initial_frame_size_in_bytes());
 466   __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 467 
 468   // Emit the slow path assembly
 469   if (stub != nullptr) {
 470     stub->emit_code(this);
 471   }
 472 
 473   return offset;
 474 }
 475 
 476 
 477 int LIR_Assembler::emit_deopt_handler() {
 478   // generate code for the deopt handler
 479   address handler_base = __ start_a_stub(deopt_handler_size());
 480   if (handler_base == nullptr) {
 481     // not enough space left for the handler
 482     bailout("deopt handler overflow");
 483     return -1;
 484   }
 485 
 486   int offset = code_offset();
 487   InternalAddress here(__ pc());
 488 
 489   __ pushptr(here.addr(), rscratch1);
 490   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 491   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 492   __ end_a_stub();
 493 
 494   return offset;
 495 }
 496 
 497 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 498   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
 499   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 500     assert(result->fpu() == 0, "result must already be on TOS");
 501   }
 502 
 503   // Pop the stack before the safepoint code
 504   __ remove_frame(initial_frame_size_in_bytes());
 505 
 506   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 507     __ reserved_stack_check();
 508   }
 509 
 510   // Note: we do not need to round double result; float result has the right precision
 511   // the poll sets the condition code, but no data registers
 512 
 513 #ifdef _LP64
 514   const Register thread = r15_thread;
 515 #else
 516   const Register thread = rbx;
 517   __ get_thread(thread);
 518 #endif
 519   code_stub->set_safepoint_offset(__ offset());
 520   __ relocate(relocInfo::poll_return_type);
 521   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 522   __ ret(0);
 523 }
 524 
 525 




 526 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 527   guarantee(info != nullptr, "Shouldn't be null");
 528   int offset = __ offset();
 529 #ifdef _LP64
 530   const Register poll_addr = rscratch1;
 531   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 532 #else
 533   assert(tmp->is_cpu_register(), "needed");
 534   const Register poll_addr = tmp->as_register();
 535   __ get_thread(poll_addr);
 536   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 537 #endif
 538   add_debug_info_for_branch(info);
 539   __ relocate(relocInfo::poll_type);
 540   address pre_pc = __ pc();
 541   __ testl(rax, Address(poll_addr, 0));
 542   address post_pc = __ pc();
 543   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 544   return offset;
 545 }

1581     // init_state needs acquire, but x86 is TSO, and so we are already good.
1582     __ cmpb(Address(op->klass()->as_register(),
1583                     InstanceKlass::init_state_offset()),
1584                     InstanceKlass::fully_initialized);
1585     __ jcc(Assembler::notEqual, *op->stub()->entry());
1586   }
1587   __ allocate_object(op->obj()->as_register(),
1588                      op->tmp1()->as_register(),
1589                      op->tmp2()->as_register(),
1590                      op->header_size(),
1591                      op->object_size(),
1592                      op->klass()->as_register(),
1593                      *op->stub()->entry());
1594   __ bind(*op->stub()->continuation());
1595 }
1596 
1597 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1598   Register len =  op->len()->as_register();
1599   LP64_ONLY( __ movslq(len, len); )
1600 
1601   if (UseSlowPath ||
1602       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1603       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1604     __ jmp(*op->stub()->entry());
1605   } else {
1606     Register tmp1 = op->tmp1()->as_register();
1607     Register tmp2 = op->tmp2()->as_register();
1608     Register tmp3 = op->tmp3()->as_register();
1609     if (len == tmp1) {
1610       tmp1 = tmp3;
1611     } else if (len == tmp2) {
1612       tmp2 = tmp3;
1613     } else if (len == tmp3) {
1614       // everything is ok
1615     } else {
1616       __ mov(tmp3, len);
1617     }
1618     __ allocate_array(op->obj()->as_register(),
1619                       len,
1620                       tmp1,
1621                       tmp2,

1680     assert(data != nullptr,                "need data for type check");
1681     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1682   }
1683   Label* success_target = success;
1684   Label* failure_target = failure;
1685 
1686   if (obj == k_RInfo) {
1687     k_RInfo = dst;
1688   } else if (obj == klass_RInfo) {
1689     klass_RInfo = dst;
1690   }
1691   if (k->is_loaded() && !UseCompressedClassPointers) {
1692     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1693   } else {
1694     Rtmp1 = op->tmp3()->as_register();
1695     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1696   }
1697 
1698   assert_different_registers(obj, k_RInfo, klass_RInfo);
1699 
1700   __ testptr(obj, obj);
1701   if (op->should_profile()) {
1702     Label not_null;
1703     Register mdo  = klass_RInfo;
1704     __ mov_metadata(mdo, md->constant_encoding());
1705     __ jccb(Assembler::notEqual, not_null);
1706     // Object is null; update MDO and exit
1707     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1708     int header_bits = BitData::null_seen_byte_constant();
1709     __ orb(data_addr, header_bits);
1710     __ jmp(*obj_is_null);
1711     __ bind(not_null);
1712 
1713     Label update_done;
1714     Register recv = k_RInfo;
1715     __ load_klass(recv, obj, tmp_load_klass);
1716     type_profile_helper(mdo, md, data, recv, &update_done);
1717 
1718     Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1719     __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
1720 
1721     __ bind(update_done);
1722   } else {
1723     __ jcc(Assembler::equal, *obj_is_null);


1724   }
1725 
1726   if (!k->is_loaded()) {
1727     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1728   } else {
1729 #ifdef _LP64
1730     __ mov_metadata(k_RInfo, k->constant_encoding());
1731 #endif // _LP64
1732   }
1733   __ verify_oop(obj);
1734 
1735   if (op->fast_check()) {
1736     // get object class
1737     // not a safepoint as obj null check happens earlier
1738 #ifdef _LP64
1739     if (UseCompressedClassPointers) {
1740       __ load_klass(Rtmp1, obj, tmp_load_klass);
1741       __ cmpptr(k_RInfo, Rtmp1);
1742     } else {
1743       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1895         __ mov(dst, obj);
1896       }
1897     } else
1898       if (code == lir_instanceof) {
1899         Register obj = op->object()->as_register();
1900         Register dst = op->result_opr()->as_register();
1901         Label success, failure, done;
1902         emit_typecheck_helper(op, &success, &failure, &failure);
1903         __ bind(failure);
1904         __ xorptr(dst, dst);
1905         __ jmpb(done);
1906         __ bind(success);
1907         __ movptr(dst, 1);
1908         __ bind(done);
1909       } else {
1910         ShouldNotReachHere();
1911       }
1912 
1913 }
1914 
1915 
1916 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1917   if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
1918     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1919     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1920     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1921     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1922     Register addr = op->addr()->as_register();
1923     __ lock();
1924     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1925 
1926   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1927     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1928     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1929     Register newval = op->new_value()->as_register();
1930     Register cmpval = op->cmp_value()->as_register();
1931     assert(cmpval == rax, "wrong register");
1932     assert(newval != noreg, "new val must be register");
1933     assert(cmpval != newval, "cmp and new values must be in different registers");
1934     assert(cmpval != addr, "cmp and addr must be in different registers");

1955       __ cmpxchgl(newval, Address(addr, 0));
1956     }
1957 #ifdef _LP64
1958   } else if (op->code() == lir_cas_long) {
1959     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1960     Register newval = op->new_value()->as_register_lo();
1961     Register cmpval = op->cmp_value()->as_register_lo();
1962     assert(cmpval == rax, "wrong register");
1963     assert(newval != noreg, "new val must be register");
1964     assert(cmpval != newval, "cmp and new values must be in different registers");
1965     assert(cmpval != addr, "cmp and addr must be in different registers");
1966     assert(newval != addr, "new value and addr must be in different registers");
1967     __ lock();
1968     __ cmpxchgq(newval, Address(addr, 0));
1969 #endif // _LP64
1970   } else {
1971     Unimplemented();
1972   }
1973 }
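
The asserts above encode the cmpxchg register contract: the compare value must live in rax, and on failure the value found in memory is loaded back into rax. A hedged standalone sketch of the same semantics using std::atomic (not the LIR operation itself):

#include <atomic>
#include <cassert>

// 'expected' plays the role of rax: on failure it is rewritten with the current
// memory contents, just as lock cmpxchg reloads rax.
static bool cas_int(std::atomic<int>& cell, int& expected, int new_value) {
  return cell.compare_exchange_strong(expected, new_value);
}

int main() {
  std::atomic<int> cell(41);
  int expected = 41;
  assert(cas_int(cell, expected, 42));    // succeeds, cell is now 42
  expected = 41;
  assert(!cas_int(cell, expected, 43));   // fails, 'expected' becomes 42
  assert(expected == 42 && cell.load() == 42);
  return 0;
}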
1974 
1975 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
1976                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
1977   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
1978 
1979   Assembler::Condition acond, ncond;
1980   switch (condition) {
1981     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1982     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1983     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1984     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1985     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1986     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1987     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1988     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1989     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1990                                 ShouldNotReachHere();
1991   }
1992 
1993   if (opr1->is_cpu_register()) {
1994     reg2reg(opr1, result);
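
The switch above prepares both the condition (acond) and its negation (ncond) because the result is materialized from opr1 first and then conditionally overwritten with opr2. A short standalone sketch of that branchless-select shape (plain C++ stand-in, not the Assembler cmov API):

// result = condition ? opr1 : opr2, built the way the code above builds it.
static int cmove_select(bool condition, int opr1, int opr2) {
  int result = opr1;     // reg2reg(opr1, result)
  if (!condition) {      // cmov under the negated condition (ncond)
    result = opr2;
  }
  return result;
}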

2832   int offset = __ offset();
2833   switch (code) {
2834   case lir_static_call:
2835   case lir_optvirtual_call:
2836   case lir_dynamic_call:
2837     offset += NativeCall::displacement_offset;
2838     break;
2839   case lir_icvirtual_call:
2840     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
2841     break;
2842   default: ShouldNotReachHere();
2843   }
2844   __ align(BytesPerWord, offset);
2845 }
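
align_call above pads the instruction stream so that the 32-bit displacement of the upcoming call starts on a word boundary, which allows the displacement to be patched atomically later. A standalone sketch of the padding computation (helper name is illustrative, mirroring what __ align(BytesPerWord, offset) achieves):

#include <cassert>

// Padding bytes needed so that (code_offset + displacement_offset) becomes a
// multiple of bytes_per_word.
static int call_alignment_padding(int code_offset, int displacement_offset, int bytes_per_word) {
  int misalignment = (code_offset + displacement_offset) % bytes_per_word;
  return misalignment == 0 ? 0 : bytes_per_word - misalignment;
}

int main() {
  // A call emitted at offset 5 whose displacement starts 1 byte in: pad by 2 so
  // the displacement lands at offset 8.
  assert((5 + call_alignment_padding(5, 1, 8) + 1) % 8 == 0);
  return 0;
}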
2846 
2847 
2848 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2849   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2850          "must be aligned");
2851   __ call(AddressLiteral(op->addr(), rtype));
2852   add_call_info(code_offset(), op->info());
2853   __ post_call_nop();
2854 }
2855 
2856 
2857 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2858   __ ic_call(op->addr());
2859   add_call_info(code_offset(), op->info());
2860   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2861          "must be aligned");
2862   __ post_call_nop();
2863 }
2864 
2865 
2866 void LIR_Assembler::emit_static_call_stub() {
2867   address call_pc = __ pc();
2868   address stub = __ start_a_stub(call_stub_size());
2869   if (stub == nullptr) {
2870     bailout("static call stub overflow");
2871     return;
2872   }
2873 
2874   int start = __ offset();
2875 
2876   // make sure that the displacement word of the call ends up word aligned
2877   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
2878   __ relocate(static_stub_Relocation::spec(call_pc));
2879   __ mov_metadata(rbx, (Metadata*)nullptr);

3020   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3021 }
3022 
3023 
3024 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3025   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3026   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3027   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3028   __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
3029 }
3030 
3031 
3032 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3033   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3034   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3035   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3036   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
3037 }
3038 
3039 
3040 // This code replaces a call to arraycopy; no exceptions may
3041 // be thrown in this code: they must be thrown in the System.arraycopy
3042 // activation frame. We could save some checks if this were not the case.
3043 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3044   ciArrayKlass* default_type = op->expected_type();
3045   Register src = op->src()->as_register();
3046   Register dst = op->dst()->as_register();
3047   Register src_pos = op->src_pos()->as_register();
3048   Register dst_pos = op->dst_pos()->as_register();
3049   Register length  = op->length()->as_register();
3050   Register tmp = op->tmp()->as_register();
3051   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3052 
3053   CodeStub* stub = op->stub();
3054   int flags = op->flags();
3055   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
3056   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3057 
3058   // if we don't know anything, just go through the generic arraycopy
3059   if (default_type == nullptr) {
3060     // save outgoing arguments on stack in case call to System.arraycopy is needed
3061     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3062     // for interpreter calling conventions. Now we have to do it in new style conventions.
3063     // For the moment until C1 gets the new register allocator I just force all the
3064     // args to the right place (except the register args) and then on the back side
3065     // reload the register args properly if we go slow path. Yuck
3066 
3067     // These are proper for the calling convention
3068     store_parameter(length, 2);
3069     store_parameter(dst_pos, 1);
3070     store_parameter(dst, 0);
3071 
3072     // these are just temporary placements until we need to reload
3073     store_parameter(src_pos, 3);
3074     store_parameter(src, 4);
3075     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3076 
3077     address copyfunc_addr = StubRoutines::generic_arraycopy();

3131     __ mov(tmp, rax);
3132     __ xorl(tmp, -1);
3133 
3134     // Reload values from the stack so they are where the stub
3135     // expects them.
3136     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3137     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3138     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3139     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3140     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3141 
3142     __ subl(length, tmp);
3143     __ addl(src_pos, tmp);
3144     __ addl(dst_pos, tmp);
3145     __ jmp(*stub->entry());
3146 
3147     __ bind(*stub->continuation());
3148     return;
3149   }
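
The xorl(tmp, -1) above is a one's complement: if the generic arraycopy stub fails partway, it reports the number of elements it did copy as ~count, and the adjusted positions let the slow path resume where the stub stopped. A minimal standalone sketch of that adjustment (illustrative names, assuming the ~count failure-return convention the adjustment above implies):

#include <cassert>

struct CopyArgs { int src_pos; int dst_pos; int length; };

// Mirrors the reload-and-adjust sequence above: tmp = ~stub_result is the number
// of elements already copied, so skip them before retrying on the slow path.
static void adjust_after_partial_copy(CopyArgs& a, int stub_result) {
  int copied = ~stub_result;   // same effect as 'xorl(tmp, -1)'
  a.length  -= copied;
  a.src_pos += copied;
  a.dst_pos += copied;
}

int main() {
  CopyArgs a = {0, 0, 10};
  adjust_after_partial_copy(a, ~4);   // stub copied 4 elements, then bailed out
  assert(a.src_pos == 4 && a.dst_pos == 4 && a.length == 6);
  return 0;
}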
3150 
3151   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3152 
3153   int elem_size = type2aelembytes(basic_type);
3154   Address::ScaleFactor scale;
3155 
3156   switch (elem_size) {
3157     case 1 :
3158       scale = Address::times_1;
3159       break;
3160     case 2 :
3161       scale = Address::times_2;
3162       break;
3163     case 4 :
3164       scale = Address::times_4;
3165       break;
3166     case 8 :
3167       scale = Address::times_8;
3168       break;
3169     default:
3170       scale = Address::no_scale;

3760         // first time here. Set profile type.
3761         __ movptr(mdo_addr, tmp);
3762 #ifdef ASSERT
3763         __ andptr(tmp, TypeEntries::type_klass_mask);
3764         __ verify_klass_ptr(tmp);
3765 #endif
3766       } else {
3767         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3768                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3769 
3770         __ testptr(mdo_addr, TypeEntries::type_unknown);
3771         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3772 
3773         __ orptr(mdo_addr, TypeEntries::type_unknown);
3774       }
3775     }
3776   }
3777   __ bind(next);
3778 }
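
For orientation, here is a hedged standalone sketch of the profile-cell update this tail performs; the constants are illustrative stand-ins, not the real TypeEntries values. A single pointer-sized cell records the first klass observed, and a conflicting observation sets the type_unknown bit so later queries treat the site as polluted.

#include <cstdint>

static const uintptr_t kTypeUnknown   = 0x2;              // illustrative flag bit
static const uintptr_t kTypeKlassMask = ~(uintptr_t)0x3;  // illustrative klass mask

static void record_profiled_type(uintptr_t* cell, uintptr_t klass) {
  if ((*cell & kTypeKlassMask) == 0) {
    *cell = klass;                 // first time here: set profile type
  } else if ((*cell & kTypeKlassMask) != klass) {
    *cell |= kTypeUnknown;         // a different klass was seen before: mark unknown
  }
}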
3779 
3780 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3781   Unimplemented();
3782 }
3783 
3784 
3785 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3786   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3787 }
3788 
3789 
3790 void LIR_Assembler::align_backward_branch_target() {
3791   __ align(BytesPerWord);
3792 }
3793 
3794 
3795 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3796   if (left->is_single_cpu()) {
3797     __ negl(left->as_register());
3798     move_regs(left->as_register(), dest->as_register());
3799 

4023 }
4024 
4025 void LIR_Assembler::membar_storeload() {
4026   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4027 }
4028 
4029 void LIR_Assembler::on_spin_wait() {
4030   __ pause ();
4031 }
4032 
4033 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4034   assert(result_reg->is_register(), "check");
4035 #ifdef _LP64
4036   // __ get_thread(result_reg->as_register_lo());
4037   __ mov(result_reg->as_register(), r15_thread);
4038 #else
4039   __ get_thread(result_reg->as_register());
4040 #endif // _LP64
4041 }
4042 



4043 
4044 void LIR_Assembler::peephole(LIR_List*) {
4045   // do nothing for now
4046 }
4047 
4048 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4049   assert(data == dest, "xchg/xadd uses only 2 operands");
4050 
4051   if (data->type() == T_INT) {
4052     if (code == lir_xadd) {
4053       __ lock();
4054       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4055     } else {
4056       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4057     }
4058   } else if (data->is_oop()) {
4059     assert (code == lir_xchg, "xadd for oops");
4060     Register obj = data->as_register();
4061 #ifdef _LP64
4062     if (UseCompressedOops) {

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_x86.inline.hpp"
  49 
  50 
  51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  52 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  53 // fast versions of NegF/NegD and AbsF/AbsD.
  54 
  55 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  56 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  57   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  58   // for the 128-bit operands of SSE instructions.
  59   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  60   // Store the value into a 128-bit operand.
  61   operand[0] = lo;

 447     __ bind(*stub->continuation());
 448   }
 449 
 450   if (compilation()->env()->dtrace_method_probes()) {
 451 #ifdef _LP64
 452     __ mov(rdi, r15_thread);
 453     __ mov_metadata(rsi, method()->constant_encoding());
 454 #else
 455     __ get_thread(rax);
 456     __ movptr(Address(rsp, 0), rax);
 457     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
 458 #endif
 459     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 460   }
 461 
 462   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 463     __ mov(rax, rbx);  // Restore the exception
 464   }
 465 
 466   // remove the activation and dispatch to the unwind handler
 467   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 468   __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 469 
 470   // Emit the slow path assembly
 471   if (stub != nullptr) {
 472     stub->emit_code(this);
 473   }
 474 
 475   return offset;
 476 }
 477 
 478 
 479 int LIR_Assembler::emit_deopt_handler() {
 480   // generate code for the deopt handler
 481   address handler_base = __ start_a_stub(deopt_handler_size());
 482   if (handler_base == nullptr) {
 483     // not enough space left for the handler
 484     bailout("deopt handler overflow");
 485     return -1;
 486   }
 487 
 488   int offset = code_offset();
 489   InternalAddress here(__ pc());
 490 
 491   __ pushptr(here.addr(), rscratch1);
 492   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 493   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 494   __ end_a_stub();
 495 
 496   return offset;
 497 }
 498 
 499 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 500   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
 501   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 502     assert(result->fpu() == 0, "result must already be on TOS");
 503   }
 504   if (InlineTypeReturnedAsFields) {
 505   #ifndef _LP64
 506      Unimplemented();
 507   #endif
 508     // Check if we are returning a non-null inline type and load its fields into registers
 509     ciType* return_type = compilation()->method()->return_type();
 510     if (return_type->is_inlinetype()) {
 511       ciInlineKlass* vk = return_type->as_inline_klass();
 512       if (vk->can_be_returned_as_fields()) {
 513         address unpack_handler = vk->unpack_handler();
 514         assert(unpack_handler != nullptr, "must be");
 515         __ call(RuntimeAddress(unpack_handler));
 516       }
 517     } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
 518       Label skip;
 519       __ test_oop_is_not_inline_type(rax, rscratch1, skip);
 520 
 521       // Load fields from a buffered value with an inline class specific handler
 522       __ load_klass(rdi, rax, rscratch1);
 523       __ movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 524       __ movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
 525       // Unpack handler can be null if the inline type is not scalarizable in returns
 526       __ testptr(rdi, rdi);
 527       __ jcc(Assembler::zero, skip);
 528       __ call(rdi);
 529 
 530       __ bind(skip);
 531     }
 532     // At this point, rax points to the value object (for interpreter or C1 caller).
 533     // The fields of the object are copied into registers (for C2 caller).
 534   }
 535 
 536   // Pop the stack before the safepoint code
 537   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 538 
 539   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 540     __ reserved_stack_check();
 541   }
 542 
 543   // Note: we do not need to round double result; float result has the right precision
 544   // the poll sets the condition code, but no data registers
 545 
 546 #ifdef _LP64
 547   const Register thread = r15_thread;
 548 #else
 549   const Register thread = rbx;
 550   __ get_thread(thread);
 551 #endif
 552   code_stub->set_safepoint_offset(__ offset());
 553   __ relocate(relocInfo::poll_return_type);
 554   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 555   __ ret(0);
 556 }
 557 
 558 
 559 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 560   return (__ store_inline_type_fields_to_buf(vk, false));
 561 }
 562 
 563 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 564   guarantee(info != nullptr, "Shouldn't be null");
 565   int offset = __ offset();
 566 #ifdef _LP64
 567   const Register poll_addr = rscratch1;
 568   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 569 #else
 570   assert(tmp->is_cpu_register(), "needed");
 571   const Register poll_addr = tmp->as_register();
 572   __ get_thread(poll_addr);
 573   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 574 #endif
 575   add_debug_info_for_branch(info);
 576   __ relocate(relocInfo::poll_type);
 577   address pre_pc = __ pc();
 578   __ testl(rax, Address(poll_addr, 0));
 579   address post_pc = __ pc();
 580   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 581   return offset;
 582 }

1618     // init_state needs acquire, but x86 is TSO, and so we are already good.
1619     __ cmpb(Address(op->klass()->as_register(),
1620                     InstanceKlass::init_state_offset()),
1621                     InstanceKlass::fully_initialized);
1622     __ jcc(Assembler::notEqual, *op->stub()->entry());
1623   }
1624   __ allocate_object(op->obj()->as_register(),
1625                      op->tmp1()->as_register(),
1626                      op->tmp2()->as_register(),
1627                      op->header_size(),
1628                      op->object_size(),
1629                      op->klass()->as_register(),
1630                      *op->stub()->entry());
1631   __ bind(*op->stub()->continuation());
1632 }
1633 
1634 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1635   Register len =  op->len()->as_register();
1636   LP64_ONLY( __ movslq(len, len); )
1637 
1638   if (UseSlowPath || op->is_null_free() ||
1639       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1640       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1641     __ jmp(*op->stub()->entry());
1642   } else {
1643     Register tmp1 = op->tmp1()->as_register();
1644     Register tmp2 = op->tmp2()->as_register();
1645     Register tmp3 = op->tmp3()->as_register();
1646     if (len == tmp1) {
1647       tmp1 = tmp3;
1648     } else if (len == tmp2) {
1649       tmp2 = tmp3;
1650     } else if (len == tmp3) {
1651       // everything is ok
1652     } else {
1653       __ mov(tmp3, len);
1654     }
1655     __ allocate_array(op->obj()->as_register(),
1656                       len,
1657                       tmp1,
1658                       tmp2,

1717     assert(data != nullptr,                "need data for type check");
1718     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1719   }
1720   Label* success_target = success;
1721   Label* failure_target = failure;
1722 
1723   if (obj == k_RInfo) {
1724     k_RInfo = dst;
1725   } else if (obj == klass_RInfo) {
1726     klass_RInfo = dst;
1727   }
1728   if (k->is_loaded() && !UseCompressedClassPointers) {
1729     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1730   } else {
1731     Rtmp1 = op->tmp3()->as_register();
1732     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1733   }
1734 
1735   assert_different_registers(obj, k_RInfo, klass_RInfo);
1736 
1737   if (op->need_null_check()) {
1738     __ testptr(obj, obj);
1739     if (op->should_profile()) {
1740       Label not_null;
1741       Register mdo  = klass_RInfo;
1742       __ mov_metadata(mdo, md->constant_encoding());
1743       __ jccb(Assembler::notEqual, not_null);
1744       // Object is null; update MDO and exit
1745       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1746       int header_bits = BitData::null_seen_byte_constant();
1747       __ orb(data_addr, header_bits);
1748       __ jmp(*obj_is_null);
1749       __ bind(not_null);
1750 
1751       Label update_done;
1752       Register recv = k_RInfo;
1753       __ load_klass(recv, obj, tmp_load_klass);
1754       type_profile_helper(mdo, md, data, recv, &update_done);
1755 
1756       Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1757       __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
1758 
1759       __ bind(update_done);
1760     } else {
1761       __ jcc(Assembler::equal, *obj_is_null);
1762     }
1763   }
1764 
1765   if (!k->is_loaded()) {
1766     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1767   } else {
1768 #ifdef _LP64
1769     __ mov_metadata(k_RInfo, k->constant_encoding());
1770 #endif // _LP64
1771   }
1772   __ verify_oop(obj);
1773 
1774   if (op->fast_check()) {
1775     // get object class
1776     // not a safepoint as obj null check happens earlier
1777 #ifdef _LP64
1778     if (UseCompressedClassPointers) {
1779       __ load_klass(Rtmp1, obj, tmp_load_klass);
1780       __ cmpptr(k_RInfo, Rtmp1);
1781     } else {
1782       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1934         __ mov(dst, obj);
1935       }
1936     } else
1937       if (code == lir_instanceof) {
1938         Register obj = op->object()->as_register();
1939         Register dst = op->result_opr()->as_register();
1940         Label success, failure, done;
1941         emit_typecheck_helper(op, &success, &failure, &failure);
1942         __ bind(failure);
1943         __ xorptr(dst, dst);
1944         __ jmpb(done);
1945         __ bind(success);
1946         __ movptr(dst, 1);
1947         __ bind(done);
1948       } else {
1949         ShouldNotReachHere();
1950       }
1951 
1952 }
1953 
1954 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1955   // We are loading/storing from/to an array that *may* be a flat array (the
1956   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1957   // If this array is a flat array, take the slow path.
1958   __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1959   if (!op->value()->is_illegal()) {
1960     // The array is not a flat array, but it might be null-free. If we are storing
1961     // a null into a null-free array, take the slow path (which will throw NPE).
1962     Label skip;
1963     __ cmpptr(op->value()->as_register(), NULL_WORD);
1964     __ jcc(Assembler::notEqual, skip);
1965     __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1966     __ bind(skip);
1967   }
1968 }
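
In plain terms, the check above takes the slow path when the array turns out to be flat and, for stores, also when a null is about to be written into a null-free array. A standalone sketch of that decision with toy types (not VM oops):

struct ToyArray { bool is_flat; bool is_null_free; };

// Mirrors emit_opFlattenedArrayCheck: a flat array always goes slow; a null store
// into a null-free array goes slow so the runtime can throw NPE.
static bool take_slow_path(const ToyArray& array, bool is_store, bool value_is_null) {
  if (array.is_flat) return true;
  if (is_store && value_is_null && array.is_null_free) return true;
  return false;
}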
1969 
1970 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1971   // We are storing into an array that *may* be null-free (the declared type is
1972   // Object[], abstract[], interface[] or VT.ref[]).
1973   Label test_mark_word;
1974   Register tmp = op->tmp()->as_register();
1975   __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1976   __ testl(tmp, markWord::unlocked_value);
1977   __ jccb(Assembler::notZero, test_mark_word);
1978   __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
1979   __ bind(test_mark_word);
1980   __ testl(tmp, markWord::null_free_array_bit_in_place);
1981 }
1982 
1983 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1984   Label L_oops_equal;
1985   Label L_oops_not_equal;
1986   Label L_end;
1987 
1988   Register left  = op->left()->as_register();
1989   Register right = op->right()->as_register();
1990 
1991   __ cmpptr(left, right);
1992   __ jcc(Assembler::equal, L_oops_equal);
1993 
1994   // (1) Null check -- if one of the operands is null, the other must not be null (because
1995   //     the two references are not equal), so they are not substitutable.
1996   //     FIXME: do null check only if the operand is nullable
1997   __ testptr(left, right);
1998   __ jcc(Assembler::zero, L_oops_not_equal);
1999 
2000   ciKlass* left_klass = op->left_klass();
2001   ciKlass* right_klass = op->right_klass();
2002 
2003   // (2) Inline type check -- if either of the operands is not an inline type,
2004   //     they are not substitutable. We do this only if we are not sure that the
2005   //     operands are inline types.
2006   if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
2007       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2008     Register tmp1  = op->tmp1()->as_register();
2009     __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2010     __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2011     __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2012     __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2013     __ jcc(Assembler::notEqual, L_oops_not_equal);
2014   }
2015 
2016   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2017   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
2018     // No need to load klass -- the operands are statically known to be the same inline klass.
2019     __ jmp(*op->stub()->entry());
2020   } else {
2021     Register left_klass_op = op->left_klass_op()->as_register();
2022     Register right_klass_op = op->right_klass_op()->as_register();
2023 
2024     if (UseCompressedClassPointers) {
2025       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2026       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2027       __ cmpl(left_klass_op, right_klass_op);
2028     } else {
2029       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2030       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2031       __ cmpptr(left_klass_op, right_klass_op);
2032     }
2033 
2034     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
2035     // fall through to L_oops_not_equal
2036   }
2037 
2038   __ bind(L_oops_not_equal);
2039   move(op->not_equal_result(), op->result_opr());
2040   __ jmp(L_end);
2041 
2042   __ bind(L_oops_equal);
2043   move(op->equal_result(), op->result_opr());
2044   __ jmp(L_end);
2045 
2046   // We've returned from the stub. RAX contains 0x0 IFF the two
2047   // operands are not substitutable. (Don't compare against 0x1 in case the
2048   // C compiler is naughty)
2049   __ bind(*op->stub()->continuation());
2050   __ cmpl(rax, 0);
2051   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
2052   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
2053   // fall-through
2054   __ bind(L_end);
2055 }
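
Taken together, the fast path above decides substitutability in the same order as this hedged standalone sketch (toy types, with the runtime stub's field comparison stubbed out):

// Toy model; the real code works on oops, mark words and klass pointers.
struct ToyObj { const void* klass; bool is_inline_type; };

// Stand-in for the runtime stub reached via op->stub() that compares fields.
static bool fields_equal_slow(const ToyObj*, const ToyObj*) { return false; }

static bool acmp_substitutable(const ToyObj* left, const ToyObj* right) {
  if (left == right)                       return true;   // same reference
  if (left == nullptr || right == nullptr) return false;  // only one can be null here
  if (!left->is_inline_type || !right->is_inline_type) return false;
  if (left->klass != right->klass)         return false;  // different klasses
  return fields_equal_slow(left, right);                  // same inline klass: compare fields
}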
2056 
2057 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2058   if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
2059     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
2060     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
2061     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
2062     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
2063     Register addr = op->addr()->as_register();
2064     __ lock();
2065     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
2066 
2067   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
2068     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
2069     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2070     Register newval = op->new_value()->as_register();
2071     Register cmpval = op->cmp_value()->as_register();
2072     assert(cmpval == rax, "wrong register");
2073     assert(newval != noreg, "new val must be register");
2074     assert(cmpval != newval, "cmp and new values must be in different registers");
2075     assert(cmpval != addr, "cmp and addr must be in different registers");

2096       __ cmpxchgl(newval, Address(addr, 0));
2097     }
2098 #ifdef _LP64
2099   } else if (op->code() == lir_cas_long) {
2100     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2101     Register newval = op->new_value()->as_register_lo();
2102     Register cmpval = op->cmp_value()->as_register_lo();
2103     assert(cmpval == rax, "wrong register");
2104     assert(newval != noreg, "new val must be register");
2105     assert(cmpval != newval, "cmp and new values must be in different registers");
2106     assert(cmpval != addr, "cmp and addr must be in different registers");
2107     assert(newval != addr, "new value and addr must be in different registers");
2108     __ lock();
2109     __ cmpxchgq(newval, Address(addr, 0));
2110 #endif // _LP64
2111   } else {
2112     Unimplemented();
2113   }
2114 }
2115 
2116 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
2117   assert(dst->is_cpu_register(), "must be");
2118   assert(dst->type() == src->type(), "must be");
2119 
2120   if (src->is_cpu_register()) {
2121     reg2reg(src, dst);
2122   } else if (src->is_stack()) {
2123     stack2reg(src, dst, dst->type());
2124   } else if (src->is_constant()) {
2125     const2reg(src, dst, lir_patch_none, nullptr);
2126   } else {
2127     ShouldNotReachHere();
2128   }
2129 }
2130 
2131 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
2132                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
2133   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
2134 
2135   Assembler::Condition acond, ncond;
2136   switch (condition) {
2137     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2138     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2139     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2140     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2141     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2142     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2143     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2144     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2145     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2146                                 ShouldNotReachHere();
2147   }
2148 
2149   if (opr1->is_cpu_register()) {
2150     reg2reg(opr1, result);

2988   int offset = __ offset();
2989   switch (code) {
2990   case lir_static_call:
2991   case lir_optvirtual_call:
2992   case lir_dynamic_call:
2993     offset += NativeCall::displacement_offset;
2994     break;
2995   case lir_icvirtual_call:
2996     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex;
2997     break;
2998   default: ShouldNotReachHere();
2999   }
3000   __ align(BytesPerWord, offset);
3001 }
3002 
3003 
3004 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
3005   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
3006          "must be aligned");
3007   __ call(AddressLiteral(op->addr(), rtype));
3008   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3009   __ post_call_nop();
3010 }
3011 
3012 
3013 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
3014   __ ic_call(op->addr());
3015   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3016   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
3017          "must be aligned");
3018   __ post_call_nop();
3019 }
3020 
3021 
3022 void LIR_Assembler::emit_static_call_stub() {
3023   address call_pc = __ pc();
3024   address stub = __ start_a_stub(call_stub_size());
3025   if (stub == nullptr) {
3026     bailout("static call stub overflow");
3027     return;
3028   }
3029 
3030   int start = __ offset();
3031 
3032   // make sure that the displacement word of the call ends up word aligned
3033   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset);
3034   __ relocate(static_stub_Relocation::spec(call_pc));
3035   __ mov_metadata(rbx, (Metadata*)nullptr);

3176   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3177 }
3178 
3179 
3180 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3181   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3182   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3183   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3184   __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
3185 }
3186 
3187 
3188 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3189   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3190   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3191   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3192   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
3193 }
3194 
3195 
3196 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
3197   if (null_check) {
3198     __ testptr(obj, obj);
3199     __ jcc(Assembler::zero, *slow_path->entry());
3200   }
3201   if (is_dest) {
3202     __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
3203   } else {
3204     __ test_flat_array_oop(obj, tmp, *slow_path->entry());
3205   }
3206 }
3207 
3208 
3209 // This code replaces a call to arraycopy; no exceptions may
3210 // be thrown in this code: they must be thrown in the System.arraycopy
3211 // activation frame. We could save some checks if this were not the case.
3212 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3213   ciArrayKlass* default_type = op->expected_type();
3214   Register src = op->src()->as_register();
3215   Register dst = op->dst()->as_register();
3216   Register src_pos = op->src_pos()->as_register();
3217   Register dst_pos = op->dst_pos()->as_register();
3218   Register length  = op->length()->as_register();
3219   Register tmp = op->tmp()->as_register();
3220   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3221 
3222   CodeStub* stub = op->stub();
3223   int flags = op->flags();
3224   BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
3225   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3226 
3227   if (flags & LIR_OpArrayCopy::always_slow_path) {
3228     __ jmp(*stub->entry());
3229     __ bind(*stub->continuation());
3230     return;
3231   }
3232 
3233   // if we don't know anything, just go through the generic arraycopy
3234   if (default_type == nullptr) {
3235     // save outgoing arguments on stack in case call to System.arraycopy is needed
3236     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3237     // for interpreter calling conventions. Now we have to do it in new style conventions.
3238     // For the moment until C1 gets the new register allocator I just force all the
3239     // args to the right place (except the register args) and then on the back side
3240     // reload the register args properly if we go slow path. Yuck
3241 
3242     // These are proper for the calling convention
3243     store_parameter(length, 2);
3244     store_parameter(dst_pos, 1);
3245     store_parameter(dst, 0);
3246 
3247     // these are just temporary placements until we need to reload
3248     store_parameter(src_pos, 3);
3249     store_parameter(src, 4);
3250     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3251 
3252     address copyfunc_addr = StubRoutines::generic_arraycopy();

3306     __ mov(tmp, rax);
3307     __ xorl(tmp, -1);
3308 
3309     // Reload values from the stack so they are where the stub
3310     // expects them.
3311     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3312     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3313     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3314     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3315     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3316 
3317     __ subl(length, tmp);
3318     __ addl(src_pos, tmp);
3319     __ addl(dst_pos, tmp);
3320     __ jmp(*stub->entry());
3321 
3322     __ bind(*stub->continuation());
3323     return;
3324   }
3325 
3326   // Handle inline type arrays
3327   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
3328     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
3329   }
3330   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
3331     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
3332   }
3333 
3334   assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3335 
3336   int elem_size = type2aelembytes(basic_type);
3337   Address::ScaleFactor scale;
3338 
3339   switch (elem_size) {
3340     case 1 :
3341       scale = Address::times_1;
3342       break;
3343     case 2 :
3344       scale = Address::times_2;
3345       break;
3346     case 4 :
3347       scale = Address::times_4;
3348       break;
3349     case 8 :
3350       scale = Address::times_8;
3351       break;
3352     default:
3353       scale = Address::no_scale;

3943         // first time here. Set profile type.
3944         __ movptr(mdo_addr, tmp);
3945 #ifdef ASSERT
3946         __ andptr(tmp, TypeEntries::type_klass_mask);
3947         __ verify_klass_ptr(tmp);
3948 #endif
3949       } else {
3950         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
3951                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3952 
3953         __ testptr(mdo_addr, TypeEntries::type_unknown);
3954         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3955 
3956         __ orptr(mdo_addr, TypeEntries::type_unknown);
3957       }
3958     }
3959   }
3960   __ bind(next);
3961 }
3962 
3963 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3964   Register obj = op->obj()->as_register();
3965   Register tmp = op->tmp()->as_pointer_register();
3966   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3967   bool not_null = op->not_null();
3968   int flag = op->flag();
3969 
3970   Label not_inline_type;
3971   if (!not_null) {
3972     __ testptr(obj, obj);
3973     __ jccb(Assembler::zero, not_inline_type);
3974   }
3975 
3976   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3977 
3978   __ orb(mdo_addr, flag);
3979 
3980   __ bind(not_inline_type);
3981 }
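
A short standalone sketch of the profiling fast path above (illustrative signature, not the MDO layout): only a non-null object that is an inline type causes the flag to be OR-ed into the profile slot.

#include <cstdint>

static void profile_inline_type_flag(uint8_t* mdo_slot, const void* obj, bool is_inline_type, uint8_t flag) {
  if (obj == nullptr) return;     // null observed: leave the slot untouched
  if (!is_inline_type) return;    // test_oop_is_not_inline_type branch taken: skip
  *mdo_slot |= flag;              // same effect as 'orb(mdo_addr, flag)'
}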
3982 
3983 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3984   Unimplemented();
3985 }
3986 
3987 
3988 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3989   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3990 }
3991 
3992 
3993 void LIR_Assembler::align_backward_branch_target() {
3994   __ align(BytesPerWord);
3995 }
3996 
3997 
3998 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3999   if (left->is_single_cpu()) {
4000     __ negl(left->as_register());
4001     move_regs(left->as_register(), dest->as_register());
4002 

4226 }
4227 
4228 void LIR_Assembler::membar_storeload() {
4229   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4230 }
4231 
4232 void LIR_Assembler::on_spin_wait() {
4233   __ pause ();
4234 }
4235 
4236 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4237   assert(result_reg->is_register(), "check");
4238 #ifdef _LP64
4239   // __ get_thread(result_reg->as_register_lo());
4240   __ mov(result_reg->as_register(), r15_thread);
4241 #else
4242   __ get_thread(result_reg->as_register());
4243 #endif // _LP64
4244 }
4245 
4246 void LIR_Assembler::check_orig_pc() {
4247   __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD);
4248 }
4249 
4250 void LIR_Assembler::peephole(LIR_List*) {
4251   // do nothing for now
4252 }
4253 
4254 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4255   assert(data == dest, "xchg/xadd uses only 2 operands");
4256 
4257   if (data->type() == T_INT) {
4258     if (code == lir_xadd) {
4259       __ lock();
4260       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4261     } else {
4262       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4263     }
4264   } else if (data->is_oop()) {
4265     assert (code == lir_xchg, "xadd for oops");
4266     Register obj = data->as_register();
4267 #ifdef _LP64
4268     if (UseCompressedOops) {
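
atomic_op above lowers lir_xadd to lock xadd and lir_xchg to xchg; both leave the previous memory contents in the data register. A standalone std::atomic sketch of the same contract (not the LIR operation itself):

#include <atomic>
#include <cassert>

// lir_xadd: add and return the old value.  lir_xchg: swap and return the old value.
static int xadd(std::atomic<int>& mem, int add)       { return mem.fetch_add(add); }
static int xchg(std::atomic<int>& mem, int new_value) { return mem.exchange(new_value); }

int main() {
  std::atomic<int> cell(10);
  assert(xadd(cell, 5) == 10 && cell.load() == 15);
  assert(xchg(cell, 1) == 15 && cell.load() == 1);
  return 0;
}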