src/cpu/x86/vm/c1_LIRAssembler_x86.cpp

--- old

 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"

// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
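// For example, NegF XORs the float with a mask that has only the sign bit
// set (0x80000000), and AbsF ANDs it with the complement (0x7FFFFFFF);
// the 64-bit masks do the same for NegD/AbsD.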
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits of data + 128 bits for alignment

// Static initialization during VM startup.

// ...

    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();
    }
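    // On 32-bit, cmpxchg8b compares EDX:EAX with the 64-bit memory operand
    // and, on a match, stores ECX:EBX into it; the lock prefix above makes
    // the exchange atomic on multiprocessor systems.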
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if (op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
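        // With compressed oops the field holds a 32-bit narrow oop, so both
        // the compare and the new value are encoded before the 32-bit CAS.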
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        if (os::is_MP()) {
          __ lock();
        }
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else
#endif
      {
        if (os::is_MP()) {
          __ lock();
        }
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();

// ...

             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
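    // Flip the sign bit by XOR-ing with the 128-bit double sign mask.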
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}



void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }


+++ new (Shenandoah barrier changes)

 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "shenandoahBarrierSetAssembler_x86.hpp"
#endif

// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits of data + 128 bits for alignment

// Static initialization during VM startup.

// ...

    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();
    }
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if (op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
#if INCLUDE_ALL_GCS
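        // Shenandoah CAS barrier: a plain cmpxchg can fail spuriously when the
        // field still holds the from-space copy of the expected object, so the
        // barrier-set assembler's CAS resolves such false negatives and retries.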
        if (UseShenandoahGC && ShenandoahCASBarrier) {
          Register tmp1 = op->tmp1()->as_register();
          Register tmp2 = op->tmp2()->as_register();
          Register res  = op->result_opr()->as_register();
          __ encode_heap_oop(cmpval);
          __ mov(rscratch1, newval);
          __ encode_heap_oop(rscratch1);
          ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, rscratch1, false, tmp1, tmp2);
        } else
#endif
        {
          __ encode_heap_oop(cmpval);
          __ mov(rscratch1, newval);
          __ encode_heap_oop(rscratch1);
          if (os::is_MP()) {
            __ lock();
          }
          // cmpval (rax) is implicitly used by this instruction
          __ cmpxchgl(rscratch1, Address(addr, 0));
        }
      } else
#endif
      {
#if INCLUDE_ALL_GCS
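        // Same Shenandoah CAS barrier, used when oops are uncompressed.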
        if (UseShenandoahGC && ShenandoahCASBarrier) {
          Register tmp1 = op->tmp1()->as_register();
          Register tmp2 = op->tmp2()->as_register();
          Register res  = op->result_opr()->as_register();
          ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, newval, false, tmp1, tmp2);
        } else
#endif
        {
          if (os::is_MP()) {
            __ lock();
          }
          __ cmpxchgptr(newval, Address(addr, 0));
        }
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();

// ...

             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  if (!UseShenandoahGC) {
    Register reg = dest->as_pointer_register();
    __ lea(reg, as_Address(src->as_address_ptr()));
  } else {
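    // With Shenandoah the address may come from a not-yet-resolved field
    // access, so the lea is wrapped in a PatchingStub and finished with the
    // usual patching epilog.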
    PatchingStub* patch = NULL;
    if (patch_code != lir_patch_none) {
      patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    }

    Register reg = dest->as_pointer_register();
    LIR_Address* addr = src->as_address_ptr();
    __ lea(reg, as_Address(addr));

    if (patch != NULL) {
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    }
  }
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
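  // If debug info was supplied, record it at this call site so the runtime
  // can build the oop map for the call.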
3942   if (info != NULL) {
3943     add_call_info_here(info);
3944   }
3945 }
3946 
3947 
3948 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3949   assert(type == T_LONG, "only for volatile long fields");
3950 
3951   if (info != NULL) {
3952     add_debug_info_for_null_check_here(info);
3953   }
3954 