< prev index next >

src/cpu/x86/vm/c1_LIRAssembler_x86.cpp

Print this page




  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "gc_interface/collectedHeap.hpp"
  36 #include "memory/barrierSet.hpp"
  37 #include "memory/cardTableModRefBS.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "vmreg_x86.inline.hpp"
  42 



  43 
  44 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  45 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  46 // fast versions of NegF/NegD and AbsF/AbsD.
  47 
  48 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  49 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  50   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  51   // of 128-bits operands for SSE instructions.
  52   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  53   // Store the value to a 128-bits operand.
  54   operand[0] = lo;
  55   operand[1] = hi;
  56   return operand;
  57 }
  58 
  59 // Backing buffer for the 128-bit sign/flip masks used by SSE instructions;
  59 // the extra 128 bits of slack allow aligning a slot within it (presumably
  59 // via double_quadword() above — set up at VM startup, not shown here).
  60 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  61 
  62 // Static initialization during VM startup.


1981     Register addr = op->addr()->as_register();
1982     if (os::is_MP()) {
1983       __ lock();
1984     }
1985     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1986 
1987   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1988     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1989     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1990     Register newval = op->new_value()->as_register();
1991     Register cmpval = op->cmp_value()->as_register();
1992     assert(cmpval == rax, "wrong register");
1993     assert(newval != NULL, "new val must be register");
1994     assert(cmpval != newval, "cmp and new values must be in different registers");
1995     assert(cmpval != addr, "cmp and addr must be in different registers");
1996     assert(newval != addr, "new value and addr must be in different registers");
1997 
1998     if ( op->code() == lir_cas_obj) {
1999 #ifdef _LP64
2000       if (UseCompressedOops) {
2001         __ encode_heap_oop(cmpval);
2002         __ mov(rscratch1, newval);
2003         __ encode_heap_oop(rscratch1);
2004         if (os::is_MP()) {
2005           __ lock();















2006         }
2007         // cmpval (rax) is implicitly used by this instruction
2008         __ cmpxchgl(rscratch1, Address(addr, 0));
2009       } else
2010 #endif
2011       {
2012         if (os::is_MP()) {
2013           __ lock();
2014         }
2015         __ cmpxchgptr(newval, Address(addr, 0));









2016       }
2017     } else {
2018       assert(op->code() == lir_cas_int, "lir_cas_int expected");
2019       if (os::is_MP()) {
2020         __ lock();
2021       }
2022       __ cmpxchgl(newval, Address(addr, 0));
2023     }
2024 #ifdef _LP64
2025   } else if (op->code() == lir_cas_long) {
2026     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2027     Register newval = op->new_value()->as_register_lo();
2028     Register cmpval = op->cmp_value()->as_register_lo();
2029     assert(cmpval == rax, "wrong register");
2030     assert(newval != NULL, "new val must be register");
2031     assert(cmpval != newval, "cmp and new values must be in different registers");
2032     assert(cmpval != addr, "cmp and addr must be in different registers");
2033     assert(newval != addr, "new value and addr must be in different registers");
2034     if (os::is_MP()) {
2035       __ lock();


3154     // convention
3155     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3156     __ mov(c_rarg0, j_rarg0);
3157     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3158     __ mov(c_rarg1, j_rarg1);
3159     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3160     __ mov(c_rarg2, j_rarg2);
3161     assert_different_registers(c_rarg3, j_rarg4);
3162     __ mov(c_rarg3, j_rarg3);
3163 #ifdef _WIN64
3164     // Allocate abi space for args but be sure to keep stack aligned
3165     __ subptr(rsp, 6*wordSize);
3166     store_parameter(j_rarg4, 4);
3167     if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3168       __ call(RuntimeAddress(C_entry));
3169     } else {
3170 #ifndef PRODUCT
3171       if (PrintC1Statistics) {
3172         __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3173       }
3174 #endif
3175       __ call(RuntimeAddress(copyfunc_addr));
3176     }
3177     __ addptr(rsp, 6*wordSize);
3178 #else
3179     __ mov(c_rarg4, j_rarg4);
3180     if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3181       __ call(RuntimeAddress(C_entry));
3182     } else {
3183 #ifndef PRODUCT
3184       if (PrintC1Statistics) {
3185         __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3186       }
3187 #endif
3188       __ call(RuntimeAddress(copyfunc_addr));
3189     }
3190 #endif // _WIN64
3191 #else
3192     __ push(length);
3193     __ push(dst_pos);
3194     __ push(dst);
3195     __ push(src_pos);
3196     __ push(src);
3197 
3198     if (copyfunc_addr == NULL) { // Use C version if stub was not generated




  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "gc_interface/collectedHeap.hpp"
  36 #include "memory/barrierSet.hpp"
  37 #include "memory/cardTableModRefBS.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "vmreg_x86.inline.hpp"
  42 
  43 #if INCLUDE_ALL_GCS
  44 #include "shenandoahBarrierSetAssembler_x86.hpp"
  45 #endif
  46 
  47 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  48 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  49 // fast versions of NegF/NegD and AbsF/AbsD.
  50 
  51 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  52 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  53   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  54   // of 128-bits operands for SSE instructions.
  55   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  56   // Store the value to a 128-bits operand.
  57   operand[0] = lo;
  58   operand[1] = hi;
  59   return operand;
  60 }
  61 
  62 // Backing buffer for the 128-bit sign/flip masks used by SSE instructions;
  62 // the extra 128 bits of slack allow aligning a slot within it (presumably
  62 // via double_quadword() above — set up at VM startup, not shown here).
  63 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  64 
  65 // Static initialization during VM startup.


1984     Register addr = op->addr()->as_register();
1985     if (os::is_MP()) {
1986       __ lock();
1987     }
1988     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1989 
1990   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1991     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1992     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1993     Register newval = op->new_value()->as_register();
1994     Register cmpval = op->cmp_value()->as_register();
1995     assert(cmpval == rax, "wrong register");
1996     assert(newval != NULL, "new val must be register");
1997     assert(cmpval != newval, "cmp and new values must be in different registers");
1998     assert(cmpval != addr, "cmp and addr must be in different registers");
1999     assert(newval != addr, "new value and addr must be in different registers");
2000 
2001     if ( op->code() == lir_cas_obj) {
2002 #ifdef _LP64
2003       if (UseCompressedOops) {
2004 #if INCLUDE_ALL_GCS
2005         if (UseShenandoahGC && ShenandoahCASBarrier) {
2006           Register tmp1 = op->tmp1()->as_register();
2007           Register tmp2 = op->tmp2()->as_register();
2008 
2009           __ encode_heap_oop(cmpval);
2010           __ mov(rscratch1, newval);
2011           __ encode_heap_oop(rscratch1);
2012           ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, NULL, Address(addr, 0), cmpval, rscratch1, true, tmp1, tmp2);
2013         } else
2014 #endif
2015         {
2016           __ encode_heap_oop(cmpval);
2017           __ mov(rscratch1, newval);
2018           __ encode_heap_oop(rscratch1);
2019           if (os::is_MP()) {
2020             __ lock();
2021           }
2022           // cmpval (rax) is implicitly used by this instruction
2023           __ cmpxchgl(rscratch1, Address(addr, 0));
2024         }


2025       } else
2026 #endif
2027       {
2028 #if INCLUDE_ALL_GCS
2029         if (UseShenandoahGC && ShenandoahCASBarrier) {
2030           Register tmp1 = op->tmp1()->as_register();
2031           Register tmp2 = op->tmp2()->as_register();
2032           ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, NULL, Address(addr, 0), cmpval, newval, true, tmp1, tmp2);
2033         } else
2034 #endif
2035         {
2036           if (os::is_MP()) {
2037             __ lock();
2038           }
2039           __ cmpxchgptr(newval, Address(addr, 0));
2040         }
2041       }
2042     } else {
2043       assert(op->code() == lir_cas_int, "lir_cas_int expected");
2044       if (os::is_MP()) {
2045         __ lock();
2046       }
2047       __ cmpxchgl(newval, Address(addr, 0));
2048     }
2049 #ifdef _LP64
2050   } else if (op->code() == lir_cas_long) {
2051     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2052     Register newval = op->new_value()->as_register_lo();
2053     Register cmpval = op->cmp_value()->as_register_lo();
2054     assert(cmpval == rax, "wrong register");
2055     assert(newval != NULL, "new val must be register");
2056     assert(cmpval != newval, "cmp and new values must be in different registers");
2057     assert(cmpval != addr, "cmp and addr must be in different registers");
2058     assert(newval != addr, "new value and addr must be in different registers");
2059     if (os::is_MP()) {
2060       __ lock();


3179     // convention
3180     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3181     __ mov(c_rarg0, j_rarg0);
3182     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3183     __ mov(c_rarg1, j_rarg1);
3184     assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3185     __ mov(c_rarg2, j_rarg2);
3186     assert_different_registers(c_rarg3, j_rarg4);
3187     __ mov(c_rarg3, j_rarg3);
3188 #ifdef _WIN64
3189     // Allocate abi space for args but be sure to keep stack aligned
3190     __ subptr(rsp, 6*wordSize);
3191     store_parameter(j_rarg4, 4);
3192     if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3193       __ call(RuntimeAddress(C_entry));
3194     } else {
3195 #ifndef PRODUCT
3196       if (PrintC1Statistics) {
3197         __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3198       }
3199 #endif // PRODUCT
3200       __ call(RuntimeAddress(copyfunc_addr));
3201     }
3202     __ addptr(rsp, 6*wordSize);
3203 #else // 
3204     __ mov(c_rarg4, j_rarg4);
3205     if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3206       __ call(RuntimeAddress(C_entry));
3207     } else {
3208 #ifndef PRODUCT
3209       if (PrintC1Statistics) {
3210         __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3211       }
3212 #endif
3213       __ call(RuntimeAddress(copyfunc_addr));
3214     }
3215 #endif // _WIN64
3216 #else
3217     __ push(length);
3218     __ push(dst_pos);
3219     __ push(dst);
3220     __ push(src_pos);
3221     __ push(src);
3222 
3223     if (copyfunc_addr == NULL) { // Use C version if stub was not generated


< prev index next >