src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
+ #include "ci/ciUtilities.hpp"
+ #include "code/SCCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.
// Note: 'double' and 'long long' have 32-bit alignment on x86.
! static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
// Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
// for the 128-bit operands of SSE instructions.
jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
// Store the value into the 128-bit operand.
operand[0] = lo;
operand[1] = hi;
! return operand;
}
// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
// Static initialization during VM startup.
! static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
! static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
! static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
! static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be
// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.
// Note: 'double' and 'long long' have 32-bit alignment on x86.
! static address double_quadword(jlong *adr, jlong lo, jlong hi) {
// Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
// for the 128-bit operands of SSE instructions.
jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
// Store the value into the 128-bit operand.
operand[0] = lo;
operand[1] = hi;
! return (address)operand;
}
// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
// Static initialization during VM startup.
! address LIR_Assembler::float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
! address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
! address LIR_Assembler::float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
! address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
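The pool setup above relies on over-allocating one spare 128-bit slot and rounding the start address down to a 16-byte boundary, which SSE loads require. A minimal standalone sketch of the same trick (plain C++; names are illustrative, not HotSpot API):

#include <cstdint>
#include <cstdio>

// Over-allocate one spare 16-byte slot, then mask off the low four
// address bits so the result is 16-byte aligned and still in bounds.
static int64_t pool[(4 + 1) * 2];   // 4 masks + 1 slot of alignment slack

static int64_t* quadword(int64_t* adr, int64_t lo, int64_t hi) {
  int64_t* p = (int64_t*)(((intptr_t)adr) & ~(intptr_t)0xF);
  p[0] = lo;    // low 64 bits of the 128-bit mask
  p[1] = hi;    // high 64 bits of the 128-bit mask
  return p;
}

int main() {
  int64_t* m = quadword(&pool[1 * 2], 0x7FFFFFFF7FFFFFFFLL, 0x7FFFFFFF7FFFFFFFLL);
  printf("%d\n", (int)(((intptr_t)m & 0xF) == 0));   // prints 1: aligned
}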
NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be
}
case T_LONG: {
assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
+ if (SCCache::is_on_for_write()) {
+ // SCA needs relocation info for card table base
+ address b = c->as_pointer();
+ if (is_card_table_address(b)) {
+ __ lea(dest->as_register_lo(), ExternalAddress(b));
+ break;
+ }
+ if (AOTRuntimeConstants::contains(b)) {
+ __ load_aotrc_address(dest->as_register_lo(), b);
+ break;
+ }
+ }
__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
__ movptr(dest->as_register_lo(), c->as_jint_lo());
__ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
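Why the added branch exists: when the Startup Code Cache is being written (SCCache::is_on_for_write()), an address such as the card table base cannot be baked in as a raw 64-bit immediate, since it will differ in the JVM that later loads the cached code; emitting it through lea(ExternalAddress(...)) or load_aotrc_address records a relocation instead. A sketch of just that decision, with stand-in predicates (is_card_table_address and the AOTRuntimeConstants check appear in the diff above; everything else here is illustrative):

#include <cstdio>

// Illustrative stand-ins: in HotSpot these test the GC card table base
// and the AOT runtime-constants area (AOTRuntimeConstants::contains).
static bool is_card_table_address(const void*)   { return true;  }
static bool is_aot_runtime_constant(const void*) { return false; }

// Shape of the choice made in the T_LONG case: a raw immediate is
// position-dependent, so cached (AOT) code must use relocatable forms.
enum EmitKind { kImmediate, kRelocatableLea, kAotRcLoad };

static EmitKind classify_long_constant(const void* c, bool writing_cache) {
  if (writing_cache && is_card_table_address(c))   return kRelocatableLea;
  if (writing_cache && is_aot_runtime_constant(c)) return kAotRcLoad;
  return kImmediate;   // __ movptr(dst, (intptr_t)c)
}

int main() {
  int dummy;
  printf("%d\n", classify_long_constant(&dummy, true));  // prints 1 (lea)
}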
if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
__ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
}
assert(!tmp->is_valid(), "do not need temporary");
__ andpd(dest->as_xmm_double_reg(),
! ExternalAddress((address)double_signmask_pool),
rscratch1);
}
}
break;
if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
__ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
}
assert(!tmp->is_valid(), "do not need temporary");
__ andpd(dest->as_xmm_double_reg(),
! ExternalAddress(LIR_Assembler::double_signmask_pool),
rscratch1);
}
}
break;
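What the andpd against double_signmask_pool computes: clearing the IEEE-754 sign bit yields the absolute value. A scalar sketch of the same bit operation (standalone C++, not HotSpot code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Clearing the sign bit with the double_signmask pattern gives fabs(x).
static double abs_via_mask(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= 0x7FFFFFFFFFFFFFFFULL;   // the double_signmask_pool pattern
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

int main() {
  printf("%f\n", abs_via_mask(-2.5));   // prints 2.500000
}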
assert(!tmp->is_valid(), "do not need temporary");
if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
__ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
}
__ xorps(dest->as_xmm_float_reg(),
! ExternalAddress((address)float_signflip_pool),
rscratch1);
}
} else if (dest->is_double_xmm()) {
#ifdef _LP64
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
assert(!tmp->is_valid(), "do not need temporary");
if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
__ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
}
__ xorps(dest->as_xmm_float_reg(),
! ExternalAddress(LIR_Assembler::float_signflip_pool),
rscratch1);
}
} else if (dest->is_double_xmm()) {
#ifdef _LP64
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
assert(!tmp->is_valid(), "do not need temporary");
if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
__ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
}
__ xorpd(dest->as_xmm_double_reg(),
! ExternalAddress((address)double_signflip_pool),
rscratch1);
}
#ifndef _LP64
} else if (left->is_single_fpu() || left->is_double_fpu()) {
assert(left->fpu() == 0, "arg must be on TOS");
assert(!tmp->is_valid(), "do not need temporary");
if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
__ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
}
__ xorpd(dest->as_xmm_double_reg(),
! ExternalAddress(LIR_Assembler::double_signflip_pool),
rscratch1);
}
#ifndef _LP64
} else if (left->is_single_fpu() || left->is_double_fpu()) {
assert(left->fpu() == 0, "arg must be on TOS");
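Likewise, the xorps/xorpd against the signflip pools negate a value by toggling only its sign bit. A scalar sketch of both patterns (standalone C++, not HotSpot code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Toggling the IEEE-754 sign bit, as xorps/xorpd do with the signflip
// pools, negates the value without touching exponent or mantissa.
static float neg_float(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 0x80000000u;                 // float_signflip pattern (per lane)
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

static double neg_double(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 0x8000000000000000ULL;       // double_signflip pattern
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

int main() {
  printf("%f %f\n", neg_float(1.5f), neg_double(-2.25));  // -1.500000 2.250000
}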