< prev index next >

src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

Print this page

  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "code/aotCodeCache.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_x86.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"

  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_x86.inline.hpp"
  47 
  48 
  49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  50 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  51 // fast versions of NegF/NegD and AbsF/AbsD.
  52 
  53 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  55   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  56   // of 128-bits operands for SSE instructions.
  57   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  58   // Store the value to a 128-bits operand.
  59   operand[0] = lo;
  60   operand[1] = hi;
  61   return operand;
  62 }
  63 
  64 // Buffer for 128-bits masks used by SSE instructions.
  65 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  66 
  67 // Static initialization during VM startup.
// Each pointer below is produced by double_quadword, which aligns the given
// pool slot down to a 16-byte boundary before storing the mask; the extra
// 128-bit pool entry guarantees the aligned slot still lies inside the buffer.
// signmask masks keep every bit except the sign bit(s) (used for Abs);
// signflip masks set only the sign bit(s) (used for Neg).
  68 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  69 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  70 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  71 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  72 
  73 
  74 NEEDS_CLEANUP // remove this definitions ?
  75 const Register SYNC_header = rax;   // synchronization header
  76 const Register SHIFT_count = rcx;   // where count for shift operations must be
  77 
// Shorthand used throughout this file: emit code through the current C1 macro assembler.
  78 #define __ _masm->
  79 
  80 
  81 static void select_different_registers(Register preserve,
  82                                        Register extra,
  83                                        Register &tmp1,
  84                                        Register &tmp2) {
  85   if (tmp1 == preserve) {
  86     assert_different_registers(tmp1, tmp2, extra);
  87     tmp1 = extra;
  88   } else if (tmp2 == preserve) {
  89     assert_different_registers(tmp1, tmp2, extra);
  90     tmp2 = extra;
  91   }

 522   LIR_Const* c = src->as_constant_ptr();
 523 
 524   switch (c->type()) {
 525     case T_INT: {
 526       assert(patch_code == lir_patch_none, "no patching handled here");
 527       __ movl(dest->as_register(), c->as_jint());
 528       break;
 529     }
 530 
 531     case T_ADDRESS: {
 532       assert(patch_code == lir_patch_none, "no patching handled here");
 533       __ movptr(dest->as_register(), c->as_jint());
 534       break;
 535     }
 536 
 537     case T_LONG: {
 538       assert(patch_code == lir_patch_none, "no patching handled here");
 539 #if INCLUDE_CDS
 540       if (AOTCodeCache::is_on_for_dump()) {
 541         address b = c->as_pointer();




 542         if (AOTRuntimeConstants::contains(b)) {
 543           __ load_aotrc_address(dest->as_register_lo(), b);
 544           break;
 545         }
 546       }
 547 #endif
 548       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 549       break;
 550     }
 551 
 552     case T_OBJECT: {
 553       if (patch_code != lir_patch_none) {
 554         jobject2reg_with_patching(dest->as_register(), info);
 555       } else {
 556         __ movoop(dest->as_register(), c->as_jobject());
 557       }
 558       break;
 559     }
 560 
 561     case T_METADATA: {

1794     } else {
1795       ShouldNotReachHere();
1796     }
1797 
1798   } else {
1799     ShouldNotReachHere();
1800   }
1801 }
1802 
1803 
1804 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1805   if (value->is_double_xmm()) {
1806     switch(code) {
1807       case lir_abs :
1808         {
1809           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1810             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1811           }
1812           assert(!tmp->is_valid(), "do not need temporary");
1813           __ andpd(dest->as_xmm_double_reg(),
1814                    ExternalAddress((address)double_signmask_pool),
1815                    rscratch1);
1816         }
1817         break;
1818 
1819       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1820       // all other intrinsics are not available in the SSE instruction set, so FPU is used
1821       default      : ShouldNotReachHere();
1822     }
1823 
1824   } else if (code == lir_f2hf) {
1825     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1826   } else if (code == lir_hf2f) {
1827     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1828   } else {
1829     Unimplemented();
1830   }
1831 }
1832 
1833 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1834   // assert(left->destroys_register(), "check");

  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "code/aotCodeCache.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_x86.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "runtime/threadIdentifier.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 #include "vmreg_x86.inline.hpp"
  48 
  49 
  50 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  51 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  52 // fast versions of NegF/NegD and AbsF/AbsD.
  53 
  54 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  55 static address double_quadword(jlong *adr, jlong lo, jlong hi) {
  56   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  57   // of 128-bits operands for SSE instructions.
  58   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  59   // Store the value to a 128-bits operand.
  60   operand[0] = lo;
  61   operand[1] = hi;
  62   return (address)operand;
  63 }
  64 
  65 // Buffer for 128-bits masks used by SSE instructions.
  66 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  67 
  68 // Static initialization during VM startup.
// Each address below is produced by double_quadword, which aligns the given
// pool slot down to a 16-byte boundary before storing the mask; the extra
// 128-bit pool entry guarantees the aligned slot still lies inside the buffer.
// signmask masks keep every bit except the sign bit(s) (used for Abs);
// signflip masks set only the sign bit(s) (used for Neg).
  69 address LIR_Assembler::float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  70 address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  71 address LIR_Assembler::float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  72 address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  73 
  74 
  75 NEEDS_CLEANUP // remove this definitions ?
  76 const Register SYNC_header = rax;   // synchronization header
  77 const Register SHIFT_count = rcx;   // where count for shift operations must be
  78 
// Shorthand used throughout this file: emit code through the current C1 macro assembler.
  79 #define __ _masm->
  80 
  81 
  82 static void select_different_registers(Register preserve,
  83                                        Register extra,
  84                                        Register &tmp1,
  85                                        Register &tmp2) {
  86   if (tmp1 == preserve) {
  87     assert_different_registers(tmp1, tmp2, extra);
  88     tmp1 = extra;
  89   } else if (tmp2 == preserve) {
  90     assert_different_registers(tmp1, tmp2, extra);
  91     tmp2 = extra;
  92   }

 523   LIR_Const* c = src->as_constant_ptr();
 524 
 525   switch (c->type()) {
 526     case T_INT: {
 527       assert(patch_code == lir_patch_none, "no patching handled here");
 528       __ movl(dest->as_register(), c->as_jint());
 529       break;
 530     }
 531 
 532     case T_ADDRESS: {
 533       assert(patch_code == lir_patch_none, "no patching handled here");
 534       __ movptr(dest->as_register(), c->as_jint());
 535       break;
 536     }
 537 
 538     case T_LONG: {
 539       assert(patch_code == lir_patch_none, "no patching handled here");
 540 #if INCLUDE_CDS
 541       if (AOTCodeCache::is_on_for_dump()) {
 542         address b = c->as_pointer();
 543         if (b == (address)ThreadIdentifier::unsafe_offset()) {
 544           __ lea(dest->as_register_lo(), ExternalAddress(b));
 545           break;
 546         }
 547         if (AOTRuntimeConstants::contains(b)) {
 548           __ load_aotrc_address(dest->as_register_lo(), b);
 549           break;
 550         }
 551       }
 552 #endif
 553       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 554       break;
 555     }
 556 
 557     case T_OBJECT: {
 558       if (patch_code != lir_patch_none) {
 559         jobject2reg_with_patching(dest->as_register(), info);
 560       } else {
 561         __ movoop(dest->as_register(), c->as_jobject());
 562       }
 563       break;
 564     }
 565 
 566     case T_METADATA: {

1799     } else {
1800       ShouldNotReachHere();
1801     }
1802 
1803   } else {
1804     ShouldNotReachHere();
1805   }
1806 }
1807 
1808 
1809 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1810   if (value->is_double_xmm()) {
1811     switch(code) {
1812       case lir_abs :
1813         {
1814           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1815             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1816           }
1817           assert(!tmp->is_valid(), "do not need temporary");
1818           __ andpd(dest->as_xmm_double_reg(),
1819                    ExternalAddress(LIR_Assembler::double_signmask_pool),
1820                    rscratch1);
1821         }
1822         break;
1823 
1824       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1825       // all other intrinsics are not available in the SSE instruction set, so FPU is used
1826       default      : ShouldNotReachHere();
1827     }
1828 
1829   } else if (code == lir_f2hf) {
1830     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1831   } else if (code == lir_hf2f) {
1832     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1833   } else {
1834     Unimplemented();
1835   }
1836 }
1837 
1838 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1839   // assert(left->destroys_register(), "check");
< prev index next >