< prev index next >

src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

Print this page

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"


  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gc_globals.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"

  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_x86.inline.hpp"
  46 
  47 
  48 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  49 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  50 // fast versions of NegF/NegD and AbsF/AbsD.
  51 
  52 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  53 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  54   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  55   // of 128-bits operands for SSE instructions.
  56   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  57   // Store the value to a 128-bits operand.
  58   operand[0] = lo;
  59   operand[1] = hi;
  60   return operand;
  61 }
  62 
  63 // Buffer for 128-bits masks used by SSE instructions.
// NOTE: initializers below start at index 1*2, not 0: double_quadword aligns
// the given address DOWNWARD to a 16-byte boundary, so the first 128-bit slot
// acts as alignment padding.
  64 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  65 
  66 // Static initialization during VM startup.
// Sign-MASK constants (0x7FFF...) clear the sign bit(s) for AbsF/AbsD;
// sign-FLIP constants (0x8000...) toggle the sign bit(s) for NegF/NegD.
  67 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  68 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  69 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  70 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  71 
  72 
  73 NEEDS_CLEANUP // remove these definitions?
  74 const Register SYNC_header = rax;   // synchronization header
  75 const Register SHIFT_count = rcx;   // where count for shift operations must be
  76 
  77 #define __ _masm->
  78 
  79 
  80 static void select_different_registers(Register preserve,
  81                                        Register extra,
  82                                        Register &tmp1,
  83                                        Register &tmp2) {
  84   if (tmp1 == preserve) {
  85     assert_different_registers(tmp1, tmp2, extra);
  86     tmp1 = extra;
  87   } else if (tmp2 == preserve) {
  88     assert_different_registers(tmp1, tmp2, extra);
  89     tmp2 = extra;
  90   }

 518 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 519   assert(src->is_constant(), "should not call otherwise");
 520   assert(dest->is_register(), "should not call otherwise");
 521   LIR_Const* c = src->as_constant_ptr();
 522 
 523   switch (c->type()) {
 524     case T_INT: {
 525       assert(patch_code == lir_patch_none, "no patching handled here");
 526       __ movl(dest->as_register(), c->as_jint());
 527       break;
 528     }
 529 
 530     case T_ADDRESS: {
 531       assert(patch_code == lir_patch_none, "no patching handled here");
 532       __ movptr(dest->as_register(), c->as_jint());
 533       break;
 534     }
 535 
 536     case T_LONG: {
 537       assert(patch_code == lir_patch_none, "no patching handled here");













 538       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 539       break;
 540     }
 541 
 542     case T_OBJECT: {
 543       if (patch_code != lir_patch_none) {
 544         jobject2reg_with_patching(dest->as_register(), info);
 545       } else {
 546         __ movoop(dest->as_register(), c->as_jobject());
 547       }
 548       break;
 549     }
 550 
 551     case T_METADATA: {
 552       if (patch_code != lir_patch_none) {
 553         klass2reg_with_patching(dest->as_register(), info);
 554       } else {
 555         __ mov_metadata(dest->as_register(), c->as_metadata());
 556       }
 557       break;

1784     } else {
1785       ShouldNotReachHere();
1786     }
1787 
1788   } else {
1789     ShouldNotReachHere();
1790   }
1791 }
1792 
1793 
1794 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1795   if (value->is_double_xmm()) {
1796     switch(code) {
1797       case lir_abs :
1798         {
1799           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1800             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1801           }
1802           assert(!tmp->is_valid(), "do not need temporary");
1803           __ andpd(dest->as_xmm_double_reg(),
1804                    ExternalAddress((address)double_signmask_pool),
1805                    rscratch1);
1806         }
1807         break;
1808 
1809       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1810       // all other intrinsics are not available in the SSE instruction set, so FPU is used
1811       default      : ShouldNotReachHere();
1812     }
1813 
1814   } else if (code == lir_f2hf) {
1815     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1816   } else if (code == lir_hf2f) {
1817     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1818   } else {
1819     Unimplemented();
1820   }
1821 }
1822 
1823 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1824   // assert(left->destroys_register(), "check");

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciUtilities.hpp"
  36 #include "code/aotCodeCache.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/threadIdentifier.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_x86.inline.hpp"
  49 
  50 
  51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  52 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  53 // fast versions of NegF/NegD and AbsF/AbsD.
  54 
  55 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  56 static address double_quadword(jlong *adr, jlong lo, jlong hi) {
  57   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  58   // of 128-bits operands for SSE instructions.
  59   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  60   // Store the value to a 128-bits operand.
  61   operand[0] = lo;
  62   operand[1] = hi;
  63   return (address)operand;
  64 }
  65 
  66 // Buffer for 128-bits masks used by SSE instructions.
// NOTE: initializers below start at index 1*2, not 0: double_quadword aligns
// the given address DOWNWARD to a 16-byte boundary, so the first 128-bit slot
// acts as alignment padding.
  67 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  68 
  69 // Static initialization during VM startup.
// Sign-MASK constants (0x7FFF...) clear the sign bit(s) for AbsF/AbsD;
// sign-FLIP constants (0x8000...) toggle the sign bit(s) for NegF/NegD.
  70 address LIR_Assembler::float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  71 address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  72 address LIR_Assembler::float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  73 address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  74 
  75 
  76 NEEDS_CLEANUP // remove these definitions?
  77 const Register SYNC_header = rax;   // synchronization header
  78 const Register SHIFT_count = rcx;   // where count for shift operations must be
  79 
  80 #define __ _masm->
  81 
  82 
  83 static void select_different_registers(Register preserve,
  84                                        Register extra,
  85                                        Register &tmp1,
  86                                        Register &tmp2) {
  87   if (tmp1 == preserve) {
  88     assert_different_registers(tmp1, tmp2, extra);
  89     tmp1 = extra;
  90   } else if (tmp2 == preserve) {
  91     assert_different_registers(tmp1, tmp2, extra);
  92     tmp2 = extra;
  93   }

 521 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 522   assert(src->is_constant(), "should not call otherwise");
 523   assert(dest->is_register(), "should not call otherwise");
 524   LIR_Const* c = src->as_constant_ptr();
 525 
 526   switch (c->type()) {
 527     case T_INT: {
 528       assert(patch_code == lir_patch_none, "no patching handled here");
 529       __ movl(dest->as_register(), c->as_jint());
 530       break;
 531     }
 532 
 533     case T_ADDRESS: {
 534       assert(patch_code == lir_patch_none, "no patching handled here");
 535       __ movptr(dest->as_register(), c->as_jint());
 536       break;
 537     }
 538 
 539     case T_LONG: {
 540       assert(patch_code == lir_patch_none, "no patching handled here");
 541 #if INCLUDE_CDS
 542       if (AOTCodeCache::is_on_for_dump()) {
 543         address b = c->as_pointer();
 544         if (b == (address)ThreadIdentifier::unsafe_offset()) {
 545           __ lea(dest->as_register_lo(), ExternalAddress(b));
 546           break;
 547         }
 548         if (AOTRuntimeConstants::contains(b)) {
 549           __ load_aotrc_address(dest->as_register_lo(), b);
 550           break;
 551         }
 552       }
 553 #endif
 554       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 555       break;
 556     }
 557 
 558     case T_OBJECT: {
 559       if (patch_code != lir_patch_none) {
 560         jobject2reg_with_patching(dest->as_register(), info);
 561       } else {
 562         __ movoop(dest->as_register(), c->as_jobject());
 563       }
 564       break;
 565     }
 566 
 567     case T_METADATA: {
 568       if (patch_code != lir_patch_none) {
 569         klass2reg_with_patching(dest->as_register(), info);
 570       } else {
 571         __ mov_metadata(dest->as_register(), c->as_metadata());
 572       }
 573       break;

1800     } else {
1801       ShouldNotReachHere();
1802     }
1803 
1804   } else {
1805     ShouldNotReachHere();
1806   }
1807 }
1808 
1809 
1810 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1811   if (value->is_double_xmm()) {
1812     switch(code) {
1813       case lir_abs :
1814         {
1815           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1816             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1817           }
1818           assert(!tmp->is_valid(), "do not need temporary");
1819           __ andpd(dest->as_xmm_double_reg(),
1820                    ExternalAddress(LIR_Assembler::double_signmask_pool),
1821                    rscratch1);
1822         }
1823         break;
1824 
1825       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1826       // all other intrinsics are not available in the SSE instruction set, so FPU is used
1827       default      : ShouldNotReachHere();
1828     }
1829 
1830   } else if (code == lir_f2hf) {
1831     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1832   } else if (code == lir_hf2f) {
1833     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1834   } else {
1835     Unimplemented();
1836   }
1837 }
1838 
1839 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1840   // assert(left->destroys_register(), "check");
< prev index next >