
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

Old version:

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gc_globals.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_x86.inline.hpp"
  46 
  47 
  48 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  49 // instructions, to allow sign-masking or sign-bit flipping.  They enable
  50 // fast versions of NegF/NegD and AbsF/AbsD.
  51 
  52 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  53 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  54   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  55   // for the 128-bit operands of SSE instructions.
  56   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  57   // Store the value into the 128-bit operand.
  58   operand[0] = lo;
  59   operand[1] = hi;
  60   return operand;
  61 }
  62 
  63 // Buffer for 128-bit masks used by SSE instructions.
  64 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  65 
  66 // Static initialization during VM startup.
  67 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  68 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  69 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  70 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  71 
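An aside on the pool sizing above: fp_signmask_pool reserves five 128-bit
slots but only four are handed out, because double_quadword() may round a
slot's start address down by up to 15 bytes to reach a 16-byte boundary;
the spare slot guarantees every rounded operand still lies inside the
array. A minimal standalone sketch of the same trick (a hypothetical toy
using int64_t in place of jlong, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    static int64_t pool[(1 + 1) * 2];  // 1*128 bits of data + 128 bits of slack

    static int64_t* quadword(int64_t* adr, int64_t lo, int64_t hi) {
      // Round down to a 16-byte boundary; the slack slot keeps the
      // resulting 128-bit store inside 'pool'.
      int64_t* operand = (int64_t*)(((intptr_t)adr) & ~(intptr_t)0xF);
      operand[0] = lo;
      operand[1] = hi;
      return operand;
    }

    int main() {
      int64_t* mask = quadword(&pool[1 * 2],
                               0x7FFFFFFF7FFFFFFFLL, 0x7FFFFFFF7FFFFFFFLL);
      printf("16-byte aligned: %d\n", (int)(((uintptr_t)mask & 0xF) == 0)); // 1
    }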
  72 
  73 NEEDS_CLEANUP // remove these definitions?
  74 const Register SYNC_header = rax;   // synchronization header
  75 const Register SHIFT_count = rcx;   // where count for shift operations must be
  76 
  77 #define __ _masm->
  78 
  79 
  80 static void select_different_registers(Register preserve,
  81                                        Register extra,
  82                                        Register &tmp1,
  83                                        Register &tmp2) {
  84   if (tmp1 == preserve) {
  85     assert_different_registers(tmp1, tmp2, extra);
  86     tmp1 = extra;
  87   } else if (tmp2 == preserve) {
  88     assert_different_registers(tmp1, tmp2, extra);
  89     tmp2 = extra;
  90   }

 510 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 511   assert(src->is_constant(), "should not call otherwise");
 512   assert(dest->is_register(), "should not call otherwise");
 513   LIR_Const* c = src->as_constant_ptr();
 514 
 515   switch (c->type()) {
 516     case T_INT: {
 517       assert(patch_code == lir_patch_none, "no patching handled here");
 518       __ movl(dest->as_register(), c->as_jint());
 519       break;
 520     }
 521 
 522     case T_ADDRESS: {
 523       assert(patch_code == lir_patch_none, "no patching handled here");
 524       __ movptr(dest->as_register(), c->as_jint());
 525       break;
 526     }
 527 
 528     case T_LONG: {
 529       assert(patch_code == lir_patch_none, "no patching handled here");
 530       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 531       break;
 532     }
 533 
 534     case T_OBJECT: {
 535       if (patch_code != lir_patch_none) {
 536         jobject2reg_with_patching(dest->as_register(), info);
 537       } else {
 538         __ movoop(dest->as_register(), c->as_jobject());
 539       }
 540       break;
 541     }
 542 
 543     case T_METADATA: {
 544       if (patch_code != lir_patch_none) {
 545         klass2reg_with_patching(dest->as_register(), info);
 546       } else {
 547         __ mov_metadata(dest->as_register(), c->as_metadata());
 548       }
 549       break;

1807     } else {
1808       ShouldNotReachHere();
1809     }
1810 
1811   } else {
1812     ShouldNotReachHere();
1813   }
1814 }
1815 
1816 
1817 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1818   if (value->is_double_xmm()) {
1819     switch(code) {
1820       case lir_abs :
1821         {
1822           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1823             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1824           }
1825           assert(!tmp->is_valid(), "do not need temporary");
1826           __ andpd(dest->as_xmm_double_reg(),
1827                    ExternalAddress((address)double_signmask_pool),
1828                    rscratch1);
1829         }
1830         break;
1831 
1832       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1833       // no other intrinsics are available in the SSE instruction set, so the FPU is used
1834       default      : ShouldNotReachHere();
1835     }
1836 
1837   } else if (code == lir_f2hf) {
1838     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1839   } else if (code == lir_hf2f) {
1840     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1841   } else {
1842     Unimplemented();
1843   }
1844 }
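A note on the lir_abs case above: IEEE-754 keeps the sign in the top bit
of each lane, so ANDing with double_signmask_pool (0x7FFF...FFFF) clears
it, and the signflip pools exist so a negation can XOR that bit instead.
A scalar sketch of the same bit trick (standalone, assumes C++20 for
std::bit_cast; not HotSpot code):

    #include <bit>
    #include <cstdint>

    // What andpd with the signmask does to one 64-bit lane: clear the sign bit.
    double abs_by_mask(double x) {
      return std::bit_cast<double>(std::bit_cast<uint64_t>(x)
                                   & UINT64_C(0x7FFFFFFFFFFFFFFF));
    }

    // What xorpd with the signflip mask does to one lane: flip the sign bit.
    double neg_by_flip(double x) {
      return std::bit_cast<double>(std::bit_cast<uint64_t>(x)
                                   ^ UINT64_C(0x8000000000000000));
    }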
1845 
1846 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1847       // assert(left->destroys_register(), "check");

New version:

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciUtilities.hpp"
  36 #include "code/aotCodeCache.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/threadIdentifier.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_x86.inline.hpp"
  49 
  50 
  51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  52 // instructions, to allow sign-masking or sign-bit flipping.  They enable
  53 // fast versions of NegF/NegD and AbsF/AbsD.
  54 
  55 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  56 static address double_quadword(jlong *adr, jlong lo, jlong hi) {
  57   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  58   // for the 128-bit operands of SSE instructions.
  59   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  60   // Store the value into the 128-bit operand.
  61   operand[0] = lo;
  62   operand[1] = hi;
  63   return (address)operand;
  64 }
  65 
  66 // Buffer for 128-bit masks used by SSE instructions.
  67 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
  68 
  69 // Static initialization during VM startup.
  70 address LIR_Assembler::float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  71 address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  72 address LIR_Assembler::float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  73 address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  74 
  75 
  76 NEEDS_CLEANUP // remove these definitions?
  77 const Register SYNC_header = rax;   // synchronization header
  78 const Register SHIFT_count = rcx;   // where count for shift operations must be
  79 
  80 #define __ _masm->
  81 
  82 
  83 static void select_different_registers(Register preserve,
  84                                        Register extra,
  85                                        Register &tmp1,
  86                                        Register &tmp2) {
  87   if (tmp1 == preserve) {
  88     assert_different_registers(tmp1, tmp2, extra);
  89     tmp1 = extra;
  90   } else if (tmp2 == preserve) {
  91     assert_different_registers(tmp1, tmp2, extra);
  92     tmp2 = extra;
  93   }
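A hypothetical usage sketch of the helper above (register choices are
illustrative only): when a temporary happens to be the register that must
stay live, it is redirected to the caller-supplied spare, and the asserts
check that the spare really is distinct from the other operands.

    Register tmp1 = rbx;
    Register tmp2 = rax;
    // rax must survive this sequence; rdx is known to be free here.
    select_different_registers(/* preserve */ rax, /* extra */ rdx, tmp1, tmp2);
    // tmp2 collided with rax, so it is now rdx; tmp1 is still rbx.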

 513 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
 514   assert(src->is_constant(), "should not call otherwise");
 515   assert(dest->is_register(), "should not call otherwise");
 516   LIR_Const* c = src->as_constant_ptr();
 517 
 518   switch (c->type()) {
 519     case T_INT: {
 520       assert(patch_code == lir_patch_none, "no patching handled here");
 521       __ movl(dest->as_register(), c->as_jint());
 522       break;
 523     }
 524 
 525     case T_ADDRESS: {
 526       assert(patch_code == lir_patch_none, "no patching handled here");
 527       __ movptr(dest->as_register(), c->as_jint());
 528       break;
 529     }
 530 
 531     case T_LONG: {
 532       assert(patch_code == lir_patch_none, "no patching handled here");
 533       if (AOTCodeCache::is_on_for_dump()) {
 534         // AOTCodeCache needs relocation info for runtime addresses such as the card table base
 535         address b = c->as_pointer();
 536         if (is_card_table_address(b)) {
 537           __ lea(dest->as_register_lo(), ExternalAddress(b));
 538           break;
 539         }
 540         if (b == (address)ThreadIdentifier::unsafe_offset()) {
 541           __ lea(dest->as_register_lo(), ExternalAddress(b));
 542           break;
 543         }
 544 #if INCLUDE_CDS
 545         if (AOTRuntimeConstants::contains(b)) {
 546           __ load_aotrc_address(dest->as_register_lo(), b);
 547           break;
 548         }
 549 #endif
 550       }
 551       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 552       break;
 553     }
 554 
 555     case T_OBJECT: {
 556       if (patch_code != lir_patch_none) {
 557         jobject2reg_with_patching(dest->as_register(), info);
 558       } else {
 559         __ movoop(dest->as_register(), c->as_jobject());
 560       }
 561       break;
 562     }
 563 
 564     case T_METADATA: {
 565       if (patch_code != lir_patch_none) {
 566         klass2reg_with_patching(dest->as_register(), info);
 567       } else {
 568         __ mov_metadata(dest->as_register(), c->as_metadata());
 569       }
 570       break;
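Why the new T_LONG branch above is needed: with is_on_for_dump() the code
being generated will be serialized and executed in a different process,
where the card table base and the thread-identifier counter live at other
addresses, so a raw 64-bit immediate would go stale; lea with
ExternalAddress records a relocation that the loader can patch instead. A
toy sketch of that dump-and-patch idea (hypothetical, not the actual
AOTCodeCache implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum Symbol { CARD_TABLE_BASE };

    struct Blob {
      std::vector<uint8_t> code;
      struct Reloc { size_t offset; Symbol sym; };
      std::vector<Reloc> relocs;
    };

    // Dump time: emit a placeholder and remember what belongs in it.
    void emit_external_address(Blob& b, Symbol sym) {
      b.relocs.push_back({b.code.size(), sym});
      b.code.insert(b.code.end(), sizeof(uint64_t), 0);
    }

    // Load time: resolve each symbol in the *current* process and patch.
    void patch(Blob& b, uint64_t card_table_base_now) {
      for (const Blob::Reloc& r : b.relocs) {
        uint64_t v = (r.sym == CARD_TABLE_BASE) ? card_table_base_now : 0;
        std::memcpy(&b.code[r.offset], &v, sizeof v);
      }
    }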

1828     } else {
1829       ShouldNotReachHere();
1830     }
1831 
1832   } else {
1833     ShouldNotReachHere();
1834   }
1835 }
1836 
1837 
1838 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
1839   if (value->is_double_xmm()) {
1840     switch(code) {
1841       case lir_abs :
1842         {
1843           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
1844             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
1845           }
1846           assert(!tmp->is_valid(), "do not need temporary");
1847           __ andpd(dest->as_xmm_double_reg(),
1848                    ExternalAddress(LIR_Assembler::double_signmask_pool),
1849                    rscratch1);
1850         }
1851         break;
1852 
1853       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
1854       // no other intrinsics are available in the SSE instruction set, so the FPU is used
1855       default      : ShouldNotReachHere();
1856     }
1857 
1858   } else if (code == lir_f2hf) {
1859     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
1860   } else if (code == lir_hf2f) {
1861     __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
1862   } else {
1863     Unimplemented();
1864   }
1865 }
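On the lir_f2hf / lir_hf2f cases above: flt_to_flt16 and flt16_to_flt
convert between single and half precision, which on x86 corresponds to the
F16C instructions vcvtps2ph / vcvtph2ps (an assumption based on the helper
names). A scalar sketch using the matching compiler intrinsics (requires
an F16C-capable CPU and compiling with -mf16c; not HotSpot code):

    #include <immintrin.h>
    #include <cstdio>

    int main() {
      // Single precision -> IEEE half, round to nearest even.
      unsigned short h = _cvtss_sh(1.5f, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
      // IEEE half -> single precision (always exact).
      float f = _cvtsh_ss(h);
      printf("0x%04x -> %f\n", (unsigned)h, f);  // 0x3e00 -> 1.500000
    }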
1866 
1867 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
1868   // assert(left->destroys_register(), "check");