
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gc_globals.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 #include "vmreg_x86.inline.hpp"
  46 
  47 
  48 // These masks provide 128-bit aligned bitmasks for the XMM instructions,
  49 // allowing sign-masking and sign-bit flipping. They enable fast versions
  50 // of NegF/NegD and AbsF/AbsD.
  51 
  52 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  53 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  54   // Use the expression (adr)&(~0xF) to derive a 128-bit aligned address
  55   // for the 128-bit operands of SSE instructions.
  56   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  57   // Store the value into the 128-bit operand.
  58   operand[0] = lo;
  59   operand[1] = hi;
  60   return operand;
  61 }
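
The &(~0xF) rounding above stays in bounds only because the pool below over-allocates by one 128-bit slot. A minimal standalone check of the same computation (illustrative C++, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // One spare 128-bit slot, as in fp_signmask_pool, so that rounding an
    // interior pointer down to a 16-byte boundary stays inside the buffer.
    static long long pool[(1 + 1) * 2];

    int main() {
      // Same expression as double_quadword: clear the low four address bits.
      long long* operand = (long long*)(((intptr_t)&pool[2]) & ~(intptr_t)0xF);
      assert(((uintptr_t)operand & 0xF) == 0);            // 16-byte aligned
      assert((char*)operand >= (char*)pool);              // no underflow
      assert((char*)(operand + 2) <= (char*)(pool + 4));  // room for 128 bits
      return 0;
    }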
  62 
  63 // Buffer for the 128-bit masks used by SSE instructions.
  64 static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits (data) + 128 bits (alignment)
  65 
  66 // Static initialization during VM startup.
  67 static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  68 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  69 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  70 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
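
For reference, the bit-level effect of andpd/andps with the signmask pools and xorpd/xorps with the signflip pools, written out in scalar form (a standalone sketch, not the code C1 emits):

    #include <cstdint>
    #include <cstring>

    // AbsD: clear the IEEE 754 sign bit, as andpd with double_signmask_pool does.
    static double abs_d(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits &= UINT64_C(0x7FFFFFFFFFFFFFFF);
      std::memcpy(&d, &bits, sizeof bits);
      return d;
    }

    // NegD: flip the sign bit, as xorpd with double_signflip_pool does.
    static double neg_d(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits ^= UINT64_C(0x8000000000000000);
      std::memcpy(&d, &bits, sizeof bits);
      return d;
    }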
  71 
  72 
  73 NEEDS_CLEANUP // remove these definitions?
  74 const Register SYNC_header = rax;   // synchronization header
  75 const Register SHIFT_count = rcx;   // where count for shift operations must be
  76 
  77 #define __ _masm->
  78 
  79 
  80 static void select_different_registers(Register preserve,
  81                                        Register extra,
  82                                        Register &tmp1,
  83                                        Register &tmp2) {
  84   if (tmp1 == preserve) {
  85     assert_different_registers(tmp1, tmp2, extra);
  86     tmp1 = extra;
  87   } else if (tmp2 == preserve) {
  88     assert_different_registers(tmp1, tmp2, extra);
  89     tmp2 = extra;
  90   }

 557   assert(src->is_constant(), "should not call otherwise");
 558   assert(dest->is_register(), "should not call otherwise");
 559   LIR_Const* c = src->as_constant_ptr();
 560 
 561   switch (c->type()) {
 562     case T_INT: {
 563       assert(patch_code == lir_patch_none, "no patching handled here");
 564       __ movl(dest->as_register(), c->as_jint());
 565       break;
 566     }
 567 
 568     case T_ADDRESS: {
 569       assert(patch_code == lir_patch_none, "no patching handled here");
 570       __ movptr(dest->as_register(), c->as_jint());
 571       break;
 572     }
 573 
 574     case T_LONG: {
 575       assert(patch_code == lir_patch_none, "no patching handled here");
 576 #ifdef _LP64
 577       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 578 #else
 579       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 580       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 581 #endif // _LP64
 582       break;
 583     }
 584 
 585     case T_OBJECT: {
 586       if (patch_code != lir_patch_none) {
 587         jobject2reg_with_patching(dest->as_register(), info);
 588       } else {
 589         __ movoop(dest->as_register(), c->as_jobject());
 590       }
 591       break;
 592     }
 593 
 594     case T_METADATA: {
 595       if (patch_code != lir_patch_none) {
 596         klass2reg_with_patching(dest->as_register(), info);

2380       __ fremr(noreg);
2381       break;
2382 
2383     default:
2384       ShouldNotReachHere();
2385   }
2386 }
2387 #endif // _LP64
2388 
2389 
2390 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
2391   if (value->is_double_xmm()) {
2392     switch(code) {
2393       case lir_abs :
2394         {
2395           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2396             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2397           }
2398           assert(!tmp->is_valid(), "do not need temporary");
2399           __ andpd(dest->as_xmm_double_reg(),
2400                    ExternalAddress((address)double_signmask_pool),
2401                    rscratch1);
2402         }
2403         break;
2404 
2405       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
 2406       // no other intrinsics are available in the SSE instruction set, so the FPU is used
2407       default      : ShouldNotReachHere();
2408     }
2409 
2410 #ifndef _LP64
2411   } else if (value->is_double_fpu()) {
2412     assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2413     switch(code) {
2414       case lir_abs   : __ fabs() ; break;
2415       case lir_sqrt  : __ fsqrt(); break;
2416       default      : ShouldNotReachHere();
2417     }
2418 #endif // !_LP64
2419   } else if (code == lir_f2hf) {
2420     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciUtilities.hpp"
  36 #include "code/SCCache.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 #include "vmreg_x86.inline.hpp"
  48 
  49 
  50 // These masks provide 128-bit aligned bitmasks for the XMM instructions,
  51 // allowing sign-masking and sign-bit flipping. They enable fast versions
  52 // of NegF/NegD and AbsF/AbsD.
  53 
  54 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  55 static address double_quadword(jlong *adr, jlong lo, jlong hi) {
  56   // Use the expression (adr)&(~0xF) to derive a 128-bit aligned address
  57   // for the 128-bit operands of SSE instructions.
  58   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  59   // Store the value into the 128-bit operand.
  60   operand[0] = lo;
  61   operand[1] = hi;
  62   return (address)operand;
  63 }
  64 
  65 // Buffer for the 128-bit masks used by SSE instructions.
  66 static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits (data) + 128 bits (alignment)
  67 
  68 // Static initialization during VM startup.
  69 address LIR_Assembler::float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
  70 address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
  71 address LIR_Assembler::float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
  72 address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
  73 
  74 
  75 NEEDS_CLEANUP // remove these definitions?
  76 const Register SYNC_header = rax;   // synchronization header
  77 const Register SHIFT_count = rcx;   // where count for shift operations must be
  78 
  79 #define __ _masm->
  80 
  81 
  82 static void select_different_registers(Register preserve,
  83                                        Register extra,
  84                                        Register &tmp1,
  85                                        Register &tmp2) {
  86   if (tmp1 == preserve) {
  87     assert_different_registers(tmp1, tmp2, extra);
  88     tmp1 = extra;
  89   } else if (tmp2 == preserve) {
  90     assert_different_registers(tmp1, tmp2, extra);
  91     tmp2 = extra;
  92   }

 559   assert(src->is_constant(), "should not call otherwise");
 560   assert(dest->is_register(), "should not call otherwise");
 561   LIR_Const* c = src->as_constant_ptr();
 562 
 563   switch (c->type()) {
 564     case T_INT: {
 565       assert(patch_code == lir_patch_none, "no patching handled here");
 566       __ movl(dest->as_register(), c->as_jint());
 567       break;
 568     }
 569 
 570     case T_ADDRESS: {
 571       assert(patch_code == lir_patch_none, "no patching handled here");
 572       __ movptr(dest->as_register(), c->as_jint());
 573       break;
 574     }
 575 
 576     case T_LONG: {
 577       assert(patch_code == lir_patch_none, "no patching handled here");
 578 #ifdef _LP64
 579       if (SCCache::is_on_for_write()) {
 580         // SCA needs relocation info for card table base
 581         address b = c->as_pointer();
 582         if (is_card_table_address(b)) {
 583           __ lea(dest->as_register_lo(), ExternalAddress(b));
 584           break;
 585         }
 586         if (AOTRuntimeConstants::contains(b)) {
 587           __ load_aotrc_address(dest->as_register_lo(), b);
 588           break;
 589         }
 590       }
 591       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 592 #else
 593       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 594       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 595 #endif // _LP64
 596       break;
 597     }
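
The new SCCache::is_on_for_write() path above exists because a raw 64-bit immediate carries no relocation info: if the card table base or an AOT runtime constant were baked into cached code as an immediate, it could not be patched when that code is loaded into another process, so it is emitted via lea from an ExternalAddress (which records a relocation) instead. A toy emitter sketching the distinction (illustrative only; ToyEmitter and its members are invented names, not HotSpot's API):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Reloc { size_t offset; };   // where an address was embedded

    struct ToyEmitter {
      std::vector<uint8_t> code;
      std::vector<Reloc>   relocs;

      // Raw immediate: the address is baked into the code bytes for good.
      void emit_immediate(uintptr_t target) {
        size_t off = code.size();
        code.resize(off + sizeof target);
        std::memcpy(&code[off], &target, sizeof target);
      }

      // Relocatable form: same bytes, plus a record of where they live, so
      // a cache loader can rewrite them for a different address space.
      void emit_relocatable(uintptr_t target) {
        relocs.push_back({code.size()});
        emit_immediate(target);
      }

      // What loading cached code involves: patch every recorded site.
      void patch_all(uintptr_t new_target) {
        for (const Reloc& r : relocs) {
          std::memcpy(&code[r.offset], &new_target, sizeof new_target);
        }
      }
    };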
 598 
 599     case T_OBJECT: {
 600       if (patch_code != lir_patch_none) {
 601         jobject2reg_with_patching(dest->as_register(), info);
 602       } else {
 603         __ movoop(dest->as_register(), c->as_jobject());
 604       }
 605       break;
 606     }
 607 
 608     case T_METADATA: {
 609       if (patch_code != lir_patch_none) {
 610         klass2reg_with_patching(dest->as_register(), info);

2394       __ fremr(noreg);
2395       break;
2396 
2397     default:
2398       ShouldNotReachHere();
2399   }
2400 }
2401 #endif // _LP64
2402 
2403 
2404 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
2405   if (value->is_double_xmm()) {
2406     switch(code) {
2407       case lir_abs :
2408         {
2409           if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2410             __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2411           }
2412           assert(!tmp->is_valid(), "do not need temporary");
2413           __ andpd(dest->as_xmm_double_reg(),
2414                    ExternalAddress(LIR_Assembler::double_signmask_pool),
2415                    rscratch1);
2416         }
2417         break;
2418 
2419       case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
 2420       // no other intrinsics are available in the SSE instruction set, so the FPU is used
2421       default      : ShouldNotReachHere();
2422     }
2423 
2424 #ifndef _LP64
2425   } else if (value->is_double_fpu()) {
2426     assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2427     switch(code) {
2428       case lir_abs   : __ fabs() ; break;
2429       case lir_sqrt  : __ fsqrt(); break;
2430       default      : ShouldNotReachHere();
2431     }
2432 #endif // !_LP64
2433   } else if (code == lir_f2hf) {
2434     __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
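
For context, lir_f2hf above converts a float in an XMM register to an IEEE 754 binary16 value in a general register. The same conversion can be written with the F16C intrinsic (a sketch assuming F16C hardware and a compiler flag such as -mf16c; this is not the code C1 emits):

    #include <cstdint>
    #include <immintrin.h>

    // Round-to-nearest-even float -> half conversion (vcvtps2ph under the hood).
    static uint16_t f2hf(float f) {
      return _cvtss_sh(f, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }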