 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciUtilities.hpp"
#include "code/aotCodeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadIdentifier.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have only 32-bit alignment on x86.
static address double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
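  // Masking clears the low four address bits, which moves the pointer down by
  // at most 15 bytes; the pool below reserves one extra 128-bit slot of slack,
  // so the aligned address always stays inside the pool.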
  // Store the value into the 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return (address)operand;
}

// Buffer for the 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4 * 128 bits (data) + 128 bits (alignment)

// Static initialization during VM startup.
address LIR_Assembler::float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
address LIR_Assembler::double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
address LIR_Assembler::float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
address LIR_Assembler::double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
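// Usage sketch: AbsF/AbsD AND the value with a signmask pool entry to clear
// the IEEE 754 sign bit, while NegF/NegD XOR it with a signflip pool entry to
// toggle the sign bit (see the andpd in intrinsic_op below).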


NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


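// Pick temporary registers that do not alias 'preserve': if either tmp
// collides with it, the spare 'extra' register is substituted instead.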
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }

// ... (intervening code elided) ...

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      if (AOTCodeCache::is_on_for_dump()) {
        // AOTCodeCache needs relocation info for card table base
        address b = c->as_pointer();
        if (is_card_table_address(b)) {
          __ lea(dest->as_register_lo(), ExternalAddress(b));
          break;
        }
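        // The thread-identifier counter is likewise an external address that
        // must carry relocation info when dumped into the AOT code cache.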
        if (b == (address)ThreadIdentifier::unsafe_offset()) {
          __ lea(dest->as_register_lo(), ExternalAddress(b));
          break;
        }
#if INCLUDE_CDS
        // Constants inside the AOT runtime constants area are loaded through
        // a dedicated helper so they can be resolved at load time.
        if (AOTRuntimeConstants::contains(b)) {
          __ load_aotrc_address(dest->as_register_lo(), b);
          break;
        }
#endif
      }
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

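    // Metadata constants follow the same pattern: patch when the klass is not
    // yet resolved, otherwise embed the Metadata* directly.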
    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;

// ... (intervening code elided) ...
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          assert(!tmp->is_valid(), "do not need temporary");
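          // ANDing with the 128-bit sign mask clears the IEEE 754 sign bit,
          // yielding |x| without a branch.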
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress(LIR_Assembler::double_signmask_pool),
                   rscratch1);
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // All other intrinsics are not available as SSE instructions, so the FPU is used.
      default      : ShouldNotReachHere();
    }

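  // Half-precision conversions: lir_f2hf narrows a float in an XMM register to
  // a binary16 value in a GPR; lir_hf2f widens a binary16 value from a GPR
  // back to a float in an XMM register.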
  } else if (code == lir_f2hf) {
    __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg());
  } else if (code == lir_hf2f) {
    __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register());
  } else {
    Unimplemented();
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // assert(left->destroys_register(), "check");