14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/rtmLocking.hpp"
34 #include "runtime/vm_version.hpp"
35 #include "utilities/checkedCast.hpp"
36
37 // MacroAssembler extends Assembler by frequently used macros.
38 //
39 // Instructions for which a 'better' code sequence exists depending
40 // on arguments should also go in here.
41
42 class MacroAssembler: public Assembler {
43 friend class LIR_Assembler;
44 friend class Runtime1; // as_Address()
45
46 public:
47 // Support for VM calls
48 //
49 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
50 // may customize this version by overriding it for its purposes (e.g., to save/restore
51 // additional registers when doing a VM call).
52
53 virtual void call_VM_leaf_base(
54 address entry_point, // the entry point
55 int number_of_arguments // the number of arguments to pop after the call
56 );
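  // Illustrative only (not part of the original interface comment): a caller
  // that has already placed its two C arguments in the first argument
  // registers would typically emit
  //   __ call_VM_leaf_base(entry_point, /* number_of_arguments */ 2);
  // where 'entry_point' is the address of the native leaf routine.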
86 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
87 // The implementation is only non-empty for the InterpreterMacroAssembler,
88 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
89 virtual void check_and_handle_popframe(Register java_thread);
90 virtual void check_and_handle_earlyret(Register java_thread);
91
92 Address as_Address(AddressLiteral adr);
93 Address as_Address(ArrayAddress adr, Register rscratch);
94
95 // Support for null-checks
96 //
97 // Generates code that causes a null OS exception if the content of reg is null.
98 // If the accessed location is M[reg + offset] and the offset is known, provide the
99 // offset. No explicit code generation is needed if the offset is within a certain
100 // range (0 <= offset <= page_size).
101
102 void null_check(Register reg, int offset = -1);
103 static bool needs_explicit_null_check(intptr_t offset);
104 static bool uses_implicit_null_check(void* address);
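  // Illustrative sketch (assumption, not from the original header): with a
  // small statically known offset the later access of M[reg + offset] itself
  // raises the OS exception, so null_check may emit nothing; with an unknown
  // offset an explicit access is generated:
  //   __ null_check(robj, oopDesc::klass_offset_in_bytes());  // usually no code
  //   __ null_check(robj);                                     // explicit check
  // ('robj' is a hypothetical register holding the object.)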
105
106 // Required platform-specific helpers for Label::patch_instructions.
107 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
108 void pd_patch_instruction(address branch, address target, const char* file, int line) {
109 unsigned char op = branch[0];
110 assert(op == 0xE8 /* call */ ||
111 op == 0xE9 /* jmp */ ||
112 op == 0xEB /* short jmp */ ||
113 (op & 0xF0) == 0x70 /* short jcc */ ||
114 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
115 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
116 "Invalid opcode at patch point");
117
118 if (op == 0xEB || (op & 0xF0) == 0x70) {
119 // short offset operators (jmp and jcc)
120 char* disp = (char*) &branch[1];
121 int imm8 = checked_cast<int>(target - (address) &disp[1]);
122 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
123 file == nullptr ? "<null>" : file, line);
124 *disp = (char)imm8;
125 } else {
347 void resolve_global_jobject(Register value, Register thread, Register tmp);
348
349 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
350 void c2bool(Register x);
351
352 // C++ bool manipulation
353
354 void movbool(Register dst, Address src);
355 void movbool(Address dst, bool boolconst);
356 void movbool(Address dst, Register src);
357 void testbool(Register dst);
358
359 void resolve_oop_handle(Register result, Register tmp);
360 void resolve_weak_handle(Register result, Register tmp);
361 void load_mirror(Register mirror, Register method, Register tmp);
362 void load_method_holder_cld(Register rresult, Register rmethod);
363
364 void load_method_holder(Register holder, Register method);
365
366 // oop manipulations
367 void load_klass(Register dst, Register src, Register tmp);
368 void store_klass(Register dst, Register src, Register tmp);
369
370 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
371 Register tmp1, Register thread_tmp);
372 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
373 Register tmp1, Register tmp2, Register tmp3);
374
375 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
376 Register thread_tmp = noreg, DecoratorSet decorators = 0);
377 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
378 Register thread_tmp = noreg, DecoratorSet decorators = 0);
379 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
380 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
381
382 // Used for storing null. All other oop constants should be
383 // stored using routines that take a jobject.
384 void store_heap_oop_null(Address dst);
385
386 #ifdef _LP64
387 void store_klass_gap(Register dst, Register src);
388
389 // This dummy is to prevent a call to store_heap_oop from
390 // converting a zero (like null) into a Register by giving
391 // the compiler two choices it can't resolve
392
393 void store_heap_oop(Address dst, void* dummy);
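  // Illustrative (assumption): without this overload a literal zero could
  // quietly bind to the Register overload above; with it, a call such as
  //   __ store_heap_oop(dst, 0);
  // is ambiguous and rejected at compile time, steering callers towards
  // store_heap_oop_null(dst) instead.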
394
395 void encode_heap_oop(Register r);
396 void decode_heap_oop(Register r);
397 void encode_heap_oop_not_null(Register r);
398 void decode_heap_oop_not_null(Register r);
399 void encode_heap_oop_not_null(Register dst, Register src);
400 void decode_heap_oop_not_null(Register dst, Register src);
401
402 void set_narrow_oop(Register dst, jobject obj);
403 void set_narrow_oop(Address dst, jobject obj);
404 void cmp_narrow_oop(Register dst, jobject obj);
405 void cmp_narrow_oop(Address dst, jobject obj);
567
568 public:
569 void push_set(RegSet set, int offset = -1);
570 void pop_set(RegSet set, int offset = -1);
571
572 // Push and pop everything that might be clobbered by a native
573 // runtime call.
574 // Only save the lower 64 bits of each vector register.
575 // Additional registers can be excluded in a passed RegSet.
576 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
577 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
578
579 void push_call_clobbered_registers(bool save_fpu = true) {
580 push_call_clobbered_registers_except(RegSet(), save_fpu);
581 }
582 void pop_call_clobbered_registers(bool restore_fpu = true) {
583 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
584 }
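  // Illustrative pairing around a native runtime call (sketch; the entry name
  // is hypothetical):
  //   __ push_call_clobbered_registers();
  //   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, some_runtime_entry)));
  //   __ pop_call_clobbered_registers();
  // Registers that must stay live across the call can be excluded via the
  // *_except(RegSet, ...) variants.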
585
586 // allocation
587 void tlab_allocate(
588 Register thread, // Current thread
589 Register obj, // result: pointer to object after successful allocation
590 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
591 int con_size_in_bytes, // object size in bytes if known at compile time
592 Register t1, // temp register
593 Register t2, // temp register
594 Label& slow_case // continuation point if fast allocation fails
595 );
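  // Illustrative fast-path sketch for a constant-size allocation (register and
  // size names are hypothetical):
  //   Label slow_case;
  //   __ tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes,
  //                    rbx, rcx, slow_case);
  //   // rax now points to the newly claimed memory; 'slow_case' is reached
  //   // only when the TLAB fast path fails.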
596 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
597
598 // interface method calling
599 void lookup_interface_method(Register recv_klass,
600 Register intf_klass,
601 RegisterOrConstant itable_index,
602 Register method_result,
603 Register scan_temp,
604 Label& no_such_interface,
605 bool return_method = true);
606
607 void lookup_interface_method_stub(Register recv_klass,
608 Register holder_klass,
609 Register resolved_klass,
610 Register method_result,
611 Register scan_temp,
612 Register temp_reg2,
613 Register receiver,
614 int itable_index,
615 Label& L_no_such_interface);
616
617 // virtual method calling
738 // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
739 // operands. In general the names are modified to avoid hiding the instruction in Assembler
740 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
741 // here in MacroAssembler. The major exception to this rule is call.
742
743 // Arithmetics
744
745
746 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
747 void addptr(Address dst, Register src);
748
749 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
750 void addptr(Register dst, int32_t src);
751 void addptr(Register dst, Register src);
752 void addptr(Register dst, RegisterOrConstant src) {
753 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
754 else addptr(dst, src.as_register());
755 }
756
757 void andptr(Register dst, int32_t src);
758 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
759
760 #ifdef _LP64
761 using Assembler::andq;
762 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
763 #endif
764
765 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
766
767 // renamed to drag out the casting of address to int32_t/intptr_t
768 void cmp32(Register src1, int32_t imm);
769
770 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
771 // compare reg - mem, or reg - &mem
772 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
773
774 void cmp32(Register src1, Address src2);
775
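  // Illustrative (the address is hypothetical): compare a register against a
  // 32-bit value at an absolute address; on 64-bit a scratch register may be
  // needed when the address cannot be encoded as a 32-bit displacement:
  //   __ cmp32(rax, ExternalAddress(flag_addr), rscratch1);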
776 #ifndef _LP64
777 void cmpklass(Address dst, Metadata* obj);
778 void cmpklass(Register dst, Metadata* obj);
1851 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1852
1853 using Assembler::movq;
1854 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1855
1856 // Can push value or effective address
1857 void pushptr(AddressLiteral src, Register rscratch);
1858
1859 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1860 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1861
1862 void pushoop(jobject obj, Register rscratch);
1863 void pushklass(Metadata* obj, Register rscratch);
1864
1865 // sign-extend an 'l' (32-bit) value to a pointer-sized element as needed
1866 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1867 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1868
1869
1870 public:
1871 // clear memory of size 'cnt' qwords, starting at 'base';
1872 // if 'is_large' is set, do not try to produce short loop
1873 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1874
1875 // clear memory initialization sequence for constant size;
1876 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1877
1878 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1879 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
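  // Illustrative (hypothetical caller): zero a run of qwords starting at the
  // address in rdi, with the count in a register:
  //   __ clear_mem(rdi, rcnt, rtmp, xtmp, /* is_large */ false);
  // The constant-size overload instead picks the code shape at assembly time.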
1880
1881 // Fill primitive arrays
1882 void generate_fill(BasicType t, bool aligned,
1883 Register to, Register value, Register count,
1884 Register rtmp, XMMRegister xtmp);
1885
1886 void encode_iso_array(Register src, Register dst, Register len,
1887 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1888 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1889
1890 #ifdef _LP64
1891 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1892 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1893 Register y, Register y_idx, Register z,
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/rtmLocking.hpp"
34 #include "runtime/signature.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/checkedCast.hpp"
37
38 class ciInlineKlass;
39
40 // MacroAssembler extends Assembler by frequently used macros.
41 //
42 // Instructions for which a 'better' code sequence exists depending
43 // on arguments should also go in here.
44
45 class MacroAssembler: public Assembler {
46 friend class LIR_Assembler;
47 friend class Runtime1; // as_Address()
48
49 public:
50 // Support for VM calls
51 //
52 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
53 // may customize this version by overriding it for its purposes (e.g., to save/restore
54 // additional registers when doing a VM call).
55
56 virtual void call_VM_leaf_base(
57 address entry_point, // the entry point
58 int number_of_arguments // the number of arguments to pop after the call
59 );
89 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
90 // The implementation is only non-empty for the InterpreterMacroAssembler,
91 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
92 virtual void check_and_handle_popframe(Register java_thread);
93 virtual void check_and_handle_earlyret(Register java_thread);
94
95 Address as_Address(AddressLiteral adr);
96 Address as_Address(ArrayAddress adr, Register rscratch);
97
98 // Support for null-checks
99 //
100 // Generates code that causes a null OS exception if the content of reg is null.
101 // If the accessed location is M[reg + offset] and the offset is known, provide the
102 // offset. No explicit code generation is needed if the offset is within a certain
103 // range (0 <= offset <= page_size).
104
105 void null_check(Register reg, int offset = -1);
106 static bool needs_explicit_null_check(intptr_t offset);
107 static bool uses_implicit_null_check(void* address);
108
109 // markWord tests, kills markWord reg
110 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
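  // Illustrative (hypothetical caller): branch to a dedicated path when the
  // mark word already loaded into 'rmark' denotes an inline type:
  //   __ test_markword_is_inline_type(rmark, L_is_inline_type);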
111
112 // inlineKlass queries, kills temp_reg
113 void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
114 void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
115 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
116
117 // Get the default value oop for the given InlineKlass
118 void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
119 // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
120 // get_default_value_oop with extra assertion for empty inline klass
121 void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
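  // Illustrative (hypothetical registers): materialize the default instance
  // for the InlineKlass held in rklass, leaving the oop in rax:
  //   __ get_default_value_oop(rklass, rtmp, rax);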
122
123 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
124 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
125 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
126 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
127
128 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
129 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
130 void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
131 void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
132 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
133 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
134
135 // Check array klass layout helper for flat or null-free arrays...
136 void test_flat_array_layout(Register lh, Label& is_flat_array);
137 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
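  // Illustrative dispatch sketch (hypothetical caller; registers and labels
  // are local to it): take a dedicated path for flat arrays.
  //   Label is_flat_array;
  //   __ test_flat_array_oop(rarray, rtmp, is_flat_array);
  //   // ... regular (non-flat) element access ...
  //   __ bind(is_flat_array);
  //   // ... flat element access ...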
138
139 // Required platform-specific helpers for Label::patch_instructions.
140 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
141 void pd_patch_instruction(address branch, address target, const char* file, int line) {
142 unsigned char op = branch[0];
143 assert(op == 0xE8 /* call */ ||
144 op == 0xE9 /* jmp */ ||
145 op == 0xEB /* short jmp */ ||
146 (op & 0xF0) == 0x70 /* short jcc */ ||
147 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
148 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
149 "Invalid opcode at patch point");
150
151 if (op == 0xEB || (op & 0xF0) == 0x70) {
152 // short offset operators (jmp and jcc)
153 char* disp = (char*) &branch[1];
154 int imm8 = checked_cast<int>(target - (address) &disp[1]);
155 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
156 file == nullptr ? "<null>" : file, line);
157 *disp = (char)imm8;
158 } else {
380 void resolve_global_jobject(Register value, Register thread, Register tmp);
381
382 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
383 void c2bool(Register x);
384
385 // C++ bool manipulation
386
387 void movbool(Register dst, Address src);
388 void movbool(Address dst, bool boolconst);
389 void movbool(Address dst, Register src);
390 void testbool(Register dst);
391
392 void resolve_oop_handle(Register result, Register tmp);
393 void resolve_weak_handle(Register result, Register tmp);
394 void load_mirror(Register mirror, Register method, Register tmp);
395 void load_method_holder_cld(Register rresult, Register rmethod);
396
397 void load_method_holder(Register holder, Register method);
398
399 // oop manipulations
400 void load_metadata(Register dst, Register src);
401 void load_klass(Register dst, Register src, Register tmp);
402 void store_klass(Register dst, Register src, Register tmp);
403
404 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
405 Register tmp1, Register thread_tmp);
406 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
407 Register tmp1, Register tmp2, Register tmp3);
408
409 void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
410
411 // inline type data payload offsets...
412 void first_field_offset(Register inline_klass, Register offset);
413 void data_for_oop(Register oop, Register data, Register inline_klass);
414 // get data payload ptr of a flat value array at index, kills rcx and index
415 void data_for_value_array_index(Register array, Register array_klass,
416 Register index, Register data);
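  // Illustrative sketch (hypothetical registers): locate the flat field
  // payload of a non-null inline-type oop:
  //   __ load_klass(rklass, roop, rtmp);
  //   __ data_for_oop(roop, rdata, rklass);  // rdata = roop + first field offset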
417
418 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
419 Register thread_tmp = noreg, DecoratorSet decorators = 0);
420 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
421 Register thread_tmp = noreg, DecoratorSet decorators = 0);
422 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
423 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
424
425 // Used for storing null. All other oop constants should be
426 // stored using routines that take a jobject.
427 void store_heap_oop_null(Address dst);
428
429 void load_prototype_header(Register dst, Register src, Register tmp);
430
431 #ifdef _LP64
432 void store_klass_gap(Register dst, Register src);
433
434 // This dummy is to prevent a call to store_heap_oop from
435 // converting a zero (like null) into a Register by giving
436 // the compiler two choices it can't resolve
437
438 void store_heap_oop(Address dst, void* dummy);
439
440 void encode_heap_oop(Register r);
441 void decode_heap_oop(Register r);
442 void encode_heap_oop_not_null(Register r);
443 void decode_heap_oop_not_null(Register r);
444 void encode_heap_oop_not_null(Register dst, Register src);
445 void decode_heap_oop_not_null(Register dst, Register src);
446
447 void set_narrow_oop(Register dst, jobject obj);
448 void set_narrow_oop(Address dst, jobject obj);
449 void cmp_narrow_oop(Register dst, jobject obj);
450 void cmp_narrow_oop(Address dst, jobject obj);
612
613 public:
614 void push_set(RegSet set, int offset = -1);
615 void pop_set(RegSet set, int offset = -1);
616
617 // Push and pop everything that might be clobbered by a native
618 // runtime call.
619 // Only save the lower 64 bits of each vector register.
620 // Additional registers can be excluded in a passed RegSet.
621 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
622 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
623
624 void push_call_clobbered_registers(bool save_fpu = true) {
625 push_call_clobbered_registers_except(RegSet(), save_fpu);
626 }
627 void pop_call_clobbered_registers(bool restore_fpu = true) {
628 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
629 }
630
631 // allocation
632
633 // Object / value buffer allocation...
634 // Allocate instance of klass, assumes klass initialized by caller
635 // new_obj prefers to be rax
636 // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
637 void allocate_instance(Register klass, Register new_obj,
638 Register t1, Register t2,
639 bool clear_fields, Label& alloc_failed);
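  // Illustrative use (sketch; register choices other than the rax preference
  // are hypothetical):
  //   Label alloc_failed;
  //   __ allocate_instance(rbx /* klass */, rax /* new_obj */, rcx, rdx,
  //                        /* clear_fields */ true, alloc_failed);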
640
641 void tlab_allocate(
642 Register thread, // Current thread
643 Register obj, // result: pointer to object after successful allocation
644 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
645 int con_size_in_bytes, // object size in bytes if known at compile time
646 Register t1, // temp register
647 Register t2, // temp register
648 Label& slow_case // continuation point if fast allocation fails
649 );
650 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
651
652 // For field "index" within "klass", return inline_klass ...
653 void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
654
655 // interface method calling
656 void lookup_interface_method(Register recv_klass,
657 Register intf_klass,
658 RegisterOrConstant itable_index,
659 Register method_result,
660 Register scan_temp,
661 Label& no_such_interface,
662 bool return_method = true);
663
664 void lookup_interface_method_stub(Register recv_klass,
665 Register holder_klass,
666 Register resolved_klass,
667 Register method_result,
668 Register scan_temp,
669 Register temp_reg2,
670 Register receiver,
671 int itable_index,
672 Label& L_no_such_interface);
673
674 // virtual method calling
795 // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
796 // operands. In general the names are modified to avoid hiding the instruction in Assembler
797 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
798 // here in MacroAssembler. The major exception to this rule is call.
799
800 // Arithmetics
801
802
803 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
804 void addptr(Address dst, Register src);
805
806 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
807 void addptr(Register dst, int32_t src);
808 void addptr(Register dst, Register src);
809 void addptr(Register dst, RegisterOrConstant src) {
810 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
811 else addptr(dst, src.as_register());
812 }
813
814 void andptr(Register dst, int32_t src);
815 void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
816 void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
817
818 #ifdef _LP64
819 using Assembler::andq;
820 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
821 #endif
822
823 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
824
825 // renamed to drag out the casting of address to int32_t/intptr_t
826 void cmp32(Register src1, int32_t imm);
827
828 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
829 // compare reg - mem, or reg - &mem
830 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
831
832 void cmp32(Register src1, Address src2);
833
834 #ifndef _LP64
835 void cmpklass(Address dst, Metadata* obj);
836 void cmpklass(Register dst, Metadata* obj);
1909 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1910
1911 using Assembler::movq;
1912 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1913
1914 // Can push value or effective address
1915 void pushptr(AddressLiteral src, Register rscratch);
1916
1917 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1918 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1919
1920 void pushoop(jobject obj, Register rscratch);
1921 void pushklass(Metadata* obj, Register rscratch);
1922
1923 // sign-extend an 'l' (32-bit) value to a pointer-sized element as needed
1924 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1925 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1926
1927
1928 public:
1929 // Inline type specific methods
1930 #include "asm/macroAssembler_common.hpp"
1931
1932 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1933 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1934 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1935 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1936 RegState reg_state[]);
1937 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1938 VMRegPair* from, int from_count, int& from_index, VMReg to,
1939 RegState reg_state[], Register val_array);
1940 int extend_stack_for_inline_args(int args_on_stack);
1941 void remove_frame(int initial_framesize, bool needs_stack_repair);
1942 VMReg spill_reg_for(VMReg reg);
1943
1944 // clear memory of size 'cnt' qwords, starting at 'base';
1945 // if 'is_large' is set, do not try to produce short loop
1946 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1947
1948 // clear memory initialization sequence for constant size;
1949 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1950
1951 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1952 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1953
1954 // Fill primitive arrays
1955 void generate_fill(BasicType t, bool aligned,
1956 Register to, Register value, Register count,
1957 Register rtmp, XMMRegister xtmp);
1958
1959 void encode_iso_array(Register src, Register dst, Register len,
1960 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1961 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1962
1963 #ifdef _LP64
1964 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1965 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1966 Register y, Register y_idx, Register z,