14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/rtmLocking.hpp"
34 #include "runtime/vm_version.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42 friend class LIR_Assembler;
43 friend class Runtime1; // as_Address()
44
45 public:
46 // Support for VM calls
47 //
48 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
49 // may customize this version by overriding it for its purposes (e.g., to save/restore
50 // additional registers when doing a VM call).
51
52 virtual void call_VM_leaf_base(
53 address entry_point, // the entry point
54 int number_of_arguments // the number of arguments to pop after the call
55 );
85 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
86 // The implementation is only non-empty for the InterpreterMacroAssembler,
87 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
88 virtual void check_and_handle_popframe(Register java_thread);
89 virtual void check_and_handle_earlyret(Register java_thread);
90
91 Address as_Address(AddressLiteral adr);
92 Address as_Address(ArrayAddress adr, Register rscratch);
93
94 // Support for null-checks
95 //
96 // Generates code that causes a null OS exception if the content of reg is null.
97 // If the accessed location is M[reg + offset] and the offset is known, provide the
98 // offset. No explicit code generation is needed if the offset is within a certain
99 // range (0 <= offset <= page_size).
100
101 void null_check(Register reg, int offset = -1);
102 static bool needs_explicit_null_check(intptr_t offset);
103 static bool uses_implicit_null_check(void* address);
104
105 // Required platform-specific helpers for Label::patch_instructions.
106 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
107 void pd_patch_instruction(address branch, address target, const char* file, int line) {
108 unsigned char op = branch[0];
109 assert(op == 0xE8 /* call */ ||
110 op == 0xE9 /* jmp */ ||
111 op == 0xEB /* short jmp */ ||
112 (op & 0xF0) == 0x70 /* short jcc */ ||
113 op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
114 op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
115 "Invalid opcode at patch point");
116
117 if (op == 0xEB || (op & 0xF0) == 0x70) {
118 // short offset operators (jmp and jcc)
119 char* disp = (char*) &branch[1];
120 int imm8 = target - (address) &disp[1];
121 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
122 file == nullptr ? "<null>" : file, line);
123 *disp = imm8;
124 } else {
346 void resolve_global_jobject(Register value, Register thread, Register tmp);
347
348 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
349 void c2bool(Register x);
350
351 // C++ bool manipulation
352
353 void movbool(Register dst, Address src);
354 void movbool(Address dst, bool boolconst);
355 void movbool(Address dst, Register src);
356 void testbool(Register dst);
357
358 void resolve_oop_handle(Register result, Register tmp);
359 void resolve_weak_handle(Register result, Register tmp);
360 void load_mirror(Register mirror, Register method, Register tmp);
361 void load_method_holder_cld(Register rresult, Register rmethod);
362
363 void load_method_holder(Register holder, Register method);
364
365 // oop manipulations
366 void load_klass(Register dst, Register src, Register tmp);
367 void store_klass(Register dst, Register src, Register tmp);
368
369 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
370 Register tmp1, Register thread_tmp);
371 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
372 Register tmp1, Register tmp2, Register tmp3);
373
374 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
375 Register thread_tmp = noreg, DecoratorSet decorators = 0);
376 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
377 Register thread_tmp = noreg, DecoratorSet decorators = 0);
378 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
379 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
380
381 // Used for storing null. All other oop constants should be
382 // stored using routines that take a jobject.
383 void store_heap_oop_null(Address dst);
384
385 #ifdef _LP64
386 void store_klass_gap(Register dst, Register src);
387
388 // This dummy is to prevent a call to store_heap_oop from
389 // converting a zero (like null) into a Register by giving
390 // the compiler two choices it can't resolve
391
392 void store_heap_oop(Address dst, void* dummy);
393
394 void encode_heap_oop(Register r);
395 void decode_heap_oop(Register r);
396 void encode_heap_oop_not_null(Register r);
397 void decode_heap_oop_not_null(Register r);
398 void encode_heap_oop_not_null(Register dst, Register src);
399 void decode_heap_oop_not_null(Register dst, Register src);
400
401 void set_narrow_oop(Register dst, jobject obj);
402 void set_narrow_oop(Address dst, jobject obj);
403 void cmp_narrow_oop(Register dst, jobject obj);
404 void cmp_narrow_oop(Address dst, jobject obj);
566
567 public:
568 void push_set(RegSet set, int offset = -1);
569 void pop_set(RegSet set, int offset = -1);
570
571 // Push and pop everything that might be clobbered by a native
572 // runtime call.
573 // Only save the lower 64 bits of each vector register.
574 // Additional registers can be excluded in a passed RegSet.
575 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
576 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
577
// Convenience wrapper: save every call-clobbered register with no exclusions.
// Delegates to push_call_clobbered_registers_except with an empty RegSet.
void push_call_clobbered_registers(bool save_fpu = true) {
  push_call_clobbered_registers_except(RegSet(), save_fpu);
}
// Convenience wrapper: restore every call-clobbered register with no exclusions.
// Must mirror a preceding push_call_clobbered_registers with the same save_fpu value.
void pop_call_clobbered_registers(bool restore_fpu = true) {
  pop_call_clobbered_registers_except(RegSet(), restore_fpu);
}
584
585 // allocation
586 void tlab_allocate(
587 Register thread, // Current thread
588 Register obj, // result: pointer to object after successful allocation
589 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
590 int con_size_in_bytes, // object size in bytes if known at compile time
591 Register t1, // temp register
592 Register t2, // temp register
593 Label& slow_case // continuation point if fast allocation fails
594 );
595 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
596
597 // interface method calling
598 void lookup_interface_method(Register recv_klass,
599 Register intf_klass,
600 RegisterOrConstant itable_index,
601 Register method_result,
602 Register scan_temp,
603 Label& no_such_interface,
604 bool return_method = true);
605
606 // virtual method calling
607 void lookup_virtual_method(Register recv_klass,
608 RegisterOrConstant vtable_index,
609 Register method_result);
610
611 // Test sub_klass against super_klass, with fast and slow paths.
612
613 // The fast path produces a tri-state answer: yes / no / maybe-slow.
614 // One of the three labels can be null, meaning take the fall-through.
615 // If super_check_offset is -1, the value is loaded up from super_klass.
616 // No registers are killed, except temp_reg.
727 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
728 // operands. In general the names are modified to avoid hiding the instruction in Assembler
729 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
730 // here in MacroAssembler. The major exception to this rule is call
731
732 // Arithmetics
733
734
735 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
736 void addptr(Address dst, Register src);
737
738 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
739 void addptr(Register dst, int32_t src);
740 void addptr(Register dst, Register src);
741 void addptr(Register dst, RegisterOrConstant src) {
742 if (src.is_constant()) addptr(dst, src.as_constant());
743 else addptr(dst, src.as_register());
744 }
745
746 void andptr(Register dst, int32_t src);
747 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
748
749 #ifdef _LP64
750 using Assembler::andq;
751 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
752 #endif
753
754 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
755
756 // renamed to drag out the casting of address to int32_t/intptr_t
757 void cmp32(Register src1, int32_t imm);
758
759 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
760 // compare reg - mem, or reg - &mem
761 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
762
763 void cmp32(Register src1, Address src2);
764
765 #ifndef _LP64
766 void cmpklass(Address dst, Metadata* obj);
767 void cmpklass(Register dst, Metadata* obj);
1824 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1825
1826 using Assembler::movq;
1827 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1828
1829 // Can push value or effective address
1830 void pushptr(AddressLiteral src, Register rscratch);
1831
1832 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1833 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1834
1835 void pushoop(jobject obj, Register rscratch);
1836 void pushklass(Metadata* obj, Register rscratch);
1837
// Sign-extend a 32-bit ("l") value into a pointer-sized element as needed.
// On 64-bit this is movslq; on 32-bit a plain movl suffices (pointers are 32 bits).
// The register form skips the move entirely when dst == src (movl would be a no-op there).
void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1841
1842
1843 public:
1844 // clear memory of size 'cnt' qwords, starting at 'base';
1845 // if 'is_large' is set, do not try to produce short loop
1846 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1847
1848 // clear memory initialization sequence for constant size;
1849 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1850
1851 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1852 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1853
1854 // Fill primitive arrays
1855 void generate_fill(BasicType t, bool aligned,
1856 Register to, Register value, Register count,
1857 Register rtmp, XMMRegister xtmp);
1858
1859 void encode_iso_array(Register src, Register dst, Register len,
1860 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1861 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1862
1863 #ifdef _LP64
1864 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1865 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1866 Register y, Register y_idx, Register z,
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/rtmLocking.hpp"
34 #include "runtime/signature.hpp"
35 #include "runtime/vm_version.hpp"
36
37 class ciInlineKlass;
38
39 // MacroAssembler extends Assembler by frequently used macros.
40 //
41 // Instructions for which a 'better' code sequence exists depending
42 // on arguments should also go in here.
43
44 class MacroAssembler: public Assembler {
45 friend class LIR_Assembler;
46 friend class Runtime1; // as_Address()
47
48 public:
49 // Support for VM calls
50 //
51 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
52 // may customize this version by overriding it for its purposes (e.g., to save/restore
53 // additional registers when doing a VM call).
54
55 virtual void call_VM_leaf_base(
56 address entry_point, // the entry point
57 int number_of_arguments // the number of arguments to pop after the call
58 );
88 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
89 // The implementation is only non-empty for the InterpreterMacroAssembler,
90 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
91 virtual void check_and_handle_popframe(Register java_thread);
92 virtual void check_and_handle_earlyret(Register java_thread);
93
94 Address as_Address(AddressLiteral adr);
95 Address as_Address(ArrayAddress adr, Register rscratch);
96
97 // Support for null-checks
98 //
99 // Generates code that causes a null OS exception if the content of reg is null.
100 // If the accessed location is M[reg + offset] and the offset is known, provide the
101 // offset. No explicit code generation is needed if the offset is within a certain
102 // range (0 <= offset <= page_size).
103
104 void null_check(Register reg, int offset = -1);
105 static bool needs_explicit_null_check(intptr_t offset);
106 static bool uses_implicit_null_check(void* address);
107
108 // markWord tests, kills markWord reg
109 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
110
111 // inlineKlass queries, kills temp_reg
112 void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
113 void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
114 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
115
116 // Get the default value oop for the given InlineKlass
117 void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
118 // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
119 // get_default_value_oop with extra assertion for empty inline klass
120 void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
121
122 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
123 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
124 void test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined);
125
126 // Check oops for special arrays, i.e. flattened and/or null-free
127 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
128 void test_flattened_array_oop(Register oop, Register temp_reg, Label&is_flattened_array);
129 void test_non_flattened_array_oop(Register oop, Register temp_reg, Label&is_non_flattened_array);
130 void test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array);
131 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array);
132
133 // Check array klass layout helper for flatten or null-free arrays...
134 void test_flattened_array_layout(Register lh, Label& is_flattened_array);
135 void test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array);
136 void test_null_free_array_layout(Register lh, Label& is_null_free_array);
137 void test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array);
138
139 // Required platform-specific helpers for Label::patch_instructions.
140 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
141 void pd_patch_instruction(address branch, address target, const char* file, int line) {
142 unsigned char op = branch[0];
143 assert(op == 0xE8 /* call */ ||
144 op == 0xE9 /* jmp */ ||
145 op == 0xEB /* short jmp */ ||
146 (op & 0xF0) == 0x70 /* short jcc */ ||
147 op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
148 op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
149 "Invalid opcode at patch point");
150
151 if (op == 0xEB || (op & 0xF0) == 0x70) {
152 // short offset operators (jmp and jcc)
153 char* disp = (char*) &branch[1];
154 int imm8 = target - (address) &disp[1];
155 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
156 file == nullptr ? "<null>" : file, line);
157 *disp = imm8;
158 } else {
380 void resolve_global_jobject(Register value, Register thread, Register tmp);
381
382 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
383 void c2bool(Register x);
384
385 // C++ bool manipulation
386
387 void movbool(Register dst, Address src);
388 void movbool(Address dst, bool boolconst);
389 void movbool(Address dst, Register src);
390 void testbool(Register dst);
391
392 void resolve_oop_handle(Register result, Register tmp);
393 void resolve_weak_handle(Register result, Register tmp);
394 void load_mirror(Register mirror, Register method, Register tmp);
395 void load_method_holder_cld(Register rresult, Register rmethod);
396
397 void load_method_holder(Register holder, Register method);
398
399 // oop manipulations
400 void load_metadata(Register dst, Register src);
401 void load_klass(Register dst, Register src, Register tmp);
402 void store_klass(Register dst, Register src, Register tmp);
403
404 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
405 Register tmp1, Register thread_tmp);
406 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
407 Register tmp1, Register tmp2, Register tmp3);
408
409 void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
410
411 // inline type data payload offsets...
412 void first_field_offset(Register inline_klass, Register offset);
413 void data_for_oop(Register oop, Register data, Register inline_klass);
// get data payload ptr of a flat value array at index; kills rcx and index
415 void data_for_value_array_index(Register array, Register array_klass,
416 Register index, Register data);
417
418
419 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
420 Register thread_tmp = noreg, DecoratorSet decorators = 0);
421 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
422 Register thread_tmp = noreg, DecoratorSet decorators = 0);
423 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
424 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
425
426 // Used for storing null. All other oop constants should be
427 // stored using routines that take a jobject.
428 void store_heap_oop_null(Address dst);
429
430 void load_prototype_header(Register dst, Register src, Register tmp);
431
432 #ifdef _LP64
433 void store_klass_gap(Register dst, Register src);
434
435 // This dummy is to prevent a call to store_heap_oop from
436 // converting a zero (like null) into a Register by giving
437 // the compiler two choices it can't resolve
438
439 void store_heap_oop(Address dst, void* dummy);
440
441 void encode_heap_oop(Register r);
442 void decode_heap_oop(Register r);
443 void encode_heap_oop_not_null(Register r);
444 void decode_heap_oop_not_null(Register r);
445 void encode_heap_oop_not_null(Register dst, Register src);
446 void decode_heap_oop_not_null(Register dst, Register src);
447
448 void set_narrow_oop(Register dst, jobject obj);
449 void set_narrow_oop(Address dst, jobject obj);
450 void cmp_narrow_oop(Register dst, jobject obj);
451 void cmp_narrow_oop(Address dst, jobject obj);
613
614 public:
615 void push_set(RegSet set, int offset = -1);
616 void pop_set(RegSet set, int offset = -1);
617
618 // Push and pop everything that might be clobbered by a native
619 // runtime call.
620 // Only save the lower 64 bits of each vector register.
621 // Additional registers can be excluded in a passed RegSet.
622 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
623 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
624
// Convenience wrapper: save every call-clobbered register with no exclusions.
// Delegates to push_call_clobbered_registers_except with an empty RegSet.
void push_call_clobbered_registers(bool save_fpu = true) {
  push_call_clobbered_registers_except(RegSet(), save_fpu);
}
// Convenience wrapper: restore every call-clobbered register with no exclusions.
// Must mirror a preceding push_call_clobbered_registers with the same save_fpu value.
void pop_call_clobbered_registers(bool restore_fpu = true) {
  pop_call_clobbered_registers_except(RegSet(), restore_fpu);
}
631
632 // allocation
633
634 // Object / value buffer allocation...
635 // Allocate instance of klass, assumes klass initialized by caller
636 // new_obj prefers to be rax
// Kills t1 and t2, preserves klass, return allocation in new_obj (rsi on LP64)
638 void allocate_instance(Register klass, Register new_obj,
639 Register t1, Register t2,
640 bool clear_fields, Label& alloc_failed);
641
642 void tlab_allocate(
643 Register thread, // Current thread
644 Register obj, // result: pointer to object after successful allocation
645 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
646 int con_size_in_bytes, // object size in bytes if known at compile time
647 Register t1, // temp register
648 Register t2, // temp register
649 Label& slow_case // continuation point if fast allocation fails
650 );
651 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
652
653 // For field "index" within "klass", return inline_klass ...
654 void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
655
656 // interface method calling
657 void lookup_interface_method(Register recv_klass,
658 Register intf_klass,
659 RegisterOrConstant itable_index,
660 Register method_result,
661 Register scan_temp,
662 Label& no_such_interface,
663 bool return_method = true);
664
665 // virtual method calling
666 void lookup_virtual_method(Register recv_klass,
667 RegisterOrConstant vtable_index,
668 Register method_result);
669
670 // Test sub_klass against super_klass, with fast and slow paths.
671
672 // The fast path produces a tri-state answer: yes / no / maybe-slow.
673 // One of the three labels can be null, meaning take the fall-through.
674 // If super_check_offset is -1, the value is loaded up from super_klass.
675 // No registers are killed, except temp_reg.
786 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
787 // operands. In general the names are modified to avoid hiding the instruction in Assembler
788 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
789 // here in MacroAssembler. The major exception to this rule is call
790
791 // Arithmetics
792
793
794 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
795 void addptr(Address dst, Register src);
796
797 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
798 void addptr(Register dst, int32_t src);
799 void addptr(Register dst, Register src);
800 void addptr(Register dst, RegisterOrConstant src) {
801 if (src.is_constant()) addptr(dst, src.as_constant());
802 else addptr(dst, src.as_register());
803 }
804
805 void andptr(Register dst, int32_t src);
806 void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
807 void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
808
809 #ifdef _LP64
810 using Assembler::andq;
811 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
812 #endif
813
814 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
815
816 // renamed to drag out the casting of address to int32_t/intptr_t
817 void cmp32(Register src1, int32_t imm);
818
819 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
820 // compare reg - mem, or reg - &mem
821 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
822
823 void cmp32(Register src1, Address src2);
824
825 #ifndef _LP64
826 void cmpklass(Address dst, Metadata* obj);
827 void cmpklass(Register dst, Metadata* obj);
1884 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1885
1886 using Assembler::movq;
1887 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1888
1889 // Can push value or effective address
1890 void pushptr(AddressLiteral src, Register rscratch);
1891
1892 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1893 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1894
1895 void pushoop(jobject obj, Register rscratch);
1896 void pushklass(Metadata* obj, Register rscratch);
1897
// Sign-extend a 32-bit ("l") value into a pointer-sized element as needed.
// On 64-bit this is movslq; on 32-bit a plain movl suffices (pointers are 32 bits).
// The register form skips the move entirely when dst == src (movl would be a no-op there).
void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1901
1902
1903 public:
1904 // Inline type specific methods
1905 #include "asm/macroAssembler_common.hpp"
1906
1907 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1908 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1909 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1910 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1911 RegState reg_state[]);
1912 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1913 VMRegPair* from, int from_count, int& from_index, VMReg to,
1914 RegState reg_state[], Register val_array);
1915 int extend_stack_for_inline_args(int args_on_stack);
1916 void remove_frame(int initial_framesize, bool needs_stack_repair);
1917 VMReg spill_reg_for(VMReg reg);
1918
1919 // clear memory of size 'cnt' qwords, starting at 'base';
1920 // if 'is_large' is set, do not try to produce short loop
1921 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1922
1923 // clear memory initialization sequence for constant size;
1924 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1925
1926 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1927 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1928
1929 // Fill primitive arrays
1930 void generate_fill(BasicType t, bool aligned,
1931 Register to, Register value, Register count,
1932 Register rtmp, XMMRegister xtmp);
1933
1934 void encode_iso_array(Register src, Register dst, Register len,
1935 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1936 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1937
1938 #ifdef _LP64
1939 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1940 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1941 Register y, Register y_idx, Register z,
|