13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/vm_version.hpp"
34 #include "utilities/checkedCast.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42 friend class LIR_Assembler;
43 friend class Runtime1; // as_Address()
44
45 public:
46 // Support for VM calls
47 //
48 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
49 // may customize this version by overriding it for its purposes (e.g., to save/restore
50 // additional registers when doing a VM call).
51
52 virtual void call_VM_leaf_base(
53 address entry_point, // the entry point
54 int number_of_arguments // the number of arguments to pop after the call
55 );
85 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
86 // The implementation is only non-empty for the InterpreterMacroAssembler,
87 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
88 virtual void check_and_handle_popframe(Register java_thread);
89 virtual void check_and_handle_earlyret(Register java_thread);
90
91 Address as_Address(AddressLiteral adr);
92 Address as_Address(ArrayAddress adr, Register rscratch);
93
94 // Support for null-checks
95 //
96 // Generates code that causes a null OS exception if the content of reg is null.
97 // If the accessed location is M[reg + offset] and the offset is known, provide the
98 // offset. No explicit code generation is needed if the offset is within a certain
99 // range (0 <= offset <= page_size).
100
101 void null_check(Register reg, int offset = -1);
102 static bool needs_explicit_null_check(intptr_t offset);
103 static bool uses_implicit_null_check(void* address);
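  // A rough sketch of the decision behind needs_explicit_null_check(): an
  // access M[reg + offset] can rely on the hardware trap only while a null
  // base would still fault inside the protected page at address zero
  // ('page_size' below stands in for that protected range):
  //
  //   static bool needs_explicit_null_check_sketch(intptr_t offset, intptr_t page_size) {
  //     // inside [0, page_size) a null base faults on the guard page, so the
  //     // implicit (hardware) check suffices; anything else needs a test
  //     return offset < 0 || offset >= page_size;
  //   }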
104
105 // Required platform-specific helpers for Label::patch_instructions.
106 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
107 void pd_patch_instruction(address branch, address target, const char* file, int line) {
108 unsigned char op = branch[0];
109 assert(op == 0xE8 /* call */ ||
110 op == 0xE9 /* jmp */ ||
111 op == 0xEB /* short jmp */ ||
112 (op & 0xF0) == 0x70 /* short jcc */ ||
113 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
114 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
115 "Invalid opcode at patch point");
116
117 if (op == 0xEB || (op & 0xF0) == 0x70) {
118 // short offset operators (jmp and jcc)
119 char* disp = (char*) &branch[1];
120 int imm8 = checked_cast<int>(target - (address) &disp[1]);
121 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
122 file == nullptr ? "<null>" : file, line);
123 *disp = (char)imm8;
124 } else {
346 void resolve_global_jobject(Register value, Register thread, Register tmp);
347
348 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
349 void c2bool(Register x);
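  // Semantics sketch of c2bool (not the emitted instruction sequence): only
  // the least-significant byte of a C-style bool is meaningful, so the value
  // is masked before being normalized to 0/1:
  //
  //   static int c2bool_semantics(int x) {
  //     return (x & 0xFF) == 0 ? 0 : 1;
  //   }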
350
351 // C++ bool manipulation
352
353 void movbool(Register dst, Address src);
354 void movbool(Address dst, bool boolconst);
355 void movbool(Address dst, Register src);
356 void testbool(Register dst);
357
358 void resolve_oop_handle(Register result, Register tmp);
359 void resolve_weak_handle(Register result, Register tmp);
360 void load_mirror(Register mirror, Register method, Register tmp);
361 void load_method_holder_cld(Register rresult, Register rmethod);
362
363 void load_method_holder(Register holder, Register method);
364
365 // oop manipulations
366 void load_klass(Register dst, Register src, Register tmp);
367 void store_klass(Register dst, Register src, Register tmp);
368
369 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
370 Register tmp1, Register thread_tmp);
371 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
372 Register tmp1, Register tmp2, Register tmp3);
373
374 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
375 Register thread_tmp = noreg, DecoratorSet decorators = 0);
376 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
377 Register thread_tmp = noreg, DecoratorSet decorators = 0);
378 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
379 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
380
381 // Used for storing null. All other oop constants should be
382 // stored using routines that take a jobject.
383 void store_heap_oop_null(Address dst);
384
385 #ifdef _LP64
386 void store_klass_gap(Register dst, Register src);
387
388 // This dummy is to prevent a call to store_heap_oop from
389 // converting a zero (like null) into a Register by giving
390 // the compiler two choices it can't resolve
391
392 void store_heap_oop(Address dst, void* dummy);
393
394 void encode_heap_oop(Register r);
395 void decode_heap_oop(Register r);
396 void encode_heap_oop_not_null(Register r);
397 void decode_heap_oop_not_null(Register r);
398 void encode_heap_oop_not_null(Register dst, Register src);
399 void decode_heap_oop_not_null(Register dst, Register src);
400
401 void set_narrow_oop(Register dst, jobject obj);
402 void set_narrow_oop(Address dst, jobject obj);
403 void cmp_narrow_oop(Register dst, jobject obj);
404 void cmp_narrow_oop(Address dst, jobject obj);
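  // Conceptual sketch of compressed-oop encoding/decoding; the base and shift
  // are whatever the selected narrow-oop mode uses (zero-based modes drop the
  // base and/or the shift), and null always encodes to 0:
  //
  //   static uint32_t  encode_sketch(uintptr_t oop, uintptr_t base, int shift) {
  //     return oop == 0 ? 0 : (uint32_t)((oop - base) >> shift);
  //   }
  //   static uintptr_t decode_sketch(uint32_t narrow, uintptr_t base, int shift) {
  //     return narrow == 0 ? 0 : base + ((uintptr_t)narrow << shift);
  //   }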
566
567 public:
568 void push_set(RegSet set, int offset = -1);
569 void pop_set(RegSet set, int offset = -1);
570
571 // Push and pop everything that might be clobbered by a native
572 // runtime call.
573 // Only save the lower 64 bits of each vector register.
574 // Additional registers can be excluded in a passed RegSet.
575 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
576 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
577
578 void push_call_clobbered_registers(bool save_fpu = true) {
579 push_call_clobbered_registers_except(RegSet(), save_fpu);
580 }
581 void pop_call_clobbered_registers(bool restore_fpu = true) {
582 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
583 }
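  // Hypothetical usage sketch (with '__' as the usual masm-> shorthand and
  // 'entry_point' standing for whatever leaf entry is being called):
  // preserve caller-saved state around a runtime call that would otherwise
  // clobber it, keeping a result register live via the exclusion set:
  //
  //   __ push_call_clobbered_registers_except(RegSet::of(rax));
  //   __ call_VM_leaf_base(entry_point, /*number_of_arguments*/ 0);
  //   __ pop_call_clobbered_registers_except(RegSet::of(rax));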
584
585 // allocation
586 void tlab_allocate(
587 Register thread, // Current thread
588 Register obj, // result: pointer to object after successful allocation
589 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
590 int con_size_in_bytes, // object size in bytes if known at compile time
591 Register t1, // temp register
592 Register t2, // temp register
593 Label& slow_case // continuation point if fast allocation fails
594 );
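  // Minimal sketch of the bump-pointer fast path that tlab_allocate emits;
  // the struct and field names below are illustrative, not HotSpot's:
  //
  //   struct TlabSketch { char* top; char* end; };
  //   static void* tlab_allocate_sketch(TlabSketch* t, size_t size_in_bytes) {
  //     char* obj     = t->top;
  //     char* new_top = obj + size_in_bytes;
  //     if (new_top > t->end) return nullptr;   // i.e. jump to slow_case
  //     t->top = new_top;
  //     return obj;                             // result ends up in 'obj'
  //   }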
595 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
596
597 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
598
599 // interface method calling
600 void lookup_interface_method(Register recv_klass,
601 Register intf_klass,
602 RegisterOrConstant itable_index,
603 Register method_result,
604 Register scan_temp,
605 Label& no_such_interface,
606 bool return_method = true);
607
608 void lookup_interface_method_stub(Register recv_klass,
609 Register holder_klass,
610 Register resolved_klass,
611 Register method_result,
612 Register scan_temp,
613 Register temp_reg2,
614 Register receiver,
615 int itable_index,
616 Label& L_no_such_interface);
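  // Conceptual shape of the itable scan that lookup_interface_method emits
  // (the entry layout is simplified to its essentials here):
  //
  //   for each itable entry e of recv_klass:
  //     if (e.interface == intf_klass) {
  //       if (return_method) method_result = e.method_table[itable_index];
  //       goto found;
  //     }
  //   goto no_such_interface;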
777 // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
778 // operands. In general the names are modified to avoid hiding the instruction in Assembler,
779 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
780 // here in MacroAssembler. The major exception to this rule is call.
781
782 // Arithmetics
783
784
785 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
786 void addptr(Address dst, Register src);
787
788 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
789 void addptr(Register dst, int32_t src);
790 void addptr(Register dst, Register src);
791 void addptr(Register dst, RegisterOrConstant src) {
792 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
793 else addptr(dst, src.as_register());
794 }
795
796 void andptr(Register dst, int32_t src);
797 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
798
799 #ifdef _LP64
800 using Assembler::andq;
801 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
802 #endif
803
804 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
805
806 // renamed to drag out the casting of address to int32_t/intptr_t
807 void cmp32(Register src1, int32_t imm);
808
809 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
810 // compare reg - mem, or reg - &mem
811 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
812
813 void cmp32(Register src1, Address src2);
814
815 #ifndef _LP64
816 void cmpklass(Address dst, Metadata* obj);
817 void cmpklass(Register dst, Metadata* obj);
1963 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1964
1965 using Assembler::movq;
1966 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1967
1968 // Can push value or effective address
1969 void pushptr(AddressLiteral src, Register rscratch);
1970
1971 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1972 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1973
1974 void pushoop(jobject obj, Register rscratch);
1975 void pushklass(Metadata* obj, Register rscratch);
1976
1977 // sign-extend as needed: a 32-bit ('l') value to a ptr-sized element
1978 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1979 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
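  // Semantics sketch: on LP64, movl2ptr widens a 32-bit value to pointer
  // width with sign extension (movslq); on 32-bit it is just a move:
  //
  //   static intptr_t movl2ptr_semantics(int32_t v) { return (intptr_t)v; }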
1980
1981
1982 public:
1983 // clear memory of size 'cnt' qwords, starting at 'base';
1984 // if 'is_large' is set, do not try to produce short loop
1985 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1986
1987 // clear memory initialization sequence for constant size;
1988 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1989
1990 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1991 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
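  // Net effect of the clear_mem family (the emitted code picks rep stos or
  // XMM/YMM stores depending on size, 'is_large' and CPU features; this is
  // only the result, not the code shape):
  //
  //   static void clear_mem_semantics(uint64_t* base, size_t cnt_qwords) {
  //     for (size_t i = 0; i < cnt_qwords; i++) base[i] = 0;
  //   }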
1992
1993 // Fill primitive arrays
1994 void generate_fill(BasicType t, bool aligned,
1995 Register to, Register value, Register count,
1996 Register rtmp, XMMRegister xtmp);
1997
1998 void encode_iso_array(Register src, Register dst, Register len,
1999 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2000 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
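  // Sketch of what encode_iso_array computes ('result' is assumed here to be
  // the number of chars successfully encoded; encoding stops at the first
  // char that does not fit the target range):
  //
  //   static int encode_iso_array_sketch(const uint16_t* src, int8_t* dst,
  //                                      int len, bool ascii) {
  //     const uint16_t limit = ascii ? 0x80 : 0x100;
  //     for (int i = 0; i < len; i++) {
  //       if (src[i] >= limit) return i;
  //       dst[i] = (int8_t) src[i];
  //     }
  //     return len;
  //   }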
2001
2002 #ifdef _LP64
2003 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2004 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2005 Register y, Register y_idx, Register z,
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/signature.hpp"
34 #include "runtime/vm_version.hpp"
35 #include "utilities/checkedCast.hpp"
36
37 class ciInlineKlass;
38
39 // MacroAssembler extends Assembler by frequently used macros.
40 //
41 // Instructions for which a 'better' code sequence exists depending
42 // on arguments should also go in here.
43
44 class MacroAssembler: public Assembler {
45 friend class LIR_Assembler;
46 friend class Runtime1; // as_Address()
47
48 public:
49 // Support for VM calls
50 //
51 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
52 // may customize this version by overriding it for its purposes (e.g., to save/restore
53 // additional registers when doing a VM call).
54
55 virtual void call_VM_leaf_base(
56 address entry_point, // the entry point
57 int number_of_arguments // the number of arguments to pop after the call
58 );
88 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
89 // The implementation is only non-empty for the InterpreterMacroAssembler,
90 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
91 virtual void check_and_handle_popframe(Register java_thread);
92 virtual void check_and_handle_earlyret(Register java_thread);
93
94 Address as_Address(AddressLiteral adr);
95 Address as_Address(ArrayAddress adr, Register rscratch);
96
97 // Support for null-checks
98 //
99 // Generates code that causes a null OS exception if the content of reg is null.
100 // If the accessed location is M[reg + offset] and the offset is known, provide the
101 // offset. No explicit code generation is needed if the offset is within a certain
102 // range (0 <= offset <= page_size).
103
104 void null_check(Register reg, int offset = -1);
105 static bool needs_explicit_null_check(intptr_t offset);
106 static bool uses_implicit_null_check(void* address);
107
108 // markWord tests, kills markWord reg
109 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
110
111 // inlineKlass queries, kills temp_reg
112 void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
113 void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
114 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
115
116 // Get the default value oop for the given InlineKlass
117 void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
118 // Get the empty value oop for the given InlineKlass ("empty" as in no instance fields);
119 // same as get_default_value_oop, with an extra assertion that the inline klass is empty
120 void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
121
122 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
123 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
124 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
125 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
126
127 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
128 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
129 void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
130 void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
131 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
132 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
133
134 // Check array klass layout helper for flat or null-free arrays...
135 void test_flat_array_layout(Register lh, Label& is_flat_array);
136 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
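  // These testers all share one shape: load a word (markWord, Klass flags, or
  // the array layout helper), test a bit pattern, and jump to the label when
  // it matches. Roughly:
  //
  //   static bool bit_set_sketch(uintptr_t word, uintptr_t mask) {
  //     return (word & mask) != 0;   // jcc to the label when this holds
  //   }
  //
  // The actual mask values come from markWord, Klass and the layout helper
  // encoding; they are not spelled out here.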
137
138 // Required platform-specific helpers for Label::patch_instructions.
139 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
140 void pd_patch_instruction(address branch, address target, const char* file, int line) {
141 unsigned char op = branch[0];
142 assert(op == 0xE8 /* call */ ||
143 op == 0xE9 /* jmp */ ||
144 op == 0xEB /* short jmp */ ||
145 (op & 0xF0) == 0x70 /* short jcc */ ||
146 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
147 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
148 "Invalid opcode at patch point");
149
150 if (op == 0xEB || (op & 0xF0) == 0x70) {
151 // short offset operators (jmp and jcc)
152 char* disp = (char*) &branch[1];
153 int imm8 = checked_cast<int>(target - (address) &disp[1]);
154 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
155 file == nullptr ? "<null>" : file, line);
156 *disp = (char)imm8;
157 } else {
379 void resolve_global_jobject(Register value, Register thread, Register tmp);
380
381 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
382 void c2bool(Register x);
383
384 // C++ bool manipulation
385
386 void movbool(Register dst, Address src);
387 void movbool(Address dst, bool boolconst);
388 void movbool(Address dst, Register src);
389 void testbool(Register dst);
390
391 void resolve_oop_handle(Register result, Register tmp);
392 void resolve_weak_handle(Register result, Register tmp);
393 void load_mirror(Register mirror, Register method, Register tmp);
394 void load_method_holder_cld(Register rresult, Register rmethod);
395
396 void load_method_holder(Register holder, Register method);
397
398 // oop manipulations
399 void load_metadata(Register dst, Register src);
400 void load_klass(Register dst, Register src, Register tmp);
401 void store_klass(Register dst, Register src, Register tmp);
402
403 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
404 Register tmp1, Register thread_tmp);
405 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
406 Register tmp1, Register tmp2, Register tmp3);
407
408 void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
409 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
410 // We probably need the following for arrays: TODO FIXME
411 // void flat_element_copy(DecoratorSet decorators, Register src, Register dst, Register array);
412
413 // inline type data payload offsets...
414 void first_field_offset(Register inline_klass, Register offset);
415 void data_for_oop(Register oop, Register data, Register inline_klass);
416 // get data payload ptr of a flat value array at index; kills rcx and index
417 void data_for_value_array_index(Register array, Register array_klass,
418 Register index, Register data);
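  // Conceptual address arithmetic behind data_for_value_array_index (the real
  // code decodes the array klass layout helper; the names below are
  // illustrative only):
  //
  //   static char* flat_element_addr_sketch(char* array_base, int header_size,
  //                                         int log2_elem_size, int index) {
  //     return array_base + header_size + ((intptr_t)index << log2_elem_size);
  //   }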
419
420 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
421 Register thread_tmp = noreg, DecoratorSet decorators = 0);
422 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
423 Register thread_tmp = noreg, DecoratorSet decorators = 0);
424 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
425 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
426
427 // Used for storing null. All other oop constants should be
428 // stored using routines that take a jobject.
429 void store_heap_oop_null(Address dst);
430
431 void load_prototype_header(Register dst, Register src, Register tmp);
432
433 #ifdef _LP64
434 void store_klass_gap(Register dst, Register src);
435
436 // This dummy is to prevent a call to store_heap_oop from
437 // converting a zero (like null) into a Register by giving
438 // the compiler two choices it can't resolve
439
440 void store_heap_oop(Address dst, void* dummy);
441
442 void encode_heap_oop(Register r);
443 void decode_heap_oop(Register r);
444 void encode_heap_oop_not_null(Register r);
445 void decode_heap_oop_not_null(Register r);
446 void encode_heap_oop_not_null(Register dst, Register src);
447 void decode_heap_oop_not_null(Register dst, Register src);
448
449 void set_narrow_oop(Register dst, jobject obj);
450 void set_narrow_oop(Address dst, jobject obj);
451 void cmp_narrow_oop(Register dst, jobject obj);
452 void cmp_narrow_oop(Address dst, jobject obj);
614
615 public:
616 void push_set(RegSet set, int offset = -1);
617 void pop_set(RegSet set, int offset = -1);
618
619 // Push and pop everything that might be clobbered by a native
620 // runtime call.
621 // Only save the lower 64 bits of each vector register.
622 // Additional registers can be excluded in a passed RegSet.
623 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
624 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
625
626 void push_call_clobbered_registers(bool save_fpu = true) {
627 push_call_clobbered_registers_except(RegSet(), save_fpu);
628 }
629 void pop_call_clobbered_registers(bool restore_fpu = true) {
630 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
631 }
632
633 // allocation
634
635 // Object / value buffer allocation...
636 // Allocate instance of klass, assumes klass initialized by caller
637 // new_obj prefers to be rax
638 // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
639 void allocate_instance(Register klass, Register new_obj,
640 Register t1, Register t2,
641 bool clear_fields, Label& alloc_failed);
642
643 void tlab_allocate(
644 Register thread, // Current thread
645 Register obj, // result: pointer to object after successful allocation
646 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
647 int con_size_in_bytes, // object size in bytes if known at compile time
648 Register t1, // temp register
649 Register t2, // temp register
650 Label& slow_case // continuation point if fast allocation fails
651 );
652 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
653
654 // For field "index" within "klass", return inline_klass ...
655 void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
656
657 void inline_layout_info(Register klass, Register index, Register layout_info);
658
659 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
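  // population_count computes the number of set bits in src (presumably via
  // POPCNT when the CPU supports it, otherwise a bit-twiddling fallback that
  // needs the scratch registers). Equivalent to:
  //
  //   static int popcount_sketch(uint64_t x) {
  //     int n = 0;
  //     while (x) { x &= x - 1; n++; }
  //     return n;
  //   }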
660
661 // interface method calling
662 void lookup_interface_method(Register recv_klass,
663 Register intf_klass,
664 RegisterOrConstant itable_index,
665 Register method_result,
666 Register scan_temp,
667 Label& no_such_interface,
668 bool return_method = true);
669
670 void lookup_interface_method_stub(Register recv_klass,
671 Register holder_klass,
672 Register resolved_klass,
673 Register method_result,
674 Register scan_temp,
675 Register temp_reg2,
676 Register receiver,
677 int itable_index,
678 Label& L_no_such_interface);
839 // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
840 // operands. In general the names are modified to avoid hiding the instruction in Assembler,
841 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
842 // here in MacroAssembler. The major exception to this rule is call.
843
844 // Arithmetics
845
846
847 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
848 void addptr(Address dst, Register src);
849
850 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
851 void addptr(Register dst, int32_t src);
852 void addptr(Register dst, Register src);
853 void addptr(Register dst, RegisterOrConstant src) {
854 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
855 else addptr(dst, src.as_register());
856 }
857
858 void andptr(Register dst, int32_t src);
859 void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
860 void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
861
862 #ifdef _LP64
863 using Assembler::andq;
864 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
865 #endif
866
867 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
868
869 // renamed to drag out the casting of address to int32_t/intptr_t
870 void cmp32(Register src1, int32_t imm);
871
872 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
873 // compare reg - mem, or reg - &mem
874 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
875
876 void cmp32(Register src1, Address src2);
877
878 #ifndef _LP64
879 void cmpklass(Address dst, Metadata* obj);
880 void cmpklass(Register dst, Metadata* obj);
2026 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2027
2028 using Assembler::movq;
2029 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2030
2031 // Can push value or effective address
2032 void pushptr(AddressLiteral src, Register rscratch);
2033
2034 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
2035 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
2036
2037 void pushoop(jobject obj, Register rscratch);
2038 void pushklass(Metadata* obj, Register rscratch);
2039
2040 // sign-extend as needed: a 32-bit ('l') value to a ptr-sized element
2041 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
2042 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
2043
2044
2045 public:
2046 // Inline type specific methods
2047 #include "asm/macroAssembler_common.hpp"
2048
2049 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
2050 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
2051 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
2052 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
2053 RegState reg_state[]);
2054 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
2055 VMRegPair* from, int from_count, int& from_index, VMReg to,
2056 RegState reg_state[], Register val_array);
2057 int extend_stack_for_inline_args(int args_on_stack);
2058 void remove_frame(int initial_framesize, bool needs_stack_repair);
2059 VMReg spill_reg_for(VMReg reg);
2060
2061 // clear memory of size 'cnt' qwords, starting at 'base';
2062 // if 'is_large' is set, do not try to produce short loop
2063 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
2064
2065 // clear memory initialization sequence for constant size;
2066 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2067
2068 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2069 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2070
2071 // Fill primitive arrays
2072 void generate_fill(BasicType t, bool aligned,
2073 Register to, Register value, Register count,
2074 Register rtmp, XMMRegister xtmp);
2075
2076 void encode_iso_array(Register src, Register dst, Register len,
2077 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2078 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2079
2080 #ifdef _LP64
2081 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2082 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2083 Register y, Register y_idx, Register z,