16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.inline.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "oops/compressedKlass.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/powerOfTwo.hpp"
37
38 class OopMap;
39
40 // MacroAssembler extends Assembler by frequently used macros.
41 //
42 // Instructions for which a 'better' code sequence exists depending
43 // on arguments should also go in here.
44
45 class MacroAssembler: public Assembler {
46 friend class LIR_Assembler;
47
48 public:
49 using Assembler::mov;
50 using Assembler::movi;
51
52 protected:
53
54 // Support for VM calls
55 //
56 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
167
// Bind label L at the current code position, then reset the
// MacroAssembler's instruction-tracking state: the last-emitted
// instruction record is cleared (a bound label is a potential branch
// target, so instruction merging/combining must not reach across it),
// and the current pc is remembered as the most recent label position.
168 void bind(Label& L) {
169 Assembler::bind(L);
170 code()->clear_last_insn();
171 code()->set_last_label(pc());
172 }
173
174 void membar(Membar_mask_bits order_constraint);
175
176 using Assembler::ldr;
177 using Assembler::str;
178 using Assembler::ldrw;
179 using Assembler::strw;
180
181 void ldr(Register Rx, const Address &adr);
182 void ldrw(Register Rw, const Address &adr);
183 void str(Register Rx, const Address &adr);
184 void strw(Register Rx, const Address &adr);
185
186 // Frame creation and destruction shared between JITs.
187 void build_frame(int framesize);
188 void remove_frame(int framesize);
189
// Helper behind the call_Unimplemented() macro below: materializes the
// address of the caller-identifying string (__PRETTY_FUNCTION__ or
// __FUNCSIG__) in rscratch2 so the offending call site is identifiable
// when the unimplemented path is reached.
190 virtual void _call_Unimplemented(address call_site) {
191 mov(rscratch2, call_site);
192 }
193
194 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
195 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
196 // https://reviews.llvm.org/D3311
197
198 #ifdef _WIN64
199 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
200 #else
201 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
202 #endif
203
204 // aliases defined in AARCH64 spec
205
// 32-bit compare, expressed via the canonical AArch64 alias: CMP is
// SUBS with the result discarded into zr, leaving only the condition
// flags set.
206 template<class T>
207 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
660 msr(0b011, 0b0100, 0b0010, 0b000, reg);
661 }
662
663 // idiv variant which deals with MINLONG as dividend and -1 as divisor
664 int corrected_idivl(Register result, Register ra, Register rb,
665 bool want_remainder, Register tmp = rscratch1);
666 int corrected_idivq(Register result, Register ra, Register rb,
667 bool want_remainder, Register tmp = rscratch1);
668
669 // Support for null-checks
670 //
671 // Generates code that causes a null OS exception if the content of reg is null.
672 // If the accessed location is M[reg + offset] and the offset is known, provide the
673 // offset. No explicit code generation is needed if the offset is within a certain
674 // range (0 <= offset <= page_size).
675
676 virtual void null_check(Register reg, int offset = -1);
677 static bool needs_explicit_null_check(intptr_t offset);
678 static bool uses_implicit_null_check(void* address);
679
680 static address target_addr_for_insn(address insn_addr);
681
682 // Required platform-specific helpers for Label::patch_instructions.
683 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
684 static int pd_patch_instruction_size(address branch, address target);
// Patch the instruction(s) at 'branch' to refer to 'target'.
// Delegates to pd_patch_instruction_size; the file/line parameters
// exist only to match the shadowed AbstractAssembler signature and are
// intentionally unused here.
685 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
686 pd_patch_instruction_size(branch, target);
687 }
// Decode and return the destination address encoded in the instruction
// at 'branch' (see target_addr_for_insn).
688 static address pd_call_destination(address branch) {
689 return target_addr_for_insn(branch);
690 }
691 #ifndef PRODUCT
692 static void pd_print_patched_instruction(address branch);
693 #endif
694
695 static int patch_oop(address insn_addr, address o);
696 static int patch_narrow_klass(address insn_addr, narrowKlass n);
697
698 // Return whether code is emitted to a scratch blob.
699 virtual bool in_scratch_emit_size() {
888
889 void reset_last_Java_frame(Register thread);
890
891 // thread in the default location (rthread)
892 void reset_last_Java_frame(bool clear_fp);
893
894 // Stores
895 void store_check(Register obj); // store check for obj - register is destroyed afterwards
896 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
897
898 void resolve_jobject(Register value, Register tmp1, Register tmp2);
899 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
900
901 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
902 void c2bool(Register x);
903
904 void load_method_holder_cld(Register rresult, Register rmethod);
905 void load_method_holder(Register holder, Register method);
906
907 // oop manipulations
908 void load_narrow_klass_compact(Register dst, Register src);
909 void load_klass(Register dst, Register src);
910 void store_klass(Register dst, Register src);
911 void cmp_klass(Register obj, Register klass, Register tmp);
912 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
913
914 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
915 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
916 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
917
918 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
919 Register tmp1, Register tmp2);
920
921 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
922 Register tmp1, Register tmp2, Register tmp3);
923
924 void load_heap_oop(Register dst, Address src, Register tmp1,
925 Register tmp2, DecoratorSet decorators = 0);
926
927 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
928 Register tmp2, DecoratorSet decorators = 0);
929 void store_heap_oop(Address dst, Register val, Register tmp1,
930 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
931
932 // currently unimplemented
933 // Used for storing null. All other oop constants should be
934 // stored using routines that take a jobject.
935 void store_heap_oop_null(Address dst);
936
937 void store_klass_gap(Register dst, Register src);
938
939 // This dummy is to prevent a call to store_heap_oop from
940 // converting a zero (like null) into a Register by giving
941 // the compiler two choices it can't resolve
942
943 void store_heap_oop(Address dst, void* dummy);
944
945 void encode_heap_oop(Register d, Register s);
946 void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
947 void decode_heap_oop(Register d, Register s);
948 void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
949 void encode_heap_oop_not_null(Register r);
950 void decode_heap_oop_not_null(Register r);
951 void encode_heap_oop_not_null(Register dst, Register src);
952 void decode_heap_oop_not_null(Register dst, Register src);
953
954 void set_narrow_oop(Register dst, jobject obj);
955
956 void decode_klass_not_null_for_aot(Register dst, Register src);
966 void reinit_heapbase();
967
968 DEBUG_ONLY(void verify_heapbase(const char* msg);)
969
970 void push_CPU_state(bool save_vectors = false, bool use_sve = false,
971 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
972 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
973 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
974
975 void push_cont_fastpath(Register java_thread = rthread);
976 void pop_cont_fastpath(Register java_thread = rthread);
977
978 // Round reg up to a multiple of modulus (modulus should be a power of two)
979 void round_to(Register reg, int modulus);
980
981 // java.lang.Math::round intrinsics
982 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
983 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
984
985 // allocation
986 void tlab_allocate(
987 Register obj, // result: pointer to object after successful allocation
988 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
989 int con_size_in_bytes, // object size in bytes if known at compile time
990 Register t1, // temp register
991 Register t2, // temp register
992 Label& slow_case // continuation point if fast allocation fails
993 );
994 void verify_tlab();
995
996 // interface method calling
997 void lookup_interface_method(Register recv_klass,
998 Register intf_klass,
999 RegisterOrConstant itable_index,
1000 Register method_result,
1001 Register scan_temp,
1002 Label& no_such_interface,
1003 bool return_method = true);
1004
1005 void lookup_interface_method_stub(Register recv_klass,
1006 Register holder_klass,
1007 Register resolved_klass,
1008 Register method_result,
1009 Register temp_reg,
1010 Register temp_reg2,
1011 int itable_index,
1012 Label& L_no_such_interface);
1013
1014 // virtual method calling
1015 // n.b. x86 allows RegisterOrConstant for vtable_index
1433 } \
1434 \
1435 void INSN(Register Rd, Register Rn, Register Rm) { \
1436 Assembler::INSN(Rd, Rn, Rm); \
1437 } \
1438 \
1439 void INSN(Register Rd, Register Rn, Register Rm, \
1440 ext::operation option, int amount = 0) { \
1441 Assembler::INSN(Rd, Rn, Rm, option, amount); \
1442 }
1443
1444 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)
1445
1446 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1447 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1448 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1449 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1450
1451 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1452
// Emit a bounds-checked computed jump into a table of equal-sized code
// entries starting at 'jumptable'.  'index' is the switch value,
// [lowbound, highbound) the valid range, and 'stride' the number of
// instructions per table entry.  Out-of-range values branch to
// 'jumptable_end'.
1453 void tableswitch(Register index, jint lowbound, jint highbound,
1454 Label &jumptable, Label &jumptable_end, int stride = 1) {
// rscratch1 = pc-relative address of the table's first entry.
1455 adr(rscratch1, jumptable);
// rscratch2 = index - lowbound, the zero-based table slot.
1456 subsw(rscratch2, index, lowbound);
// Compare the slot against the table size (SUBS into zr == CMP:
// only the flags are kept).
1457 subsw(zr, rscratch2, highbound - lowbound);
// Unsigned >= catches both slot >= size and index < lowbound
// (a negative slot wraps to a huge unsigned value).
1458 br(Assembler::HS, jumptable_end);
// entry = base + slot * (stride * instruction_size): the
// sign-extended slot is shifted by the power-of-two entry size.
1459 add(rscratch1, rscratch1, rscratch2,
1460 ext::sxtw, exact_log2(stride * Assembler::instruction_size));
// Indirect jump into the table.
1461 br(rscratch1);
1462 }
1463
1464 // Form an address from base + offset in Rd. Rd may or may not
1465 // actually be used: you must use the Address that is returned. It
1466 // is up to you to ensure that the shift provided matches the size
1467 // of your data.
1468 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
1469
1470 // Return true iff an address is within the 48-bit AArch64 address
1471 // space.
1472 bool is_valid_AArch64_address(address a) {
// Debug check for the arrays-hashcode intrinsic: asserts that the
// caller's register choices match the fixed register assignment the
// matching rules in aarch64.ad expect for this stub.
1507 #define ARRAYS_HASHCODE_REGISTERS \
1508 do { \
1509 assert(result == r0 && \
1510 ary == r1 && \
1511 cnt == r2 && \
1512 vdata0 == v3 && \
1513 vdata1 == v2 && \
1514 vdata2 == v1 && \
1515 vdata3 == v0 && \
1516 vmul0 == v4 && \
1517 vmul1 == v5 && \
1518 vmul2 == v6 && \
1519 vmul3 == v7 && \
1520 vpow == v12 && \
1521 vpowm == v13, "registers must match aarch64.ad"); \
1522 } while (0)
1523
1524 void string_equals(Register a1, Register a2, Register result, Register cnt1);
1525
1526 void fill_words(Register base, Register cnt, Register value);
1527 address zero_words(Register base, uint64_t cnt);
1528 address zero_words(Register ptr, Register cnt);
1529 void zero_dcache_blocks(Register base, Register cnt);
1530
1531 static const int zero_words_block_size;
1532
1533 address byte_array_inflate(Register src, Register dst, Register len,
1534 FloatRegister vtmp1, FloatRegister vtmp2,
1535 FloatRegister vtmp3, Register tmp4);
1536
1537 void char_array_compress(Register src, Register dst, Register len,
1538 Register res,
1539 FloatRegister vtmp0, FloatRegister vtmp1,
1540 FloatRegister vtmp2, FloatRegister vtmp3,
1541 FloatRegister vtmp4, FloatRegister vtmp5);
1542
1543 void encode_iso_array(Register src, Register dst,
1544 Register len, Register res, bool ascii,
1545 FloatRegister vtmp0, FloatRegister vtmp1,
1546 FloatRegister vtmp2, FloatRegister vtmp3,
|
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.inline.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "oops/compressedKlass.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/macros.hpp"
37 #include "utilities/powerOfTwo.hpp"
38 #include "runtime/signature.hpp"
39
40
41 class ciInlineKlass;
42
43 class OopMap;
44
45 // MacroAssembler extends Assembler by frequently used macros.
46 //
47 // Instructions for which a 'better' code sequence exists depending
48 // on arguments should also go in here.
49
50 class MacroAssembler: public Assembler {
51 friend class LIR_Assembler;
52
53 public:
54 using Assembler::mov;
55 using Assembler::movi;
56
57 protected:
58
59 // Support for VM calls
60 //
61 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
172
// Bind label L at the current code position, then reset the
// MacroAssembler's instruction-tracking state: the last-emitted
// instruction record is cleared (a bound label is a potential branch
// target, so instruction merging/combining must not reach across it),
// and the current pc is remembered as the most recent label position.
173 void bind(Label& L) {
174 Assembler::bind(L);
175 code()->clear_last_insn();
176 code()->set_last_label(pc());
177 }
178
179 void membar(Membar_mask_bits order_constraint);
180
181 using Assembler::ldr;
182 using Assembler::str;
183 using Assembler::ldrw;
184 using Assembler::strw;
185
186 void ldr(Register Rx, const Address &adr);
187 void ldrw(Register Rw, const Address &adr);
188 void str(Register Rx, const Address &adr);
189 void strw(Register Rx, const Address &adr);
190
191 // Frame creation and destruction shared between JITs.
192 DEBUG_ONLY(void build_frame(int framesize);)
193 void build_frame(int framesize DEBUG_ONLY(COMMA bool zap_rfp_lr_spills));
194 void remove_frame(int framesize);
195
// Helper behind the call_Unimplemented() macro below: materializes the
// address of the caller-identifying string (__PRETTY_FUNCTION__ or
// __FUNCSIG__) in rscratch2 so the offending call site is identifiable
// when the unimplemented path is reached.
196 virtual void _call_Unimplemented(address call_site) {
197 mov(rscratch2, call_site);
198 }
199
200 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
201 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
202 // https://reviews.llvm.org/D3311
203
204 #ifdef _WIN64
205 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
206 #else
207 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
208 #endif
209
210 // aliases defined in AARCH64 spec
211
// 32-bit compare, expressed via the canonical AArch64 alias: CMP is
// SUBS with the result discarded into zr, leaving only the condition
// flags set.
212 template<class T>
213 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
666 msr(0b011, 0b0100, 0b0010, 0b000, reg);
667 }
668
669 // idiv variant which deals with MINLONG as dividend and -1 as divisor
670 int corrected_idivl(Register result, Register ra, Register rb,
671 bool want_remainder, Register tmp = rscratch1);
672 int corrected_idivq(Register result, Register ra, Register rb,
673 bool want_remainder, Register tmp = rscratch1);
674
675 // Support for null-checks
676 //
677 // Generates code that causes a null OS exception if the content of reg is null.
678 // If the accessed location is M[reg + offset] and the offset is known, provide the
679 // offset. No explicit code generation is needed if the offset is within a certain
680 // range (0 <= offset <= page_size).
681
682 virtual void null_check(Register reg, int offset = -1);
683 static bool needs_explicit_null_check(intptr_t offset);
684 static bool uses_implicit_null_check(void* address);
685
686 // markWord tests, kills markWord reg
687 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
688
689 // inlineKlass queries, kills temp_reg
690 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
691
692 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
693 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
694 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
695 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
696
697 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
698 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
699 void test_flat_array_oop(Register klass, Register temp_reg, Label& is_flat_array);
700 void test_non_flat_array_oop(Register oop, Register temp_reg, Label&is_non_flat_array);
701 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
702 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array);
703
704 // Check array klass layout helper for flat or null-free arrays...
705 void test_flat_array_layout(Register lh, Label& is_flat_array);
706 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
707
708 static address target_addr_for_insn(address insn_addr);
709
710 // Required platform-specific helpers for Label::patch_instructions.
711 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
712 static int pd_patch_instruction_size(address branch, address target);
// Patch the instruction(s) at 'branch' to refer to 'target'.
// Delegates to pd_patch_instruction_size; the file/line parameters
// exist only to match the shadowed AbstractAssembler signature and are
// intentionally unused here.
713 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
714 pd_patch_instruction_size(branch, target);
715 }
// Decode and return the destination address encoded in the instruction
// at 'branch' (see target_addr_for_insn).
716 static address pd_call_destination(address branch) {
717 return target_addr_for_insn(branch);
718 }
719 #ifndef PRODUCT
720 static void pd_print_patched_instruction(address branch);
721 #endif
722
723 static int patch_oop(address insn_addr, address o);
724 static int patch_narrow_klass(address insn_addr, narrowKlass n);
725
726 // Return whether code is emitted to a scratch blob.
727 virtual bool in_scratch_emit_size() {
916
917 void reset_last_Java_frame(Register thread);
918
919 // thread in the default location (rthread)
920 void reset_last_Java_frame(bool clear_fp);
921
922 // Stores
923 void store_check(Register obj); // store check for obj - register is destroyed afterwards
924 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
925
926 void resolve_jobject(Register value, Register tmp1, Register tmp2);
927 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
928
929 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
930 void c2bool(Register x);
931
932 void load_method_holder_cld(Register rresult, Register rmethod);
933 void load_method_holder(Register holder, Register method);
934
935 // oop manipulations
936 void load_metadata(Register dst, Register src);
937
938 void load_narrow_klass_compact(Register dst, Register src);
939 void load_klass(Register dst, Register src);
940 void store_klass(Register dst, Register src);
941 void cmp_klass(Register obj, Register klass, Register tmp);
942 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
943
944 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
945 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
946 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
947
948 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
949 Register tmp1, Register tmp2);
950
951 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
952 Register tmp1, Register tmp2, Register tmp3);
953
954 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
955
956 // inline type data payload offsets...
957 void payload_offset(Register inline_klass, Register offset);
958 void payload_address(Register oop, Register data, Register inline_klass);
959 // get the data payload ptr of a flat value array at index; kills the index register (NOTE(review): "rcx" in the original comment is an x86 register name — confirm the actual aarch64 clobbers)
960 void data_for_value_array_index(Register array, Register array_klass,
961 Register index, Register data);
962
963 void load_heap_oop(Register dst, Address src, Register tmp1,
964 Register tmp2, DecoratorSet decorators = 0);
965
966 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
967 Register tmp2, DecoratorSet decorators = 0);
968 void store_heap_oop(Address dst, Register val, Register tmp1,
969 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
970
971 // currently unimplemented
972 // Used for storing null. All other oop constants should be
973 // stored using routines that take a jobject.
974 void store_heap_oop_null(Address dst);
975
976 void load_prototype_header(Register dst, Register src);
977
978 void store_klass_gap(Register dst, Register src);
979
980 // This dummy is to prevent a call to store_heap_oop from
981 // converting a zero (like null) into a Register by giving
982 // the compiler two choices it can't resolve
983
984 void store_heap_oop(Address dst, void* dummy);
985
986 void encode_heap_oop(Register d, Register s);
987 void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
988 void decode_heap_oop(Register d, Register s);
989 void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
990 void encode_heap_oop_not_null(Register r);
991 void decode_heap_oop_not_null(Register r);
992 void encode_heap_oop_not_null(Register dst, Register src);
993 void decode_heap_oop_not_null(Register dst, Register src);
994
995 void set_narrow_oop(Register dst, jobject obj);
996
997 void decode_klass_not_null_for_aot(Register dst, Register src);
1007 void reinit_heapbase();
1008
1009 DEBUG_ONLY(void verify_heapbase(const char* msg);)
1010
1011 void push_CPU_state(bool save_vectors = false, bool use_sve = false,
1012 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
1013 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
1014 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
1015
1016 void push_cont_fastpath(Register java_thread = rthread);
1017 void pop_cont_fastpath(Register java_thread = rthread);
1018
1019 // Round reg up to a multiple of modulus (modulus should be a power of two)
1020 void round_to(Register reg, int modulus);
1021
1022 // java.lang.Math::round intrinsics
1023 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
1024 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
1025
1026 // allocation
1027
1028 // Object / value buffer allocation...
1029 // Allocate instance of klass, assumes klass initialized by caller
1030 // new_obj prefers to be rax (NOTE(review): rax/rsi are x86 register names carried over from the x86 version — confirm the aarch64 register convention)
1031 // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
1032 void allocate_instance(Register klass, Register new_obj,
1033 Register t1, Register t2,
1034 bool clear_fields, Label& alloc_failed);
1035
1036 void tlab_allocate(
1037 Register obj, // result: pointer to object after successful allocation
1038 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
1039 int con_size_in_bytes, // object size in bytes if known at compile time
1040 Register t1, // temp register
1041 Register t2, // temp register
1042 Label& slow_case // continuation point if fast allocation fails
1043 );
1044 void verify_tlab();
1045
1046 void inline_layout_info(Register holder_klass, Register index, Register layout_info);
1047
1048 // interface method calling
1049 void lookup_interface_method(Register recv_klass,
1050 Register intf_klass,
1051 RegisterOrConstant itable_index,
1052 Register method_result,
1053 Register scan_temp,
1054 Label& no_such_interface,
1055 bool return_method = true);
1056
1057 void lookup_interface_method_stub(Register recv_klass,
1058 Register holder_klass,
1059 Register resolved_klass,
1060 Register method_result,
1061 Register temp_reg,
1062 Register temp_reg2,
1063 int itable_index,
1064 Label& L_no_such_interface);
1065
1066 // virtual method calling
1067 // n.b. x86 allows RegisterOrConstant for vtable_index
1485 } \
1486 \
1487 void INSN(Register Rd, Register Rn, Register Rm) { \
1488 Assembler::INSN(Rd, Rn, Rm); \
1489 } \
1490 \
1491 void INSN(Register Rd, Register Rn, Register Rm, \
1492 ext::operation option, int amount = 0) { \
1493 Assembler::INSN(Rd, Rn, Rm, option, amount); \
1494 }
1495
1496 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)
1497
1498 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1499 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1500 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1501 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1502
1503 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1504
1505 void verified_entry(Compile* C, int sp_inc);
1506
1507 // Inline type specific methods
1508 #include "asm/macroAssembler_common.hpp"
1509
1510 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1511 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1512 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1513 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1514 RegState reg_state[]);
1515 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1516 VMRegPair* from, int from_count, int& from_index, VMReg to,
1517 RegState reg_state[], Register val_array);
1518 int extend_stack_for_inline_args(int args_on_stack);
1519 void remove_frame(int initial_framesize, bool needs_stack_repair);
1520 VMReg spill_reg_for(VMReg reg);
1521 void save_stack_increment(int sp_inc, int frame_size);
1522
// Emit a bounds-checked computed jump into a table of equal-sized code
// entries starting at 'jumptable'.  'index' is the switch value,
// [lowbound, highbound) the valid range, and 'stride' the number of
// instructions per table entry.  Out-of-range values branch to
// 'jumptable_end'.
1523 void tableswitch(Register index, jint lowbound, jint highbound,
1524 Label &jumptable, Label &jumptable_end, int stride = 1) {
// rscratch1 = pc-relative address of the table's first entry.
1525 adr(rscratch1, jumptable);
// rscratch2 = index - lowbound, the zero-based table slot.
1526 subsw(rscratch2, index, lowbound);
// Compare the slot against the table size (SUBS into zr == CMP:
// only the flags are kept).
1527 subsw(zr, rscratch2, highbound - lowbound);
// Unsigned >= catches both slot >= size and index < lowbound
// (a negative slot wraps to a huge unsigned value).
1528 br(Assembler::HS, jumptable_end);
// entry = base + slot * (stride * instruction_size): the
// sign-extended slot is shifted by the power-of-two entry size.
1529 add(rscratch1, rscratch1, rscratch2,
1530 ext::sxtw, exact_log2(stride * Assembler::instruction_size));
// Indirect jump into the table.
1531 br(rscratch1);
1532 }
1533
1534 // Form an address from base + offset in Rd. Rd may or may not
1535 // actually be used: you must use the Address that is returned. It
1536 // is up to you to ensure that the shift provided matches the size
1537 // of your data.
1538 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
1539
1540 // Return true iff an address is within the 48-bit AArch64 address
1541 // space.
1542 bool is_valid_AArch64_address(address a) {
// Debug check for the arrays-hashcode intrinsic: asserts that the
// caller's register choices match the fixed register assignment the
// matching rules in aarch64.ad expect for this stub.
1577 #define ARRAYS_HASHCODE_REGISTERS \
1578 do { \
1579 assert(result == r0 && \
1580 ary == r1 && \
1581 cnt == r2 && \
1582 vdata0 == v3 && \
1583 vdata1 == v2 && \
1584 vdata2 == v1 && \
1585 vdata3 == v0 && \
1586 vmul0 == v4 && \
1587 vmul1 == v5 && \
1588 vmul2 == v6 && \
1589 vmul3 == v7 && \
1590 vpow == v12 && \
1591 vpowm == v13, "registers must match aarch64.ad"); \
1592 } while (0)
1593
1594 void string_equals(Register a1, Register a2, Register result, Register cnt1);
1595
1596 void fill_words(Register base, Register cnt, Register value);
1597 void fill_words(Register base, uint64_t cnt, Register value);
1598
1599 address zero_words(Register base, uint64_t cnt);
1600 address zero_words(Register ptr, Register cnt);
1601 void zero_dcache_blocks(Register base, Register cnt);
1602
1603 static const int zero_words_block_size;
1604
1605 address byte_array_inflate(Register src, Register dst, Register len,
1606 FloatRegister vtmp1, FloatRegister vtmp2,
1607 FloatRegister vtmp3, Register tmp4);
1608
1609 void char_array_compress(Register src, Register dst, Register len,
1610 Register res,
1611 FloatRegister vtmp0, FloatRegister vtmp1,
1612 FloatRegister vtmp2, FloatRegister vtmp3,
1613 FloatRegister vtmp4, FloatRegister vtmp5);
1614
1615 void encode_iso_array(Register src, Register dst,
1616 Register len, Register res, bool ascii,
1617 FloatRegister vtmp0, FloatRegister vtmp1,
1618 FloatRegister vtmp2, FloatRegister vtmp3,
|