17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.inline.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "oops/compressedKlass.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/globalDefinitions.hpp"
37 #include "utilities/powerOfTwo.hpp"
38
39 class OopMap;
40
41 // MacroAssembler extends Assembler by frequently used macros.
42 //
43 // Instructions for which a 'better' code sequence exists depending
44 // on arguments should also go in here.
45
46 class MacroAssembler: public Assembler {
47 friend class LIR_Assembler;
48
49 public:
50 using Assembler::mov;
51 using Assembler::movi;
52
53 protected:
54
55 // Support for VM calls
56 //
57 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
168
  // Bind label L at the current pc, then reset the code buffer's
  // instruction-merging bookkeeping: clearing the last-insn cache
  // prevents any peephole merge across this branch target, and the
  // label's pc is recorded via set_last_label.
  void bind(Label& L) {
    Assembler::bind(L);
    code()->clear_last_insn();
    code()->set_last_label(pc());
  }
174
175 void membar(Membar_mask_bits order_constraint);
176
177 using Assembler::ldr;
178 using Assembler::str;
179 using Assembler::ldrw;
180 using Assembler::strw;
181
182 void ldr(Register Rx, const Address &adr);
183 void ldrw(Register Rw, const Address &adr);
184 void str(Register Rx, const Address &adr);
185 void strw(Register Rx, const Address &adr);
186
187 // Frame creation and destruction shared between JITs.
188 void build_frame(int framesize);
189 void remove_frame(int framesize);
190
  // Load the identifying string of the unimplemented call site (see the
  // call_Unimplemented() macro below, which passes __PRETTY_FUNCTION__ /
  // __FUNCSIG__) into rscratch2 so it is visible in a debugger. Note:
  // only the mov is emitted here; no actual call instruction follows.
  virtual void _call_Unimplemented(address call_site) {
    mov(rscratch2, call_site);
  }
194
195 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
196 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
197 // https://reviews.llvm.org/D3311
198
199 #ifdef _WIN64
200 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
201 #else
202 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
203 #endif
204
205 // aliases defined in AARCH64 spec
206
  // cmpw is the standard AArch64 alias: a 32-bit subtract into the zero
  // register, i.e. it sets the condition flags from Rd - imm and
  // discards the arithmetic result.
  template<class T>
  inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
660 mrs(0b011, 0b1110, 0b0000, 0b110, reg);
661 }
662
663 // idiv variant which deals with MINLONG as dividend and -1 as divisor
664 int corrected_idivl(Register result, Register ra, Register rb,
665 bool want_remainder, Register tmp = rscratch1);
666 int corrected_idivq(Register result, Register ra, Register rb,
667 bool want_remainder, Register tmp = rscratch1);
668
669 // Support for null-checks
670 //
671 // Generates code that causes a null OS exception if the content of reg is null.
672 // If the accessed location is M[reg + offset] and the offset is known, provide the
673 // offset. No explicit code generation is needed if the offset is within a certain
674 // range (0 <= offset <= page_size).
675
676 virtual void null_check(Register reg, int offset = -1);
677 static bool needs_explicit_null_check(intptr_t offset);
678 static bool uses_implicit_null_check(void* address);
679
680 static address target_addr_for_insn(address insn_addr);
681
682 // Required platform-specific helpers for Label::patch_instructions.
683 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
684 static int pd_patch_instruction_size(address branch, address target);
  // Patch the instruction at 'branch' so it targets 'target'. The
  // file/line parameters exist only to match the AbstractAssembler
  // interface and are ignored on this platform.
  static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
    pd_patch_instruction_size(branch, target);
  }
688 static address pd_call_destination(address branch) {
689 return target_addr_for_insn(branch);
690 }
691 #ifndef PRODUCT
692 static void pd_print_patched_instruction(address branch);
693 #endif
694
695 static int patch_oop(address insn_addr, address o);
696 static int patch_narrow_klass(address insn_addr, narrowKlass n);
697
698 // Return whether code is emitted to a scratch blob.
699 virtual bool in_scratch_emit_size() {
887 void set_last_Java_frame(Register last_java_sp,
888 Register last_java_fp,
889 Register last_java_pc,
890 Register scratch);
891
892 void reset_last_Java_frame(Register thread);
893
894 // thread in the default location (rthread)
895 void reset_last_Java_frame(bool clear_fp);
896
897 void resolve_jobject(Register value, Register tmp1, Register tmp2);
898 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
899
900 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
901 void c2bool(Register x);
902
903 void load_method_holder_cld(Register rresult, Register rmethod);
904 void load_method_holder(Register holder, Register method);
905
906 // oop manipulations
907 void load_narrow_klass_compact(Register dst, Register src);
908 void load_klass(Register dst, Register src);
909 void store_klass(Register dst, Register src);
910 void cmp_klass(Register obj, Register klass, Register tmp);
911 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
912
913 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
914 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
915 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
916
917 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
918 Register tmp1, Register tmp2);
919
920 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
921 Register tmp1, Register tmp2, Register tmp3);
922
923 void load_heap_oop(Register dst, Address src, Register tmp1,
924 Register tmp2, DecoratorSet decorators = 0);
925
926 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
927 Register tmp2, DecoratorSet decorators = 0);
928 void store_heap_oop(Address dst, Register val, Register tmp1,
929 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
930
931 // currently unimplemented
932 // Used for storing null. All other oop constants should be
933 // stored using routines that take a jobject.
934 void store_heap_oop_null(Address dst);
935
936 void store_klass_gap(Register dst, Register src);
937
938 // This dummy is to prevent a call to store_heap_oop from
939 // converting a zero (like null) into a Register by giving
940 // the compiler two choices it can't resolve
941
942 void store_heap_oop(Address dst, void* dummy);
943
  // Compressed-oop conversions between a full heap oop and its narrow
  // encoding. The single-register overloads operate in place by
  // forwarding to the two-register forms with d == s.
  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  // _not_null variants: presumably the oop is known non-null so the
  // implementation can skip the null handling of the general forms —
  // confirm against macroAssembler_aarch64.cpp.
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);
952
953 void set_narrow_oop(Register dst, jobject obj);
954
955 void decode_klass_not_null_for_aot(Register dst, Register src);
965 void reinit_heapbase();
966
967 DEBUG_ONLY(void verify_heapbase(const char* msg);)
968
969 void push_CPU_state(bool save_vectors = false, bool use_sve = false,
970 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
971 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
972 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
973
974 void push_cont_fastpath(Register java_thread = rthread);
975 void pop_cont_fastpath(Register java_thread = rthread);
976
977 // Round up to a power of two
978 void round_to(Register reg, int modulus);
979
980 // java.lang.Math::round intrinsics
981 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
982 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
983
984 // allocation
985 void tlab_allocate(
986 Register obj, // result: pointer to object after successful allocation
987 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
988 int con_size_in_bytes, // object size in bytes if known at compile time
989 Register t1, // temp register
990 Register t2, // temp register
991 Label& slow_case // continuation point if fast allocation fails
992 );
993 void verify_tlab();
994
995 // interface method calling
996 void lookup_interface_method(Register recv_klass,
997 Register intf_klass,
998 RegisterOrConstant itable_index,
999 Register method_result,
1000 Register scan_temp,
1001 Label& no_such_interface,
1002 bool return_method = true);
1003
1004 void lookup_interface_method_stub(Register recv_klass,
1005 Register holder_klass,
1006 Register resolved_klass,
1007 Register method_result,
1008 Register temp_reg,
1009 Register temp_reg2,
1010 int itable_index,
1011 Label& L_no_such_interface);
1012
1013 // virtual method calling
1014 // n.b. x86 allows RegisterOrConstant for vtable_index
1432 } \
1433 \
1434 void INSN(Register Rd, Register Rn, Register Rm) { \
1435 Assembler::INSN(Rd, Rn, Rm); \
1436 } \
1437 \
1438 void INSN(Register Rd, Register Rn, Register Rm, \
1439 ext::operation option, int amount = 0) { \
1440 Assembler::INSN(Rd, Rn, Rm, option, amount); \
1441 }
1442
1443 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)
1444
1445 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1446 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1447 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1448 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1449
1450 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1451
  // Emit a table switch: branch into 'jumptable' at entry
  // (index - lowbound), or to 'jumptable_end' when index lies outside
  // [lowbound, highbound). 'stride' is the number of instructions per
  // table entry; stride * instruction_size must be a power of two for
  // exact_log2. Clobbers rscratch1 and rscratch2.
  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);                  // rscratch1 = table base
    subsw(rscratch2, index, lowbound);          // rscratch2 = zero-based index
    subsw(zr, rscratch2, highbound - lowbound); // compare against table size
    // Unsigned HS also catches index < lowbound: the subtraction above
    // wraps to a large unsigned value.
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);                              // jump to the selected entry
  }
1462
1463 // Form an address from base + offset in Rd. Rd may or may not
1464 // actually be used: you must use the Address that is returned. It
1465 // is up to you to ensure that the shift provided matches the size
1466 // of your data.
1467 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
1468
1469 // Return true iff an address is within the 48-bit AArch64 address
1470 // space.
1471 bool is_valid_AArch64_address(address a) {
// Asserts that the register assignment used by the arrays-hashcode
// intrinsic matches the fixed assignment hard-wired in aarch64.ad.
// Compiles to nothing in product builds, where assert() is a no-op.
#define ARRAYS_HASHCODE_REGISTERS                                \
  do {                                                           \
    assert(result == r0 &&                                       \
           ary == r1 &&                                          \
           cnt == r2 &&                                          \
           vdata0 == v3 &&                                       \
           vdata1 == v2 &&                                       \
           vdata2 == v1 &&                                       \
           vdata3 == v0 &&                                       \
           vmul0 == v4 &&                                        \
           vmul1 == v5 &&                                        \
           vmul2 == v6 &&                                        \
           vmul3 == v7 &&                                        \
           vpow == v12 &&                                        \
           vpowm == v13, "registers must match aarch64.ad");     \
  } while (0)
1522
1523 void string_equals(Register a1, Register a2, Register result, Register cnt1);
1524
1525 void fill_words(Register base, Register cnt, Register value);
1526 address zero_words(Register base, uint64_t cnt);
1527 address zero_words(Register ptr, Register cnt);
1528 void zero_dcache_blocks(Register base, Register cnt);
1529
1530 static const int zero_words_block_size;
1531
1532 address byte_array_inflate(Register src, Register dst, Register len,
1533 FloatRegister vtmp1, FloatRegister vtmp2,
1534 FloatRegister vtmp3, Register tmp4);
1535
1536 void char_array_compress(Register src, Register dst, Register len,
1537 Register res,
1538 FloatRegister vtmp0, FloatRegister vtmp1,
1539 FloatRegister vtmp2, FloatRegister vtmp3,
1540 FloatRegister vtmp4, FloatRegister vtmp5);
1541
1542 void encode_iso_array(Register src, Register dst,
1543 Register len, Register res, bool ascii,
1544 FloatRegister vtmp0, FloatRegister vtmp1,
1545 FloatRegister vtmp2, FloatRegister vtmp3,
|
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.inline.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "oops/compressedKlass.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/globalDefinitions.hpp"
37 #include "utilities/macros.hpp"
38 #include "utilities/powerOfTwo.hpp"
39 #include "runtime/signature.hpp"
40
41
42 class ciInlineKlass;
43
44 class OopMap;
45
46 // MacroAssembler extends Assembler by frequently used macros.
47 //
48 // Instructions for which a 'better' code sequence exists depending
49 // on arguments should also go in here.
50
51 class MacroAssembler: public Assembler {
52 friend class LIR_Assembler;
53
54 public:
55 using Assembler::mov;
56 using Assembler::movi;
57
58 protected:
59
60 // Support for VM calls
61 //
62 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
173
  // Bind label L at the current pc and reset the buffer's merge state:
  // the last-insn cache is cleared so no instruction fusion happens
  // across a branch target, and the label position is recorded.
  void bind(Label& L) {
    Assembler::bind(L);
    code()->clear_last_insn();
    code()->set_last_label(pc());
  }
179
180 void membar(Membar_mask_bits order_constraint);
181
182 using Assembler::ldr;
183 using Assembler::str;
184 using Assembler::ldrw;
185 using Assembler::strw;
186
187 void ldr(Register Rx, const Address &adr);
188 void ldrw(Register Rw, const Address &adr);
189 void str(Register Rx, const Address &adr);
190 void strw(Register Rx, const Address &adr);
191
192 // Frame creation and destruction shared between JITs.
193 DEBUG_ONLY(void build_frame(int framesize);)
194 void build_frame(int framesize DEBUG_ONLY(COMMA bool zap_rfp_lr_spills));
195 void remove_frame(int framesize);
196
  // Place the call site's identifying string (supplied by the
  // call_Unimplemented() macro below) into rscratch2 for debugger
  // inspection. Only the mov is emitted; no call instruction.
  virtual void _call_Unimplemented(address call_site) {
    mov(rscratch2, call_site);
  }
200
201 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
202 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
203 // https://reviews.llvm.org/D3311
204
205 #ifdef _WIN64
206 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
207 #else
208 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
209 #endif
210
211 // aliases defined in AARCH64 spec
212
  // Standard AArch64 alias: 32-bit compare is subtract-to-zero-register,
  // setting flags from Rd - imm and discarding the result.
  template<class T>
  inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
666 mrs(0b011, 0b1110, 0b0000, 0b110, reg);
667 }
668
669 // idiv variant which deals with MINLONG as dividend and -1 as divisor
670 int corrected_idivl(Register result, Register ra, Register rb,
671 bool want_remainder, Register tmp = rscratch1);
672 int corrected_idivq(Register result, Register ra, Register rb,
673 bool want_remainder, Register tmp = rscratch1);
674
675 // Support for null-checks
676 //
677 // Generates code that causes a null OS exception if the content of reg is null.
678 // If the accessed location is M[reg + offset] and the offset is known, provide the
679 // offset. No explicit code generation is needed if the offset is within a certain
680 // range (0 <= offset <= page_size).
681
682 virtual void null_check(Register reg, int offset = -1);
683 static bool needs_explicit_null_check(intptr_t offset);
684 static bool uses_implicit_null_check(void* address);
685
686 // markWord tests, kills markWord reg
687 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
688
689 // inlineKlass queries, kills temp_reg
690 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
691
692 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
693 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
694 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
695 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
696
697 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
698 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
699 void test_flat_array_oop(Register klass, Register temp_reg, Label& is_flat_array);
700 void test_non_flat_array_oop(Register oop, Register temp_reg, Label&is_non_flat_array);
701 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
702 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array);
703
704 // Check array klass layout helper for flat or null-free arrays...
705 void test_flat_array_layout(Register lh, Label& is_flat_array);
706 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
707
708 static address target_addr_for_insn(address insn_addr);
709
710 // Required platform-specific helpers for Label::patch_instructions.
711 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
712 static int pd_patch_instruction_size(address branch, address target);
  // Patch the instruction at 'branch' to target 'target'. file/line are
  // accepted only for AbstractAssembler interface compatibility and are
  // unused here.
  static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
    pd_patch_instruction_size(branch, target);
  }
716 static address pd_call_destination(address branch) {
717 return target_addr_for_insn(branch);
718 }
719 #ifndef PRODUCT
720 static void pd_print_patched_instruction(address branch);
721 #endif
722
723 static int patch_oop(address insn_addr, address o);
724 static int patch_narrow_klass(address insn_addr, narrowKlass n);
725
726 // Return whether code is emitted to a scratch blob.
727 virtual bool in_scratch_emit_size() {
915 void set_last_Java_frame(Register last_java_sp,
916 Register last_java_fp,
917 Register last_java_pc,
918 Register scratch);
919
920 void reset_last_Java_frame(Register thread);
921
922 // thread in the default location (rthread)
923 void reset_last_Java_frame(bool clear_fp);
924
925 void resolve_jobject(Register value, Register tmp1, Register tmp2);
926 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
927
928 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
929 void c2bool(Register x);
930
931 void load_method_holder_cld(Register rresult, Register rmethod);
932 void load_method_holder(Register holder, Register method);
933
934 // oop manipulations
935 void load_metadata(Register dst, Register src);
936
937 void load_narrow_klass_compact(Register dst, Register src);
938 void load_klass(Register dst, Register src);
939 void store_klass(Register dst, Register src);
940 void cmp_klass(Register obj, Register klass, Register tmp);
941 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
942
943 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
944 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
945 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
946
947 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
948 Register tmp1, Register tmp2);
949
950 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
951 Register tmp1, Register tmp2, Register tmp3);
952
953 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
954
955 // inline type data payload offsets...
956 void payload_offset(Register inline_klass, Register offset);
957 void payload_address(Register oop, Register data, Register inline_klass);
958 // get data payload ptr a flat value array at index, kills rcx and index
959 void data_for_value_array_index(Register array, Register array_klass,
960 Register index, Register data);
961
962 void load_heap_oop(Register dst, Address src, Register tmp1,
963 Register tmp2, DecoratorSet decorators = 0);
964
965 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
966 Register tmp2, DecoratorSet decorators = 0);
967 void store_heap_oop(Address dst, Register val, Register tmp1,
968 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
969
970 // currently unimplemented
971 // Used for storing null. All other oop constants should be
972 // stored using routines that take a jobject.
973 void store_heap_oop_null(Address dst);
974
975 void load_prototype_header(Register dst, Register src);
976
977 void store_klass_gap(Register dst, Register src);
978
979 // This dummy is to prevent a call to store_heap_oop from
980 // converting a zero (like null) into a Register by giving
981 // the compiler two choices it can't resolve
982
983 void store_heap_oop(Address dst, void* dummy);
984
  // Compressed-oop encode/decode between a full heap oop and its narrow
  // form. Single-register overloads are in-place conveniences that
  // forward to the two-register versions with d == s.
  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  // NOTE(review): the _not_null variants presumably rely on the oop
  // being known non-null to elide null handling — verify in the .cpp.
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);
993
994 void set_narrow_oop(Register dst, jobject obj);
995
996 void decode_klass_not_null_for_aot(Register dst, Register src);
1006 void reinit_heapbase();
1007
1008 DEBUG_ONLY(void verify_heapbase(const char* msg);)
1009
1010 void push_CPU_state(bool save_vectors = false, bool use_sve = false,
1011 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
1012 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
1013 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
1014
1015 void push_cont_fastpath(Register java_thread = rthread);
1016 void pop_cont_fastpath(Register java_thread = rthread);
1017
1018 // Round up to a power of two
1019 void round_to(Register reg, int modulus);
1020
1021 // java.lang.Math::round intrinsics
1022 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
1023 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
1024
1025 // allocation
1026
1027 // Object / value buffer allocation...
1028 // Allocate instance of klass, assumes klass initialized by caller
1029 // new_obj prefers to be rax
1030 // Kills t1 and t2, perserves klass, return allocation in new_obj (rsi on LP64)
1031 void allocate_instance(Register klass, Register new_obj,
1032 Register t1, Register t2,
1033 bool clear_fields, Label& alloc_failed);
1034
1035 void tlab_allocate(
1036 Register obj, // result: pointer to object after successful allocation
1037 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
1038 int con_size_in_bytes, // object size in bytes if known at compile time
1039 Register t1, // temp register
1040 Register t2, // temp register
1041 Label& slow_case // continuation point if fast allocation fails
1042 );
1043 void verify_tlab();
1044
1045 void inline_layout_info(Register holder_klass, Register index, Register layout_info);
1046
1047 // interface method calling
1048 void lookup_interface_method(Register recv_klass,
1049 Register intf_klass,
1050 RegisterOrConstant itable_index,
1051 Register method_result,
1052 Register scan_temp,
1053 Label& no_such_interface,
1054 bool return_method = true);
1055
1056 void lookup_interface_method_stub(Register recv_klass,
1057 Register holder_klass,
1058 Register resolved_klass,
1059 Register method_result,
1060 Register temp_reg,
1061 Register temp_reg2,
1062 int itable_index,
1063 Label& L_no_such_interface);
1064
1065 // virtual method calling
1066 // n.b. x86 allows RegisterOrConstant for vtable_index
1484 } \
1485 \
1486 void INSN(Register Rd, Register Rn, Register Rm) { \
1487 Assembler::INSN(Rd, Rn, Rm); \
1488 } \
1489 \
1490 void INSN(Register Rd, Register Rn, Register Rm, \
1491 ext::operation option, int amount = 0) { \
1492 Assembler::INSN(Rd, Rn, Rm, option, amount); \
1493 }
1494
1495 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)
1496
1497 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1498 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1499 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1500 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1501
1502 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1503
1504 void verified_entry(Compile* C, int sp_inc);
1505
1506 // Inline type specific methods
1507 #include "asm/macroAssembler_common.hpp"
1508
1509 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1510 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1511 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1512 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1513 RegState reg_state[]);
1514 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1515 VMRegPair* from, int from_count, int& from_index, VMReg to,
1516 RegState reg_state[], Register val_array);
1517 int extend_stack_for_inline_args(int args_on_stack);
1518 void remove_frame(int initial_framesize, bool needs_stack_repair);
1519 VMReg spill_reg_for(VMReg reg);
1520 void save_stack_increment(int sp_inc, int frame_size);
1521
  // Emit a table switch: jumps into 'jumptable' at entry
  // (index - lowbound) when index is in [lowbound, highbound), else to
  // 'jumptable_end'. 'stride' is the instruction count per table entry;
  // stride * instruction_size must be a power of two (exact_log2).
  // Clobbers rscratch1 and rscratch2.
  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);                  // table base address
    subsw(rscratch2, index, lowbound);          // zero-based index
    subsw(zr, rscratch2, highbound - lowbound); // flags = index vs size
    // HS (unsigned >=) also handles index < lowbound, since the
    // zero-based index wrapped to a large unsigned value.
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);                              // dispatch
  }
1532
1533 // Form an address from base + offset in Rd. Rd may or may not
1534 // actually be used: you must use the Address that is returned. It
1535 // is up to you to ensure that the shift provided matches the size
1536 // of your data.
1537 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
1538
1539 // Return true iff an address is within the 48-bit AArch64 address
1540 // space.
1541 bool is_valid_AArch64_address(address a) {
// Debug-only check that the arrays-hashcode intrinsic's register
// assignment agrees with the fixed registers used in aarch64.ad; a
// no-op in product builds where assert() vanishes.
#define ARRAYS_HASHCODE_REGISTERS                                \
  do {                                                           \
    assert(result == r0 &&                                       \
           ary == r1 &&                                          \
           cnt == r2 &&                                          \
           vdata0 == v3 &&                                       \
           vdata1 == v2 &&                                       \
           vdata2 == v1 &&                                       \
           vdata3 == v0 &&                                       \
           vmul0 == v4 &&                                        \
           vmul1 == v5 &&                                        \
           vmul2 == v6 &&                                        \
           vmul3 == v7 &&                                        \
           vpow == v12 &&                                        \
           vpowm == v13, "registers must match aarch64.ad");     \
  } while (0)
1592
1593 void string_equals(Register a1, Register a2, Register result, Register cnt1);
1594
1595 void fill_words(Register base, Register cnt, Register value);
1596 void fill_words(Register base, uint64_t cnt, Register value);
1597
1598 address zero_words(Register base, uint64_t cnt);
1599 address zero_words(Register ptr, Register cnt);
1600 void zero_dcache_blocks(Register base, Register cnt);
1601
1602 static const int zero_words_block_size;
1603
1604 address byte_array_inflate(Register src, Register dst, Register len,
1605 FloatRegister vtmp1, FloatRegister vtmp2,
1606 FloatRegister vtmp3, Register tmp4);
1607
1608 void char_array_compress(Register src, Register dst, Register len,
1609 Register res,
1610 FloatRegister vtmp0, FloatRegister vtmp1,
1611 FloatRegister vtmp2, FloatRegister vtmp3,
1612 FloatRegister vtmp4, FloatRegister vtmp5);
1613
1614 void encode_iso_array(Register src, Register dst,
1615 Register len, Register res, bool ascii,
1616 FloatRegister vtmp0, FloatRegister vtmp1,
1617 FloatRegister vtmp2, FloatRegister vtmp3,
|