16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.inline.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "oops/compressedKlass.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/powerOfTwo.hpp"
37
38 class OopMap;
39
40 // MacroAssembler extends Assembler by frequently used macros.
41 //
42 // Instructions for which a 'better' code sequence exists depending
43 // on arguments should also go in here.
44
45 class MacroAssembler: public Assembler {
46 friend class LIR_Assembler;
47
48 public:
49 using Assembler::mov;
50 using Assembler::movi;
51
52 protected:
53
54 // Support for VM calls
55 //
56 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
167
// Bind label L to the current code position. In addition to the base
// Assembler::bind, this clears the code buffer's record of the last
// emitted instruction and records the current pc as the last bound
// label — presumably so instruction-merging logic in the buffer does
// not combine across a branch target (TODO confirm against CodeBuffer).
168 void bind(Label& L) {
169 Assembler::bind(L);
170 code()->clear_last_insn();
171 code()->set_last_label(pc());
172 }
173
174 void membar(Membar_mask_bits order_constraint);
175
176 using Assembler::ldr;
177 using Assembler::str;
178 using Assembler::ldrw;
179 using Assembler::strw;
180
181 void ldr(Register Rx, const Address &adr);
182 void ldrw(Register Rw, const Address &adr);
183 void str(Register Rx, const Address &adr);
184 void strw(Register Rx, const Address &adr);
185
186 // Frame creation and destruction shared between JITs.
187 void build_frame(int framesize);
188 void remove_frame(int framesize);
189
// Helper behind the call_Unimplemented() macro below: loads the address
// of the calling function's name string (__FUNCSIG__/__PRETTY_FUNCTION__)
// into rscratch2 so it is visible in a debugger. Note: no call or trap is
// emitted here — only the mov.
190 virtual void _call_Unimplemented(address call_site) {
191 mov(rscratch2, call_site);
192 }
193
194 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
195 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
196 // https://reviews.llvm.org/D3311
197
198 #ifdef _WIN64
199 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
200 #else
201 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
202 #endif
203
204 // aliases defined in AARCH64 spec
205
// 32-bit compare: per the A64 spec, CMP is an alias of SUBS with the
// zero register as destination — flags are set, the result is discarded.
// Templated on T so both Register and immediate operands resolve to the
// matching subsw overload.
206 template<class T>
207 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
668 mrs(0b011, 0b1110, 0b0000, 0b110, reg);
669 }
670
671 // idiv variant which deals with MINLONG as dividend and -1 as divisor
672 int corrected_idivl(Register result, Register ra, Register rb,
673 bool want_remainder, Register tmp = rscratch1);
674 int corrected_idivq(Register result, Register ra, Register rb,
675 bool want_remainder, Register tmp = rscratch1);
676
677 // Support for null-checks
678 //
679 // Generates code that causes a null OS exception if the content of reg is null.
680 // If the accessed location is M[reg + offset] and the offset is known, provide the
681 // offset. No explicit code generation is needed if the offset is within a certain
682 // range (0 <= offset <= page_size).
683
684 virtual void null_check(Register reg, int offset = -1);
685 static bool needs_explicit_null_check(intptr_t offset);
686 static bool uses_implicit_null_check(void* address);
687
688 static address target_addr_for_insn(address insn_addr);
689
690 // Required platform-specific helpers for Label::patch_instructions.
691 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
692 static int pd_patch_instruction_size(address branch, address target);
// Patch the instruction at 'branch' to refer to 'target'. The file/line
// parameters exist only to match the shadowed AbstractAssembler
// signature and are ignored; the patched-size return value of
// pd_patch_instruction_size is likewise discarded.
693 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
694 pd_patch_instruction_size(branch, target);
695 }
// Return the destination address encoded in the branch/call instruction
// at 'branch' by decoding the instruction (see target_addr_for_insn).
696 static address pd_call_destination(address branch) {
697 return target_addr_for_insn(branch);
698 }
699 #ifndef PRODUCT
700 static void pd_print_patched_instruction(address branch);
701 #endif
702
703 static int patch_oop(address insn_addr, address o);
704 static int patch_narrow_klass(address insn_addr, narrowKlass n);
705
706 // Return whether code is emitted to a scratch blob.
707 virtual bool in_scratch_emit_size() {
896
897 void reset_last_Java_frame(Register thread);
898
899 // thread in the default location (rthread)
900 void reset_last_Java_frame(bool clear_fp);
901
902 // Stores
903 void store_check(Register obj); // store check for obj - register is destroyed afterwards
904 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
905
906 void resolve_jobject(Register value, Register tmp1, Register tmp2);
907 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
908
909 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
910 void c2bool(Register x);
911
912 void load_method_holder_cld(Register rresult, Register rmethod);
913 void load_method_holder(Register holder, Register method);
914
915 // oop manipulations
916 void load_narrow_klass_compact(Register dst, Register src);
917 void load_klass(Register dst, Register src);
918 void store_klass(Register dst, Register src);
919 void cmp_klass(Register obj, Register klass, Register tmp);
920 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
921
922 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
923 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
924 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
925
926 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
927 Register tmp1, Register tmp2);
928
929 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
930 Register tmp1, Register tmp2, Register tmp3);
931
932 void load_heap_oop(Register dst, Address src, Register tmp1,
933 Register tmp2, DecoratorSet decorators = 0);
934
935 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
936 Register tmp2, DecoratorSet decorators = 0);
937 void store_heap_oop(Address dst, Register val, Register tmp1,
938 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
939
940 // currently unimplemented
941 // Used for storing null. All other oop constants should be
942 // stored using routines that take a jobject.
943 void store_heap_oop_null(Address dst);
944
945 void store_klass_gap(Register dst, Register src);
946
947 // This dummy is to prevent a call to store_heap_oop from
948 // converting a zero (like null) into a Register by giving
949 // the compiler two choices it can't resolve
950
951 void store_heap_oop(Address dst, void* dummy);
952
// Compressed-oop encode/decode. The single-register overloads are
// in-place conveniences that delegate to the two-register forms with
// destination == source. The _not_null variants may skip the null check
// of the general forms — TODO confirm against the .cpp implementations.
953 void encode_heap_oop(Register d, Register s);
954 void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
955 void decode_heap_oop(Register d, Register s);
956 void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
957 void encode_heap_oop_not_null(Register r);
958 void decode_heap_oop_not_null(Register r);
959 void encode_heap_oop_not_null(Register dst, Register src);
960 void decode_heap_oop_not_null(Register dst, Register src);
961
962 void set_narrow_oop(Register dst, jobject obj);
963
964 void decode_klass_not_null_for_aot(Register dst, Register src);
974 void reinit_heapbase();
975
976 DEBUG_ONLY(void verify_heapbase(const char* msg);)
977
978 void push_CPU_state(bool save_vectors = false, bool use_sve = false,
979 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
980 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
981 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
982
983 void push_cont_fastpath(Register java_thread = rthread);
984 void pop_cont_fastpath(Register java_thread = rthread);
985
986 // Round up to a power of two
987 void round_to(Register reg, int modulus);
988
989 // java.lang.Math::round intrinsics
990 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
991 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
992
993 // allocation
994 void tlab_allocate(
995 Register obj, // result: pointer to object after successful allocation
996 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
997 int con_size_in_bytes, // object size in bytes if known at compile time
998 Register t1, // temp register
999 Register t2, // temp register
1000 Label& slow_case // continuation point if fast allocation fails
1001 );
1002 void verify_tlab();
1003
1004 // interface method calling
1005 void lookup_interface_method(Register recv_klass,
1006 Register intf_klass,
1007 RegisterOrConstant itable_index,
1008 Register method_result,
1009 Register scan_temp,
1010 Label& no_such_interface,
1011 bool return_method = true);
1012
1013 void lookup_interface_method_stub(Register recv_klass,
1014 Register holder_klass,
1015 Register resolved_klass,
1016 Register method_result,
1017 Register temp_reg,
1018 Register temp_reg2,
1019 int itable_index,
1020 Label& L_no_such_interface);
1021
1022 // virtual method calling
1023 // n.b. x86 allows RegisterOrConstant for vtable_index
1441 } \
1442 \
1443 void INSN(Register Rd, Register Rn, Register Rm) { \
1444 Assembler::INSN(Rd, Rn, Rm); \
1445 } \
1446 \
1447 void INSN(Register Rd, Register Rn, Register Rm, \
1448 ext::operation option, int amount = 0) { \
1449 Assembler::INSN(Rd, Rn, Rm, option, amount); \
1450 }
1451
1452 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)
1453
1454 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1455 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1456 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1457 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1458
1459 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1460
// Emit a bounds-checked indirect jump into a jump table of entries
// 'stride' instructions apart. rscratch1 = table base (adr);
// rscratch2 = index - lowbound. The flags-only subsw against
// (highbound - lowbound) plus the unsigned HS branch rejects both
// index >= highbound and index < lowbound (a negative difference reads
// as a large unsigned value). The add sign-extends the offset and
// scales it by the entry size; stride * instruction_size must be a
// power of two for exact_log2.
1461 void tableswitch(Register index, jint lowbound, jint highbound,
1462 Label &jumptable, Label &jumptable_end, int stride = 1) {
1463 adr(rscratch1, jumptable);
1464 subsw(rscratch2, index, lowbound);
1465 subsw(zr, rscratch2, highbound - lowbound);
1466 br(Assembler::HS, jumptable_end);
1467 add(rscratch1, rscratch1, rscratch2,
1468 ext::sxtw, exact_log2(stride * Assembler::instruction_size));
1469 br(rscratch1);
1470 }
1471
1472 // Form an address from base + offset in Rd. Rd may or may not
1473 // actually be used: you must use the Address that is returned. It
1474 // is up to you to ensure that the shift provided matches the size
1475 // of your data.
1476 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
1477
1478 // Return true iff an address is within the 48-bit AArch64 address
1479 // space.
1480 bool is_valid_AArch64_address(address a) {
// Debug assertion that the caller's register choices for the arrays
// hashCode intrinsic match the fixed assignment expected by aarch64.ad
// (per the assert message). Comments must stay outside the \-continued
// macro body.
1515 #define ARRAYS_HASHCODE_REGISTERS \
1516 do { \
1517 assert(result == r0 && \
1518 ary == r1 && \
1519 cnt == r2 && \
1520 vdata0 == v3 && \
1521 vdata1 == v2 && \
1522 vdata2 == v1 && \
1523 vdata3 == v0 && \
1524 vmul0 == v4 && \
1525 vmul1 == v5 && \
1526 vmul2 == v6 && \
1527 vmul3 == v7 && \
1528 vpow == v12 && \
1529 vpowm == v13, "registers must match aarch64.ad"); \
1530 } while (0)
1531
1532 void string_equals(Register a1, Register a2, Register result, Register cnt1);
1533
1534 void fill_words(Register base, Register cnt, Register value);
1535 address zero_words(Register base, uint64_t cnt);
1536 address zero_words(Register ptr, Register cnt);
1537 void zero_dcache_blocks(Register base, Register cnt);
1538
1539 static const int zero_words_block_size;
1540
1541 address byte_array_inflate(Register src, Register dst, Register len,
1542 FloatRegister vtmp1, FloatRegister vtmp2,
1543 FloatRegister vtmp3, Register tmp4);
1544
1545 void char_array_compress(Register src, Register dst, Register len,
1546 Register res,
1547 FloatRegister vtmp0, FloatRegister vtmp1,
1548 FloatRegister vtmp2, FloatRegister vtmp3,
1549 FloatRegister vtmp4, FloatRegister vtmp5);
1550
1551 void encode_iso_array(Register src, Register dst,
1552 Register len, Register res, bool ascii,
1553 FloatRegister vtmp0, FloatRegister vtmp1,
1554 FloatRegister vtmp2, FloatRegister vtmp3,
|
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.inline.hpp"
30 #include "code/aotCodeCache.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "oops/compressedKlass.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/macros.hpp"
37 #include "utilities/powerOfTwo.hpp"
38 #include "runtime/signature.hpp"
39
40
41 class ciInlineKlass;
42
43 class OopMap;
44
45 // MacroAssembler extends Assembler by frequently used macros.
46 //
47 // Instructions for which a 'better' code sequence exists depending
48 // on arguments should also go in here.
49
50 class MacroAssembler: public Assembler {
51 friend class LIR_Assembler;
52
53 public:
54 using Assembler::mov;
55 using Assembler::movi;
56
57 protected:
58
59 // Support for VM calls
60 //
61 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
172
// Bind label L to the current code position. In addition to the base
// Assembler::bind, this clears the code buffer's record of the last
// emitted instruction and records the current pc as the last bound
// label — presumably so instruction-merging logic in the buffer does
// not combine across a branch target (TODO confirm against CodeBuffer).
173 void bind(Label& L) {
174 Assembler::bind(L);
175 code()->clear_last_insn();
176 code()->set_last_label(pc());
177 }
178
179 void membar(Membar_mask_bits order_constraint);
180
181 using Assembler::ldr;
182 using Assembler::str;
183 using Assembler::ldrw;
184 using Assembler::strw;
185
186 void ldr(Register Rx, const Address &adr);
187 void ldrw(Register Rw, const Address &adr);
188 void str(Register Rx, const Address &adr);
189 void strw(Register Rx, const Address &adr);
190
191 // Frame creation and destruction shared between JITs.
192 DEBUG_ONLY(void build_frame(int framesize);)
193 void build_frame(int framesize DEBUG_ONLY(COMMA bool zap_rfp_lr_spills));
194 void remove_frame(int framesize);
195
// Helper behind the call_Unimplemented() macro below: loads the address
// of the calling function's name string (__FUNCSIG__/__PRETTY_FUNCTION__)
// into rscratch2 so it is visible in a debugger. Note: no call or trap is
// emitted here — only the mov.
196 virtual void _call_Unimplemented(address call_site) {
197 mov(rscratch2, call_site);
198 }
199
200 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
201 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
202 // https://reviews.llvm.org/D3311
203
204 #ifdef _WIN64
205 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
206 #else
207 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
208 #endif
209
210 // aliases defined in AARCH64 spec
211
// 32-bit compare: per the A64 spec, CMP is an alias of SUBS with the
// zero register as destination — flags are set, the result is discarded.
// Templated on T so both Register and immediate operands resolve to the
// matching subsw overload.
212 template<class T>
213 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
674 mrs(0b011, 0b1110, 0b0000, 0b110, reg);
675 }
676
677 // idiv variant which deals with MINLONG as dividend and -1 as divisor
678 int corrected_idivl(Register result, Register ra, Register rb,
679 bool want_remainder, Register tmp = rscratch1);
680 int corrected_idivq(Register result, Register ra, Register rb,
681 bool want_remainder, Register tmp = rscratch1);
682
683 // Support for null-checks
684 //
685 // Generates code that causes a null OS exception if the content of reg is null.
686 // If the accessed location is M[reg + offset] and the offset is known, provide the
687 // offset. No explicit code generation is needed if the offset is within a certain
688 // range (0 <= offset <= page_size).
689
690 virtual void null_check(Register reg, int offset = -1);
691 static bool needs_explicit_null_check(intptr_t offset);
692 static bool uses_implicit_null_check(void* address);
693
694 // markWord tests, kills markWord reg
695 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
696
697 // inlineKlass queries, kills temp_reg
698 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
699
700 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
701 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
702 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
703 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
704
705 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
706 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
707 void test_flat_array_oop(Register klass, Register temp_reg, Label& is_flat_array);
708 void test_non_flat_array_oop(Register oop, Register temp_reg, Label&is_non_flat_array);
709 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
710 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array);
711
712 // Check array klass layout helper for flat or null-free arrays...
713 void test_flat_array_layout(Register lh, Label& is_flat_array);
714 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
715
716 static address target_addr_for_insn(address insn_addr);
717
718 // Required platform-specific helpers for Label::patch_instructions.
719 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
720 static int pd_patch_instruction_size(address branch, address target);
// Patch the instruction at 'branch' to refer to 'target'. The file/line
// parameters exist only to match the shadowed AbstractAssembler
// signature and are ignored; the patched-size return value of
// pd_patch_instruction_size is likewise discarded.
721 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
722 pd_patch_instruction_size(branch, target);
723 }
// Return the destination address encoded in the branch/call instruction
// at 'branch' by decoding the instruction (see target_addr_for_insn).
724 static address pd_call_destination(address branch) {
725 return target_addr_for_insn(branch);
726 }
727 #ifndef PRODUCT
728 static void pd_print_patched_instruction(address branch);
729 #endif
730
731 static int patch_oop(address insn_addr, address o);
732 static int patch_narrow_klass(address insn_addr, narrowKlass n);
733
734 // Return whether code is emitted to a scratch blob.
735 virtual bool in_scratch_emit_size() {
924
925 void reset_last_Java_frame(Register thread);
926
927 // thread in the default location (rthread)
928 void reset_last_Java_frame(bool clear_fp);
929
930 // Stores
931 void store_check(Register obj); // store check for obj - register is destroyed afterwards
932 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
933
934 void resolve_jobject(Register value, Register tmp1, Register tmp2);
935 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
936
937 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
938 void c2bool(Register x);
939
940 void load_method_holder_cld(Register rresult, Register rmethod);
941 void load_method_holder(Register holder, Register method);
942
943 // oop manipulations
944 void load_metadata(Register dst, Register src);
945
946 void load_narrow_klass_compact(Register dst, Register src);
947 void load_klass(Register dst, Register src);
948 void store_klass(Register dst, Register src);
949 void cmp_klass(Register obj, Register klass, Register tmp);
950 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
951
952 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
953 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
954 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
955
956 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
957 Register tmp1, Register tmp2);
958
959 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
960 Register tmp1, Register tmp2, Register tmp3);
961
962 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
963
964 // inline type data payload offsets...
965 void payload_offset(Register inline_klass, Register offset);
966 void payload_address(Register oop, Register data, Register inline_klass);
967 // get data payload ptr of a flat value array at index; kills rcx and index (NOTE(review): 'rcx' is an x86 register name — looks copied from the x86 version; confirm which aarch64 temp is actually clobbered)
968 void data_for_value_array_index(Register array, Register array_klass,
969 Register index, Register data);
970
971 void load_heap_oop(Register dst, Address src, Register tmp1,
972 Register tmp2, DecoratorSet decorators = 0);
973
974 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
975 Register tmp2, DecoratorSet decorators = 0);
976 void store_heap_oop(Address dst, Register val, Register tmp1,
977 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
978
979 // currently unimplemented
980 // Used for storing null. All other oop constants should be
981 // stored using routines that take a jobject.
982 void store_heap_oop_null(Address dst);
983
984 void load_prototype_header(Register dst, Register src);
985
986 void store_klass_gap(Register dst, Register src);
987
988 // This dummy is to prevent a call to store_heap_oop from
989 // converting a zero (like null) into a Register by giving
990 // the compiler two choices it can't resolve
991
992 void store_heap_oop(Address dst, void* dummy);
993
// Compressed-oop encode/decode. The single-register overloads are
// in-place conveniences that delegate to the two-register forms with
// destination == source. The _not_null variants may skip the null check
// of the general forms — TODO confirm against the .cpp implementations.
994 void encode_heap_oop(Register d, Register s);
995 void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
996 void decode_heap_oop(Register d, Register s);
997 void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
998 void encode_heap_oop_not_null(Register r);
999 void decode_heap_oop_not_null(Register r);
1000 void encode_heap_oop_not_null(Register dst, Register src);
1001 void decode_heap_oop_not_null(Register dst, Register src);
1002
1003 void set_narrow_oop(Register dst, jobject obj);
1004
1005 void decode_klass_not_null_for_aot(Register dst, Register src);
1015 void reinit_heapbase();
1016
1017 DEBUG_ONLY(void verify_heapbase(const char* msg);)
1018
1019 void push_CPU_state(bool save_vectors = false, bool use_sve = false,
1020 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
1021 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
1022 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
1023
1024 void push_cont_fastpath(Register java_thread = rthread);
1025 void pop_cont_fastpath(Register java_thread = rthread);
1026
1027 // Round up to a power of two
1028 void round_to(Register reg, int modulus);
1029
1030 // java.lang.Math::round intrinsics
1031 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
1032 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
1033
1034 // allocation
1035
1036 // Object / value buffer allocation...
1037 // Allocate instance of klass, assumes klass initialized by caller
1038 // new_obj prefers to be rax (NOTE(review): x86 register name — looks copied from the x86 version; confirm the aarch64 register)
1039 // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64 — likewise an x86-ism; verify for aarch64)
1040 void allocate_instance(Register klass, Register new_obj,
1041 Register t1, Register t2,
1042 bool clear_fields, Label& alloc_failed);
1043
1044 void tlab_allocate(
1045 Register obj, // result: pointer to object after successful allocation
1046 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
1047 int con_size_in_bytes, // object size in bytes if known at compile time
1048 Register t1, // temp register
1049 Register t2, // temp register
1050 Label& slow_case // continuation point if fast allocation fails
1051 );
1052 void verify_tlab();
1053
1054 void inline_layout_info(Register holder_klass, Register index, Register layout_info);
1055
1056 // interface method calling
1057 void lookup_interface_method(Register recv_klass,
1058 Register intf_klass,
1059 RegisterOrConstant itable_index,
1060 Register method_result,
1061 Register scan_temp,
1062 Label& no_such_interface,
1063 bool return_method = true);
1064
1065 void lookup_interface_method_stub(Register recv_klass,
1066 Register holder_klass,
1067 Register resolved_klass,
1068 Register method_result,
1069 Register temp_reg,
1070 Register temp_reg2,
1071 int itable_index,
1072 Label& L_no_such_interface);
1073
1074 // virtual method calling
1075 // n.b. x86 allows RegisterOrConstant for vtable_index
1493 } \
1494 \
1495 void INSN(Register Rd, Register Rn, Register Rm) { \
1496 Assembler::INSN(Rd, Rn, Rm); \
1497 } \
1498 \
1499 void INSN(Register Rd, Register Rn, Register Rm, \
1500 ext::operation option, int amount = 0) { \
1501 Assembler::INSN(Rd, Rn, Rm, option, amount); \
1502 }
1503
1504 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)
1505
1506 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1507 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1508 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1509 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1510
1511 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1512
1513 void verified_entry(Compile* C, int sp_inc);
1514
1515 // Inline type specific methods
1516 #include "asm/macroAssembler_common.hpp"
1517
1518 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1519 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1520 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1521 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1522 RegState reg_state[]);
1523 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1524 VMRegPair* from, int from_count, int& from_index, VMReg to,
1525 RegState reg_state[], Register val_array);
1526 int extend_stack_for_inline_args(int args_on_stack);
1527 void remove_frame(int initial_framesize, bool needs_stack_repair);
1528 VMReg spill_reg_for(VMReg reg);
1529 void save_stack_increment(int sp_inc, int frame_size);
1530
// Emit a bounds-checked indirect jump into a jump table of entries
// 'stride' instructions apart. rscratch1 = table base (adr);
// rscratch2 = index - lowbound. The flags-only subsw against
// (highbound - lowbound) plus the unsigned HS branch rejects both
// index >= highbound and index < lowbound (a negative difference reads
// as a large unsigned value). The add sign-extends the offset and
// scales it by the entry size; stride * instruction_size must be a
// power of two for exact_log2.
1531 void tableswitch(Register index, jint lowbound, jint highbound,
1532 Label &jumptable, Label &jumptable_end, int stride = 1) {
1533 adr(rscratch1, jumptable);
1534 subsw(rscratch2, index, lowbound);
1535 subsw(zr, rscratch2, highbound - lowbound);
1536 br(Assembler::HS, jumptable_end);
1537 add(rscratch1, rscratch1, rscratch2,
1538 ext::sxtw, exact_log2(stride * Assembler::instruction_size));
1539 br(rscratch1);
1540 }
1541
1542 // Form an address from base + offset in Rd. Rd may or may not
1543 // actually be used: you must use the Address that is returned. It
1544 // is up to you to ensure that the shift provided matches the size
1545 // of your data.
1546 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
1547
1548 // Return true iff an address is within the 48-bit AArch64 address
1549 // space.
1550 bool is_valid_AArch64_address(address a) {
// Debug assertion that the caller's register choices for the arrays
// hashCode intrinsic match the fixed assignment expected by aarch64.ad
// (per the assert message). Comments must stay outside the \-continued
// macro body.
1585 #define ARRAYS_HASHCODE_REGISTERS \
1586 do { \
1587 assert(result == r0 && \
1588 ary == r1 && \
1589 cnt == r2 && \
1590 vdata0 == v3 && \
1591 vdata1 == v2 && \
1592 vdata2 == v1 && \
1593 vdata3 == v0 && \
1594 vmul0 == v4 && \
1595 vmul1 == v5 && \
1596 vmul2 == v6 && \
1597 vmul3 == v7 && \
1598 vpow == v12 && \
1599 vpowm == v13, "registers must match aarch64.ad"); \
1600 } while (0)
1601
1602 void string_equals(Register a1, Register a2, Register result, Register cnt1);
1603
1604 void fill_words(Register base, Register cnt, Register value);
1605 void fill_words(Register base, uint64_t cnt, Register value);
1606
1607 address zero_words(Register base, uint64_t cnt);
1608 address zero_words(Register ptr, Register cnt);
1609 void zero_dcache_blocks(Register base, Register cnt);
1610
1611 static const int zero_words_block_size;
1612
1613 address byte_array_inflate(Register src, Register dst, Register len,
1614 FloatRegister vtmp1, FloatRegister vtmp2,
1615 FloatRegister vtmp3, Register tmp4);
1616
1617 void char_array_compress(Register src, Register dst, Register len,
1618 Register res,
1619 FloatRegister vtmp0, FloatRegister vtmp1,
1620 FloatRegister vtmp2, FloatRegister vtmp3,
1621 FloatRegister vtmp4, FloatRegister vtmp5);
1622
1623 void encode_iso_array(Register src, Register dst,
1624 Register len, Register res, bool ascii,
1625 FloatRegister vtmp0, FloatRegister vtmp1,
1626 FloatRegister vtmp2, FloatRegister vtmp3,
|