/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.inline.hpp"
#include "code/vmreg.hpp"
#include "metaprogramming/enableIf.hpp"
#include "oops/compressedOops.hpp"
#include "oops/compressedKlass.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "runtime/signature.hpp"

class ciInlineKlass;

class OopMap;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

 public:
  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter

    msr(0b011, 0b0100, 0b0010, 0b000, reg);
  }

  // idiv variant which deals with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
  int corrected_idivq(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
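  //
  // Illustrative (a sketch; the register choices are assumptions): Java defines
  // Integer.MIN_VALUE / -1 == Integer.MIN_VALUE and Integer.MIN_VALUE % -1 == 0,
  // and these helpers produce exactly that behaviour:
  //   __ corrected_idivl(r0, r0, r1, /* want_remainder */ false);  // r0 = r0 / r1
  //   __ corrected_idivq(r2, r2, r3, /* want_remainder */ true);   // r2 = r2 % r3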

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);
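  //
  // Illustrative (a sketch; the offsets shown are just examples): an access at a
  // small known offset needs no extra code because the faulting access itself
  // performs the check, while an unknown offset forces an explicit probe:
  //   __ null_check(r0, oopDesc::klass_offset_in_bytes());  // folded into the access
  //   __ null_check(r0);                                    // offset unknown: probe r0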

  // markWord tests, kills markWord reg
  void test_markword_is_inline_type(Register markword, Label& is_inline_type);

  // inlineKlass queries, kills temp_reg
  void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
  void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
  void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);

  // Get the default value oop for the given InlineKlass
  void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
  // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
  // get_default_value_oop with extra assertion for empty inline klass
  void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);

  void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
  void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
  void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
  void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);

  // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
  void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
  void test_flat_array_oop(Register klass, Register temp_reg, Label& is_flat_array);
  void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
  void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);

  // Check array klass layout helper for flat or null-free arrays...
  void test_flat_array_layout(Register lh, Label& is_flat_array);
  void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);

  static address target_addr_for_insn(address insn_addr, unsigned insn);
  static address target_addr_for_insn_or_null(address insn_addr, unsigned insn);
  static address target_addr_for_insn(address insn_addr) {
    unsigned insn = *(unsigned*)insn_addr;
    return target_addr_for_insn(insn_addr, insn);
  }
  static address target_addr_for_insn_or_null(address insn_addr) {
    unsigned insn = *(unsigned*)insn_addr;
    return target_addr_for_insn_or_null(insn_addr, insn);
  }

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }

  void reset_last_Java_frame(Register thread);

  // thread in the default location (rthread)
  void reset_last_Java_frame(bool clear_fp);

  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

  void resolve_jobject(Register value, Register tmp1, Register tmp2);
  void resolve_global_jobject(Register value, Register tmp1, Register tmp2);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);
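  //
  // Illustrative (register choice is an assumption): normalize a C 'boolean'
  // returned in r0 by a runtime call to a Java boolean value of 0 or 1:
  //   __ c2bool(r0);   // r0 = (r0 == 0) ? 0 : 1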

  void load_method_holder_cld(Register rresult, Register rmethod);
  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_metadata(Register dst, Register src);

  void load_narrow_klass_compact(Register dst, Register src);
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);
  void cmp_klass(Register obj, Register klass, Register tmp);
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
  void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
  void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register tmp2);

  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);

  // inline type data payload offsets...
  void payload_offset(Register inline_klass, Register offset);
  void payload_address(Register oop, Register data, Register inline_klass);
  // Get the data payload pointer for a flat value array element at the given index; kills index
  void data_for_value_array_index(Register array, Register array_klass,
                                  Register index, Register data);

  void load_heap_oop(Register dst, Address src, Register tmp1,
                     Register tmp2, DecoratorSet decorators = 0);

  void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                              Register tmp2, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1,
                      Register tmp2, Register tmp3, DecoratorSet decorators = 0);
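  //
  // Illustrative GC-aware oop store (a sketch; field_offset and the register
  // choices are assumptions, and IN_HEAP is just one possible decorator):
  //   __ store_heap_oop(Address(r0, field_offset), r1, r10, r11, r12, IN_HEAP);
  // The barrier set assembler expands this into whatever pre/post barriers the
  // selected GC requires.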

  // currently unimplemented
  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);
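  //
  // Typical pattern (illustrative sketch, assuming compressed oops are in use):
  // compress an oop before storing it as a narrowOop and expand it again after
  // a load:
  //   __ encode_heap_oop(r10, r0);   // r10 = narrow form of the oop in r0
  //   __ decode_heap_oop(r0, r10);   // r0  = full oop rebuilt from r10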

  void set_narrow_oop(Register dst, jobject obj);

  void encode_klass_not_null(Register r);

  void push_CPU_state(bool save_vectors = false, bool use_sve = false,
                      int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
  void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
                     int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);

  void push_cont_fastpath(Register java_thread = rthread);
  void pop_cont_fastpath(Register java_thread = rthread);

  void inc_held_monitor_count(Register tmp);
  void dec_held_monitor_count(Register tmp);

  // Round reg up to a multiple of modulus (a power of two)
  void round_to(Register reg, int modulus);

  // java.lang.Math::round intrinsics
  void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
  void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
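  //
  // Illustrative: Math.round adds 0.5 and floors, so round(2.5) == 3 while
  // round(-2.5) == -2. A hypothetical call site (register choices assumed):
  //   __ java_round_double(r0, v0, v1);   // r0 = Math.round(value in v0), v1 is a temp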

  // allocation

  // Object / value buffer allocation
  // Allocate an instance of klass; assumes klass has been initialized by the caller.
  // Kills t1 and t2, preserves klass; returns the allocation in new_obj.
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void tlab_allocate(
    Register obj,                // result: pointer to object after successful allocation
    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,  // object size in bytes if known at compile time
    Register t1,                 // temp register
    Register t2,                 // temp register
    Label&   slow_case           // continuation point if fast allocation fails
  );
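  //
  // Illustrative fast-path use (a sketch; the register choices and the size
  // constant are assumptions):
  //   __ tlab_allocate(r0, noreg, instance_size_in_bytes, r10, r11, slow_case);
  // On the fast path r0 points at the new, uninitialized object; slow_case is
  // taken when the TLAB cannot satisfy the request.
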
  void verify_tlab();

  // For field "index" within "klass", return inline_klass ...
  void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
  void inline_layout_info(Register holder_klass, Register index, Register layout_info);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);
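  //
  // Illustrative itable lookup (a sketch; the register assignments are
  // assumptions): on success method_result holds the Method* to invoke,
  // otherwise control transfers to L_no_such_interface:
  //   __ lookup_interface_method(r0 /*recv_klass*/, r1 /*intf_klass*/,
  //                              itable_index, rmethod, r10, L_no_such_interface);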

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register temp_reg,
                                    Register temp_reg2,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index

  }                                                                 \
                                                                    \
  void INSN(Register Rd, Register Rn, Register Rm) {                \
    Assembler::INSN(Rd, Rn, Rm);                                    \
  }                                                                 \
                                                                    \
  void INSN(Register Rd, Register Rn, Register Rm,                  \
            ext::operation option, int amount = 0) {                \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                    \
  }

  WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true)

  void add(Register Rd, Register Rn, RegisterOrConstant increment);
  void addw(Register Rd, Register Rn, RegisterOrConstant increment);
  void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
  void subw(Register Rd, Register Rn, RegisterOrConstant decrement);

  void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);

  void verified_entry(Compile* C, int sp_inc);

  // Inline type specific methods
  #include "asm/macroAssembler_common.hpp"

  int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
  bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
  bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                            VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                            RegState reg_state[]);
  bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                          VMRegPair* from, int from_count, int& from_index, VMReg to,
                          RegState reg_state[], Register val_array);
  int extend_stack_for_inline_args(int args_on_stack);
  void remove_frame(int initial_framesize, bool needs_stack_repair);
  VMReg spill_reg_for(VMReg reg);
  void save_stack_increment(int sp_inc, int frame_size);

  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);
    subsw(rscratch2, index, lowbound);
    subsw(zr, rscratch2, highbound - lowbound);
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);
  }
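  //
  // Illustrative use (a sketch): dispatch on a key in r2 known to lie in
  // [10, 20); out-of-range keys branch to 'done'. Each table entry is
  // 'stride' instructions long (one instruction with the default stride):
  //   Label table, done;
  //   __ tableswitch(r2, 10, 20, table, done);
  //   __ bind(table);
  //   // ... 10 branch entries, one per key ...
  //   __ bind(done);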

  // Form an address from base + offset in Rd. Rd may or may not
  // actually be used: you must use the Address that is returned. It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
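  //
  // Illustrative (a sketch; byte_offset is an assumed variable): build an
  // Address for an 8-byte load, materializing the offset in rscratch1 only
  // when it cannot be encoded directly:
  //   Address a = __ form_address(rscratch1, r3, byte_offset, 3 /* shift for 8-byte data */);
  //   __ ldr(r0, a);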

  // Return true iff an address is within the 48-bit AArch64 address
  // space.
  bool is_valid_AArch64_address(address a) {

#define ARRAYS_HASHCODE_REGISTERS \
  do {                            \
    assert(result == r0  &&       \
           ary    == r1  &&       \
           cnt    == r2  &&       \
           vdata0 == v3  &&       \
           vdata1 == v2  &&       \
           vdata2 == v1  &&       \
           vdata3 == v0  &&       \
           vmul0  == v4  &&       \
           vmul1  == v5  &&       \
           vmul2  == v6  &&       \
           vmul3  == v7  &&       \
           vpow   == v12 &&       \
           vpowm  == v13, "registers must match aarch64.ad"); \
  } while (0)

  void string_equals(Register a1, Register a2, Register result, Register cnt1);

  void fill_words(Register base, Register cnt, Register value);
  void fill_words(Register base, uint64_t cnt, Register value);

  address zero_words(Register base, uint64_t cnt);
  address zero_words(Register ptr, Register cnt);
  void zero_dcache_blocks(Register base, Register cnt);
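  //
  // Illustrative bulk clear/fill (a sketch; the register assignments are
  // assumptions): r10 = base, r11 = count of 8-byte words, r12 = fill pattern:
  //   __ zero_words(r10, r11);        // zero r11 words starting at r10
  //   __ fill_words(r10, r11, r12);   // store r12 into each of r11 words at r10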

  static const int zero_words_block_size;

  address byte_array_inflate(Register src, Register dst, Register len,
                             FloatRegister vtmp1, FloatRegister vtmp2,
                             FloatRegister vtmp3, Register tmp4);

  void char_array_compress(Register src, Register dst, Register len,
                           Register res,
                           FloatRegister vtmp0, FloatRegister vtmp1,
                           FloatRegister vtmp2, FloatRegister vtmp3,
                           FloatRegister vtmp4, FloatRegister vtmp5);

  void encode_iso_array(Register src, Register dst,
                        Register len, Register res, bool ascii,
                        FloatRegister vtmp0, FloatRegister vtmp1,
                        FloatRegister vtmp2, FloatRegister vtmp3,