1 /*
2 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP
26 #define CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP
27
28 // C2_MacroAssembler contains high-level macros for C2
29
 private:
  // Return true if the phase output is in the scratch emit size mode.
  virtual bool in_scratch_emit_size() override;

  // Helper for the NEON logical reduction code: emits the general-register
  // instruction selected by "opc" on Rd, Rn, Rm, optionally applying a
  // shift ("kind"/"shift") to the last operand; "sf" selects the operand
  // size variant. NOTE(review): exact opc values are defined at the call
  // sites — confirm in c2_MacroAssembler_aarch64.cpp.
  void neon_reduce_logical_helper(int opc, bool sf, Register Rd, Register Rn, Register Rm,
                                  enum shift_kind kind = Assembler::LSL, unsigned shift = 0);
36
 public:
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_aarch64.cpp.
  void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3);
  void fast_unlock(Register object, Register box, Register tmp, Register tmp2);

  // Compare the strings at str1/str2 (lengths in cnt1/cnt2) and set
  // "result" to their ordering. "ae" encodes the argument encodings
  // (presumably the Latin1/UTF-16 combinations — see the intrinsic
  // expansion for the exact values). The tmp/vtmp registers are scratch;
  // pgtmp1/pgtmp2 are SVE predicate scratch registers.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1, Register tmp2, FloatRegister vtmp1,
                      FloatRegister vtmp2, FloatRegister vtmp3,
                      PRegister pgtmp1, PRegister pgtmp2, int ae);
48
  // Search for the substring str2 (length cnt2) within str1 (length cnt1)
  // and place the match index in "result". "int_cnt1" is a compile-time
  // constant substring length when known (otherwise a sentinel — TODO
  // confirm against the .ad expansion); "ae" encodes the argument
  // encodings. tmp1-tmp6 are scratch registers.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      Register tmp5, Register tmp6,
                      int int_cnt1, Register result, int ae);

  // Find the first occurrence of char "ch" in the string str1 (length
  // cnt1); the index goes to "result" (-1 when absent — TODO confirm).
  void string_indexof_char(Register str1, Register cnt1,
                           Register ch, Register result,
                           Register tmp1, Register tmp2, Register tmp3);

  // Same operation for Latin1 (single-byte element) strings.
  void stringL_indexof_char(Register str1, Register cnt1,
                            Register ch, Register result,
                            Register tmp1, Register tmp2, Register tmp3);

  // SVE variant of the char-indexof operation; "isL" selects the Latin1
  // (true) vs two-byte (false) element size. ztmp1/ztmp2 are vector
  // scratch registers, pgtmp/ptmp predicate scratch registers.
  void string_indexof_char_sve(Register str1, Register cnt1,
                               Register ch, Register result,
                               FloatRegister ztmp1, FloatRegister ztmp2,
                               PRegister pgtmp, PRegister ptmp, bool isL);
68
  // Compress the least significant bit of each byte to the rightmost and clear
  // the higher garbage bits.
  void bytemask_compress(Register dst);

  // Pack the lowest-numbered bit of each mask element in src into a long value
  // in dst, at most the first 64 lane elements.
  void sve_vmask_tolong(Register dst, PRegister src, BasicType bt, int lane_cnt,
                        FloatRegister vtmp1, FloatRegister vtmp2);

  // Unpack the mask, a long value in src, into predicate register dst based on the
  // corresponding data type. Note that dst can support at most 64 lanes.
  void sve_vmask_fromlong(PRegister dst, Register src, BasicType bt, int lane_cnt,
                          FloatRegister vtmp1, FloatRegister vtmp2);

  // SIMD&FP comparison
  // Compare src1 against src2 per lane (lane type bt, condition cond) and
  // write the per-lane result mask to dst; "isQ" selects the 128-bit
  // (true) vs 64-bit (false) vector form.
  void neon_compare(FloatRegister dst, BasicType bt, FloatRegister src1,
                    FloatRegister src2, Condition cond, bool isQ);

  // As above, comparing each lane of src against zero.
  void neon_compare_zero(FloatRegister dst, BasicType bt, FloatRegister src,
                         Condition cond, bool isQ);

  // SVE comparison of zn against zm under the governing predicate pg; the
  // per-lane results are written to predicate pd.
  void sve_compare(PRegister pd, BasicType bt, PRegister pg,
                   FloatRegister zn, FloatRegister zm, Condition cond);

  // Set dst to the index of the last (highest-numbered) active lane of
  // mask src; ptmp is a predicate scratch register. TODO confirm the
  // value produced for an all-false mask.
  void sve_vmask_lasttrue(Register dst, BasicType bt, PRegister src, PRegister ptmp);
94
95 // Vector cast
96 void neon_vector_extend(FloatRegister dst, BasicType dst_bt, unsigned dst_vlen_in_bytes,
97 FloatRegister src, BasicType src_bt);
98
99 void neon_vector_narrow(FloatRegister dst, BasicType dst_bt,
100 FloatRegister src, BasicType src_bt, unsigned src_vlen_in_bytes);
101
102 void sve_vector_extend(FloatRegister dst, SIMD_RegVariant dst_size,
103 FloatRegister src, SIMD_RegVariant src_size);
104
105 void sve_vector_narrow(FloatRegister dst, SIMD_RegVariant dst_size,
106 FloatRegister src, SIMD_RegVariant src_size, FloatRegister tmp);
107
108 void sve_vmaskcast_extend(PRegister dst, PRegister src,
109 uint dst_element_length_in_bytes, uint src_element_lenght_in_bytes);
110
111 void sve_vmaskcast_narrow(PRegister dst, PRegister src, PRegister ptmp,
112 uint dst_element_length_in_bytes, uint src_element_lenght_in_bytes);
113
  // Vector reduction
  // Add-reduce the lanes of vsrc together with the scalar seed isrc and
  // place the integral result in dst. "bt" is the lane type and
  // "vector_length_in_bytes" the total vector width; vtmp is scratch.
  void neon_reduce_add_integral(Register dst, BasicType bt,
                                Register isrc, FloatRegister vsrc,
                                unsigned vector_length_in_bytes, FloatRegister vtmp);

  // As above, but multiplying the lanes.
  void neon_reduce_mul_integral(Register dst, BasicType bt,
                                Register isrc, FloatRegister vsrc,
                                unsigned vector_length_in_bytes,
                                FloatRegister vtmp1, FloatRegister vtmp2);

  // Floating-point multiply reduction: combine the scalar seed fsrc with
  // the lanes of vsrc; the result is left in dst.
  void neon_reduce_mul_fp(FloatRegister dst, BasicType bt,
                          FloatRegister fsrc, FloatRegister vsrc,
                          unsigned vector_length_in_bytes, FloatRegister vtmp);

  // Logical reduction (operation selected by "opc") of vsrc with the
  // scalar seed isrc; uses neon_reduce_logical_helper internally —
  // TODO confirm against the .cpp implementation.
  void neon_reduce_logical(int opc, Register dst, BasicType bt, Register isrc,
                           FloatRegister vsrc, unsigned vector_length_in_bytes);

  // Min/max reduction ("opc" selects which) of vsrc with the scalar
  // seed isrc.
  void neon_reduce_minmax_integral(int opc, Register dst, BasicType bt,
                                   Register isrc, FloatRegister vsrc,
                                   unsigned vector_length_in_bytes, FloatRegister vtmp);

  // SVE integral reduction of src2 under governing predicate pg, combined
  // with the scalar src1; "opc" selects the reduction operation and tmp
  // is a vector scratch register.
  void sve_reduce_integral(int opc, Register dst, BasicType bt, Register src1,
                           FloatRegister src2, PRegister pg, FloatRegister tmp);
137
  // Set elements of the dst predicate to true for lanes in the range of
  // [0, lane_cnt), or to false otherwise. The input "lane_cnt" should be
  // smaller than or equal to the supported max vector length of the basic
  // type. Clobbers: rscratch1 and the rFlagsReg.
  void sve_gen_mask_imm(PRegister dst, BasicType bt, uint32_t lane_cnt);

  // Extract a scalar element from an sve vector at position 'idx'.
  // The input elements in src are expected to be of integral type.
  // "vtmp" is a vector scratch register.
  void sve_extract_integral(Register dst, BasicType bt, FloatRegister src,
                            int idx, FloatRegister vtmp);
148
  // java.lang.Math::round intrinsics
  // NEON implementation; "T" gives the lane arrangement and tmp1-tmp3 are
  // vector scratch registers.
  void vector_round_neon(FloatRegister dst, FloatRegister src, FloatRegister tmp1,
                         FloatRegister tmp2, FloatRegister tmp3,
                         SIMD_Arrangement T);
  // SVE implementation; "T" gives the element size and pgtmp is a
  // predicate scratch register.
  void vector_round_sve(FloatRegister dst, FloatRegister src, FloatRegister tmp1,
                        FloatRegister tmp2, PRegister pgtmp,
                        SIMD_RegVariant T);

  // Pack active elements of src, under the control of mask, into the
  // lowest-numbered elements of dst. Any remaining elements of dst will
  // be filled with zero. Byte-lane variant.
  void sve_compress_byte(FloatRegister dst, FloatRegister src, PRegister mask,
                         FloatRegister vtmp1, FloatRegister vtmp2,
                         FloatRegister vtmp3, FloatRegister vtmp4,
                         PRegister ptmp, PRegister pgtmp);

  // Same packing operation for short (two-byte) lanes.
  void sve_compress_short(FloatRegister dst, FloatRegister src, PRegister mask,
                          FloatRegister vtmp1, FloatRegister vtmp2,
                          PRegister pgtmp);

  // Reverse the order of the bits within each lane of src (lane type bt);
  // "isQ" selects the 128-bit (true) vs 64-bit (false) vector form.
  void neon_reverse_bits(FloatRegister dst, FloatRegister src, BasicType bt, bool isQ);

  // Reverse the byte order within each lane of src (lane type bt).
  void neon_reverse_bytes(FloatRegister dst, FloatRegister src, BasicType bt, bool isQ);

  // java.lang.Math::signum intrinsics
  // NEON variant: "zero" and "one" presumably hold the constants 0.0 and
  // 1.0 — confirm against the matcher expansion.
  void vector_signum_neon(FloatRegister dst, FloatRegister src, FloatRegister zero,
                          FloatRegister one, SIMD_Arrangement T);

  // SVE variant; vtmp is a vector scratch register and pgtmp a predicate
  // scratch register.
  void vector_signum_sve(FloatRegister dst, FloatRegister src, FloatRegister zero,
                         FloatRegister one, FloatRegister vtmp, PRegister pgtmp, SIMD_RegVariant T);
179
180 #endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP