/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP
#define CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP

// C2_MacroAssembler contains high-level macros for C2

 private:
  // Return true if the phase output is in the scratch emit size mode.
  virtual bool in_scratch_emit_size() override;

  void element_compare(Register r1, Register r2,
                       Register result, Register cnt,
                       Register tmp1, Register tmp2,
                       VectorRegister vr1, VectorRegister vr2,
                       VectorRegister vrs,
                       bool is_latin, Label& DONE, Assembler::LMUL lmul);

  void compress_bits_v(Register dst, Register src, Register mask, bool is_long);
  void expand_bits_v(Register dst, Register src, Register mask, bool is_long);

 public:
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  void fast_lock(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);
  void fast_unlock(Register object, Register box, Register tmp1, Register tmp2);
  // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
  void fast_lock_lightweight(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);
  void fast_unlock_lightweight(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);

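  // For the string intrinsics below, 'ae' is the argument encoding of the
  // strings involved (StrIntrinsicNode::LL/UU/LU/UL), and 'isL' selects the
  // Latin-1 (byte) variant over the UTF-16 (char) variant.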
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1, Register tmp2, Register tmp3,
                      int ae);

  void string_indexof_char_short(Register str1, Register cnt1,
                                 Register ch, Register result,
                                 bool isL);

  void string_indexof_char(Register str1, Register cnt1,
                           Register ch, Register result,
                           Register tmp1, Register tmp2,
                           Register tmp3, Register tmp4,
                           bool isL);

  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      Register tmp5, Register tmp6,
                      Register result, int ae);

  void string_indexof_linearscan(Register haystack, Register needle,
                                 Register haystack_len, Register needle_len,
                                 Register tmp1, Register tmp2,
                                 Register tmp3, Register tmp4,
                                 int needle_con_cnt, Register result, int ae);

  void arrays_equals(Register r1, Register r2,
                     Register tmp1, Register tmp2, Register tmp3,
                     Register result, int elem_size);

  void arrays_hashcode(Register ary, Register cnt, Register result,
                       Register tmp1, Register tmp2,
                       Register tmp3, Register tmp4,
                       Register tmp5, Register tmp6,
                       BasicType eltype);

  // helper functions for arrays_hashcode
  int arrays_hashcode_elsize(BasicType eltype);
  void arrays_hashcode_elload(Register dst, Address src, BasicType eltype);

  void string_equals(Register r1, Register r2,
                     Register result, Register cnt1);

  // refer to conditional_branches and float_conditional_branches
  static const int bool_test_bits = 3;
  static const int neg_cond_bits = 2;
  static const int unsigned_branch_mask = 1 << bool_test_bits;
  static const int double_branch_mask = 1 << bool_test_bits;
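  // A cmpFlag packs a BoolTest condition into the low bool_test_bits bits;
  // or-ing in unsigned_branch_mask (integer) or double_branch_mask (FP)
  // selects the unsigned/double form of the comparison. Illustrative example:
  // (BoolTest::le | unsigned_branch_mask) requests an unsigned <= branch.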

  // cmp
  void cmp_branch(int cmpFlag,
                  Register op1, Register op2,
                  Label& label, bool is_far = false);

  void float_cmp_branch(int cmpFlag,
                        FloatRegister op1, FloatRegister op2,
                        Label& label, bool is_far = false);

  void enc_cmpUEqNeLeGt_imm0_branch(int cmpFlag, Register op,
                                    Label& L, bool is_far = false);

  void enc_cmpEqNe_imm0_branch(int cmpFlag, Register op,
                               Label& L, bool is_far = false);

  void enc_cmove(int cmpFlag,
                 Register op1, Register op2,
                 Register dst, Register src);

  void spill(Register r, bool is64, int offset) {
    is64 ? sd(r, Address(sp, offset))
         : sw(r, Address(sp, offset));
  }

  void spill(FloatRegister f, bool is64, int offset) {
    is64 ? fsd(f, Address(sp, offset))
         : fsw(f, Address(sp, offset));
  }

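  // Note: the vector spill/unspill helpers below materialize the stack
  // address in t0, so t0 is clobbered.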
  void spill(VectorRegister v, int offset) {
    add(t0, sp, offset);
    vs1r_v(v, t0);
  }

  void unspill(Register r, bool is64, int offset) {
    is64 ? ld(r, Address(sp, offset))
         : lw(r, Address(sp, offset));
  }

  void unspillu(Register r, bool is64, int offset) {
    is64 ? ld(r, Address(sp, offset))
         : lwu(r, Address(sp, offset));
  }

  void unspill(FloatRegister f, bool is64, int offset) {
    is64 ? fld(f, Address(sp, offset))
         : flw(f, Address(sp, offset));
  }

  void unspill(VectorRegister v, int offset) {
    add(t0, sp, offset);
    vl1r_v(v, t0);
  }

  void spill_copy_vector_stack_to_stack(int src_offset, int dst_offset, uint vector_length_in_bytes) {
    assert(vector_length_in_bytes % 16 == 0, "unexpected vector reg size");
    for (int i = 0; i < (int)vector_length_in_bytes / 8; i++) {
      unspill(t0, true, src_offset + (i * 8));
      spill(t0, true, dst_offset + (i * 8));
    }
  }

  void minmax_fp(FloatRegister dst,
                 FloatRegister src1, FloatRegister src2,
                 bool is_double, bool is_min);

  void round_double_mode(FloatRegister dst, FloatRegister src, int round_mode,
                         Register tmp1, Register tmp2, Register tmp3);

  void signum_fp(FloatRegister dst, FloatRegister one, bool is_double);

  void float16_to_float(FloatRegister dst, Register src, Register tmp);
  void float_to_float16(Register dst, FloatRegister src, FloatRegister ftmp, Register xtmp);

  void signum_fp_v(VectorRegister dst, VectorRegister one, BasicType bt, int vlen);

  // intrinsic methods implemented by rvv instructions

  // compress bits, i.e. j.l.Integer/Long::compress.
  void compress_bits_i_v(Register dst, Register src, Register mask);
  void compress_bits_l_v(Register dst, Register src, Register mask);
  // expand bits, i.e. j.l.Integer/Long::expand.
  void expand_bits_i_v(Register dst, Register src, Register mask);
  void expand_bits_l_v(Register dst, Register src, Register mask);
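  // Semantics refresher (values illustrative): compress gathers the src bits
  // selected by the mask into the low result bits, e.g.
  // Integer.compress(0b1010, 0b0110) == 0b01; expand is the inverse scatter,
  // e.g. Integer.expand(0b01, 0b0110) == 0b0010.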

  void java_round_float_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length);
  void java_round_double_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length);

  void float16_to_float_v(VectorRegister dst, VectorRegister src, uint vector_length);
  void float_to_float16_v(VectorRegister dst, VectorRegister src, VectorRegister vtmp, Register tmp, uint vector_length);

  void string_equals_v(Register r1, Register r2,
                       Register result, Register cnt1);

  void arrays_equals_v(Register r1, Register r2,
                       Register result, Register cnt1,
                       int elem_size);

  void string_compare_v(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        Register result,
                        Register tmp1, Register tmp2,
                        int encForm);

  void clear_array_v(Register base, Register cnt);

  void byte_array_inflate_v(Register src, Register dst,
                            Register len, Register tmp);

  void char_array_compress_v(Register src, Register dst,
                             Register len, Register result,
                             Register tmp);

  void encode_iso_array_v(Register src, Register dst,
                          Register len, Register result,
                          Register tmp, bool ascii);

  void count_positives_v(Register ary, Register len,
                         Register result, Register tmp);

  void string_indexof_char_v(Register str1, Register cnt1,
                             Register ch, Register result,
                             Register tmp1, Register tmp2,
                             bool isL);

  void minmax_fp_v(VectorRegister dst,
                   VectorRegister src1, VectorRegister src2,
                   BasicType bt, bool is_min, uint vector_length);

  void minmax_fp_masked_v(VectorRegister dst, VectorRegister src1, VectorRegister src2,
                          VectorRegister vmask, VectorRegister tmp1, VectorRegister tmp2,
                          BasicType bt, bool is_min, uint vector_length);

  void reduce_minmax_fp_v(FloatRegister dst,
                          FloatRegister src1, VectorRegister src2,
                          VectorRegister tmp1, VectorRegister tmp2,
                          bool is_double, bool is_min, uint vector_length,
                          VectorMask vm = Assembler::unmasked);

  void reduce_integral_v(Register dst, Register src1,
                         VectorRegister src2, VectorRegister tmp,
                         int opc, BasicType bt, uint vector_length,
                         VectorMask vm = Assembler::unmasked);

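  // Configures the vector unit for the following operations: SEW is derived
  // from bt, vl from vector_length, with the given LMUL; tmp may be used when
  // the length cannot be encoded as an immediate (high-level sketch; see the
  // matching .cpp for the exact emission).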
  void vsetvli_helper(BasicType bt, uint vector_length, LMUL vlmul = Assembler::m1, Register tmp = t0);

  void compare_integral_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, int cond,
                          BasicType bt, uint vector_length, VectorMask vm = Assembler::unmasked);

  void compare_fp_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, int cond,
                    BasicType bt, uint vector_length, VectorMask vm = Assembler::unmasked);

  void spill_vmask(VectorRegister v, int offset);

  void unspill_vmask(VectorRegister v, int offset);

  void spill_copy_vmask_stack_to_stack(int src_offset, int dst_offset, uint vector_length_in_bytes) {
    assert(vector_length_in_bytes % 4 == 0, "unexpected vector mask reg size");
    for (int i = 0; i < (int)vector_length_in_bytes / 4; i++) {
      unspill(t0, false, src_offset + (i * 4));
      spill(t0, false, dst_offset + (i * 4));
    }
  }

  void integer_extend_v(VectorRegister dst, BasicType dst_bt, uint vector_length,
                        VectorRegister src, BasicType src_bt, bool is_signed);

  void integer_narrow_v(VectorRegister dst, BasicType dst_bt, uint vector_length,
                        VectorRegister src, BasicType src_bt);

  void vfcvt_rtz_x_f_v_safe(VectorRegister dst, VectorRegister src);

  void extract_v(Register dst, VectorRegister src, BasicType bt, int idx, VectorRegister tmp);
  void extract_fp_v(FloatRegister dst, VectorRegister src, BasicType bt, int idx, VectorRegister tmp);

#endif // CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP