1 /*
  2  * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_C2_MACROASSEMBLER_X86_HPP
 26 #define CPU_X86_C2_MACROASSEMBLER_X86_HPP
 27 
 28 // C2_MacroAssembler contains high-level macros for C2
 29 
public:
  // Map a vector width in bytes (16/32/64) to the corresponding
  // Assembler::AvxVectorLen encoding used by AVX/EVEX instructions.
  Assembler::AvxVectorLen vector_length_encoding(int vlen_in_bytes);

  // special instructions for EVEX
  // Save/set and restore the EVEX opmask state around code that clobbers it.
  void setvectmask(Register dst, Register src, KRegister mask);
  void restorevectmask(KRegister mask);

  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  // Emits the fast-path monitor enter sequence; the RTM arguments are only
  // used when use_rtm is true (transactional lock elision).
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  // Fast-path monitor exit, matching fast_lock above.
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 46 
#if INCLUDE_RTM_OPT
  // Helpers for the RTM (Restricted Transactional Memory) locking paths
  // emitted by fast_lock/fast_unlock. Only compiled when RTM support is on.

  // Fold the RTM abort status bits into the per-lock counters.
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  // Branch to brLabel pseudo-randomly (about 1 in 'count' executions),
  // using RDTSC as the entropy source; used to sample RTM profiling work.
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  // Recompute the abort ratio and decide whether RTM should stay enabled
  // for this lock site, updating method_data when profiling.
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  // Combined counter update + abort-ratio calculation (sampled when
  // profile_rtm is set).
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  // Retry a transaction after an abort while the retry budget lasts.
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  // Retry after finding the lock busy: spin briefly, then jump to retryLabel.
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  // RTM elision of a stack (non-inflated) lock; falls through to IsInflated
  // when the object monitor turns out to be inflated.
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  // RTM elision of an inflated monitor lock.
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
 69 
  // Generic instructions support for use in .ad files C2 code generation

  // Vector absolute-value / negation for doubles; 'opcode' selects which.
  // scr is a scratch GPR used to materialize the sign-mask constant address.
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, Register scr);
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  // Same as above for floats.
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);

  // Packed integer min/max, 128-bit form; 'opcode' selects min vs max,
  // elem_bt gives the lane type.
  void pminmax(int opcode, BasicType elem_bt, XMMRegister dst, XMMRegister src,
               XMMRegister tmp = xnoreg);
  // Wide (VEX/EVEX-encoded) integer min/max.
  void vpminmax(int opcode, BasicType elem_bt,
                XMMRegister dst, XMMRegister src1, XMMRegister src2,
                int vlen_enc);

  // Float/double vector min/max with Java semantics (NaN and -0.0/+0.0
  // handling), AVX form using XMM temporaries.
  void vminmax_fp(int opcode, BasicType elem_bt,
                  XMMRegister dst, XMMRegister a, XMMRegister b,
                  XMMRegister tmp, XMMRegister atmp, XMMRegister btmp,
                  int vlen_enc);
  // EVEX form of the above, using an opmask temporary instead of a vector tmp.
  void evminmax_fp(int opcode, BasicType elem_bt,
                   XMMRegister dst, XMMRegister a, XMMRegister b,
                   KRegister ktmp, XMMRegister atmp, XMMRegister btmp,
                   int vlen_enc);

  // Math.signum for float/double: dst gets -1.0/0.0/+1.0 per the sign of dst.
  // 'zero' and 'one' hold preloaded constants.
  void signum_fp(int opcode, XMMRegister dst,
                 XMMRegister zero, XMMRegister one,
                 Register scratch);

  // Vector lane compress/expand under opmask control; 'opcode' selects the
  // direction, 'merge' chooses merge- vs zero-masking.
  void vector_compress_expand(int opcode, XMMRegister dst, XMMRegister src, KRegister mask,
                              bool merge, BasicType bt, int vec_enc);

  // Compress the set bits of a mask register (mask_len lanes) into dst.
  void vector_mask_compress(KRegister dst, KRegister src, Register rtmp1, Register rtmp2, int mask_len);

  // Lane-widening moves: b->w, b->d, w->d; 'sign' picks sign- vs zero-extension.
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
  void vextendbd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendwd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
104 
  // Vector shifts (dword/word/qword lanes). 'opcode' selects the ideal shift
  // kind (left / logical right / arithmetic right); the two-operand forms
  // shift dst in place, the three-operand forms are VEX-encoded. The _imm
  // variants take the shift count as an immediate instead of in an XMM reg.
  void vshiftd(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftd_imm(int opcode, XMMRegister dst, int shift);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftd_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftq_imm(int opcode, XMMRegister dst, int shift);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftq_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  // Vector rotates with immediate or per-vector (variable) rotate counts.
  void vprotate_imm(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vprotate_var(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Variable (per-lane) shifts. varshiftbw/evarshiftb handle byte lanes,
  // which have no native variable-shift instruction and need temporaries.
  void varshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void varshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void varshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc, XMMRegister vtmp = xnoreg);
  void varshiftbw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp, Register scratch);
  void evarshiftb(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp, Register scratch);

  // Insert a scalar GPR value into lane 'idx' of a vector.
  void insert(BasicType typ, XMMRegister dst, Register val, int idx);
  void vinsert(BasicType typ, XMMRegister dst, XMMRegister src, Register val, int idx);
  // Indexed gather/scatter: load (vgather/evgather) or store (evscatter)
  // lanes at base + idx[i]; the ev* forms are EVEX-masked.
  void vgather(BasicType typ, XMMRegister dst, Register base, XMMRegister idx, XMMRegister mask, int vector_len);
  void evgather(BasicType typ, XMMRegister dst, KRegister mask, Register base, XMMRegister idx, int vector_len);
  void evscatter(BasicType typ, Register base, XMMRegister idx, KRegister mask, XMMRegister src, int vector_len);

  // Opmask-predicated vector load/store selected by element type.
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);
133 
  // extract
  // Move lane 'idx' of a vector into a GPR.
  void extract(BasicType typ, Register dst, XMMRegister src, int idx);
  // Position lane 'elemindex' of src so it is readable as the low element of
  // the returned register (which is dst or src depending on the case).
  XMMRegister get_lane(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex);
  // Extract one element into a GPR (integral types) or XMM reg (FP types).
  void get_elem(BasicType typ, Register dst, XMMRegister src, int elemindex);
  void get_elem(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex, Register tmp = noreg, XMMRegister vtmp = xnoreg);

  // vector test
  // Emit a vector test (e.g. for VectorTest nodes); temporaries are only
  // needed on some paths, 'mask' only on EVEX paths.
  void vectortest(int bt, int vlen, XMMRegister src1, XMMRegister src2,
                  XMMRegister vtmp1 = xnoreg, XMMRegister vtmp2 = xnoreg, KRegister mask = knoreg);

  // blend
  // EVEX compare producing an opmask (kdmask), optionally predicated by
  // ksmask; the AddressLiteral form compares against an in-memory constant.
  void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral adr, int comparison, int vector_len, Register scratch = rscratch1);
  void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister src2, int comparison, int vector_len);
  // Opmask-controlled blend of src1/src2 into dst.
  void evpblend(BasicType typ, XMMRegister dst, KRegister kmask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len);

  // Materialize a vector mask from src; the KRegister form converts a vector
  // mask into an opmask (novlbwdq = no AVX512VL/BW/DQ available).
  void load_vector_mask(XMMRegister dst, XMMRegister src, int vlen_in_bytes, BasicType elem_bt, bool is_legacy);
  void load_vector_mask(KRegister dst, XMMRegister src, XMMRegister xtmp, Register tmp, bool novlbwdq, int vlen_enc);

  // Load the iota (0,1,2,...) index constant of the requested width.
  void load_iota_indices(XMMRegister dst, Register scratch, int vlen_in_bytes);

  // vector compare
  // Unsigned vector comparisons; vpcmpu handles <= 16-byte, vpcmpu32 the
  // 32-byte case (which needs an extra temporary).
  void vpcmpu(BasicType typ, XMMRegister dst, XMMRegister src1, XMMRegister src2, ComparisonPredicate comparison, int vlen_in_bytes,
              XMMRegister vtmp1, XMMRegister vtmp2, Register scratch);
  void vpcmpu32(BasicType typ, XMMRegister dst, XMMRegister src1, XMMRegister src2, ComparisonPredicate comparison, int vlen_in_bytes,
                XMMRegister vtmp1, XMMRegister vtmp2, XMMRegister vtmp3, Register scratch);
159 
  // Reductions for vectors of bytes, shorts, ints, longs, floats, and doubles.
  // The public reduceX entry points dispatch on 'vlen' to the private
  // per-width helpers below; 'opcode' is the ideal reduction operation
  // (add, mul, min, max, and, or, xor, ...).

  // dst = src1  reduce(op, src2) using vtmp as temps
  void reduceI(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
#ifdef _LP64
  void reduceL(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  // Build an opmask with the low 'len' bits set.
  void genmask(KRegister dst, Register len, Register temp);
#endif // _LP64

  // dst = reduce(op, src2) using vtmp as temps
  void reduce_fp(int opcode, int vlen,
                 XMMRegister dst, XMMRegister src,
                 XMMRegister vtmp1, XMMRegister vtmp2 = xnoreg);
  void reduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  // Byte multiply reductions get dedicated helpers because there is no
  // packed byte multiply instruction.
  void mulreduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceS(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  // Float/double min/max reductions need Java NaN/-0.0 semantics and hence
  // extra temporaries; is_dst_valid says whether dst carries an initial value.
  void reduceFloatMinMax(int opcode, int vlen, bool is_dst_valid,
                         XMMRegister dst, XMMRegister src,
                         XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
  void reduceDoubleMinMax(int opcode, int vlen, bool is_dst_valid,
                          XMMRegister dst, XMMRegister src,
                          XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
 private:
  void reduceF(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceD(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Int Reduction
  void reduce2I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce4I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16I(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Byte Reduction
  void reduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Short Reduction
  void reduce4S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce32S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Long Reduction
#ifdef _LP64
  void reduce2L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce4L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
#endif // _LP64

  // Float Reduction
  void reduce2F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce4F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce8F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16F(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Double Reduction
  void reduce2D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce4D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Base reduction instruction
  // Apply one 128-/256-bit step of the reduction; the wider helpers above
  // narrow down to these.
  void reduce_operation_128(BasicType typ, int opcode, XMMRegister dst, XMMRegister src);
  void reduce_operation_256(BasicType typ, int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2);
 public:
#ifdef _LP64
  // VectorMask operations (trueCount/firstTrue/lastTrue/toLong, selected by
  // 'opc') on a mask already materialized in a GPR.
  void vector_mask_operation_helper(int opc, Register dst, Register tmp, int masklen);

  // Same operations starting from an opmask register ...
  void vector_mask_operation(int opc, Register dst, KRegister mask, Register tmp, int masklen, int masksize, int vec_enc);

  // ... or from a vector-register mask.
  void vector_mask_operation(int opc, Register dst, XMMRegister mask, XMMRegister xtmp,
                             Register tmp, int masklen, BasicType bt, int vec_enc);
  // Expand the low mask_len bits of a GPR into a per-lane vector mask.
  void vector_long_to_maskvec(XMMRegister dst, Register src, Register rtmp1,
                              Register rtmp2, XMMRegister xtmp, int mask_len, int vec_enc);
#endif

  // Set all mask_len lanes of an opmask to the (boolean) value in src.
  void vector_maskall_operation(KRegister dst, Register src, int mask_len);

#ifndef _LP64
  // 32-bit-only variant for masks wider than a 32-bit GPR.
  void vector_maskall_operation32(KRegister dst, Register src, KRegister ktmp, int mask_len);
#endif

  // StringUTF16.indexOfChar intrinsic: find 'ch' in str1[0..cnt1), result is
  // the index or -1.
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // Latin-1 (byte string) variant of the above.
  void stringL_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
253 
  // IndexOf strings.
  // Small strings are loaded through stack if they cross page boundary.
  // 'ae' encodes the argument encodings (Latin1/UTF-16 combinations);
  // int_cnt2 is the substring length when known at compile time.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  void string_indexofC8(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

    // Smallest code: we don't need to load through stack,
    // check string tail.

  // helper function for string_compare
  // Load the next element of each string into elem1/elem2; the scale factors
  // account for each string's character width.
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  // Lexicographic comparison; result is negative/zero/positive. 'mask' is
  // only used on AVX-512 paths.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae, KRegister mask = knoreg);

  // Search for Non-ASCII character (Negative byte value) in a byte array,
  // return true if it has any and false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2, KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Compare char[] or byte[] arrays.
  // is_array_equ distinguishes Arrays.equals (length check + header skip)
  // from the raw region-equals form; 'mask' is only used on AVX-512 paths.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char, KRegister mask = knoreg);
292 
293 
294   void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
295                    XMMRegister dst, XMMRegister src1, XMMRegister src2,
296                    bool merge, int vlen_enc, bool is_varshift = false);
297 
298   void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
299                    XMMRegister dst, XMMRegister src1, Address src2,
300                    bool merge, int vlen_enc);
301 
302   void evmasked_op(int ideal_opc, BasicType eType, KRegister mask, XMMRegister dst,
303                    XMMRegister src1, int imm8, bool merge, int vlen_enc);
304 
305   void masked_op(int ideal_opc, int mask_len, KRegister dst,
306                  KRegister src1, KRegister src2);
307 
308   void vector_castF2I_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
309                           XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4,
310                           AddressLiteral float_sign_flip, Register scratch, int vec_enc);
311 
312   void vector_castF2I_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
313                            KRegister ktmp1, KRegister ktmp2, AddressLiteral float_sign_flip,
314                            Register scratch, int vec_enc);
315 
316   void vector_castD2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
317                            KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip,
318                            Register scratch, int vec_enc);
319 #endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP