1 /*
  2  * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_C2_MACROASSEMBLER_X86_HPP
 26 #define CPU_X86_C2_MACROASSEMBLER_X86_HPP
 27 
 28 // C2_MacroAssembler contains high-level macros for C2
 29 
public:
  // C2 compiled method's prolog code.
  void verified_entry(Compile* C, int sp_inc = 0);

  // nmethod entry barrier: the inline check emitted at method entry, plus the
  // out-of-line stub it branches to (see C2EntryBarrierStub) and the stub's
  // fixed code size used for buffer sizing.
  void entry_barrier();
  void emit_entry_barrier_stub(C2EntryBarrierStub* stub);
  static int entry_barrier_stub_size();

  // Map a vector length in bytes to the assembler's AVX vector-length encoding.
  Assembler::AvxVectorLen vector_length_encoding(int vlen_in_bytes);

  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 49 
#if INCLUDE_RTM_OPT
  // RTM (Restricted Transactional Memory) locking helpers used by fast_lock/
  // fast_unlock when use_rtm is set. The *_counters/profiling entries update
  // RTMLockingCounters to drive the abort-ratio heuristics; the retry helpers
  // loop back to retryLabel while the respective retry counter is non-zero.
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  // Branch to brLabel pseudo-randomly (rdtsc-based) roughly once per 'count' calls.
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  // Transactional stack-lock path; falls through to IsInflated when the lock
  // is inflated, jumps to DONE_LABEL on success.
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  // Transactional inflated-monitor lock path; jumps to DONE_LABEL on success.
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
 72 
  // Generic instructions support for use in .ad files C2 code generation

  // Absolute value / negation of packed double (d) and float (f) elements;
  // 'opcode' selects which of the two operations is emitted.
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src);
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len);

  // Element-wise min/max; 'opcode' selects min vs max, 'elem_bt' the element type.
  void pminmax(int opcode, BasicType elem_bt, XMMRegister dst, XMMRegister src,
               XMMRegister tmp = xnoreg);
  void vpminmax(int opcode, BasicType elem_bt,
                XMMRegister dst, XMMRegister src1, XMMRegister src2,
                int vlen_enc);

  // Floating-point min/max with Java semantics (AVX and EVEX variants);
  // atmp/btmp (and tmp/ktmp) are scratch registers clobbered by the sequence.
  void vminmax_fp(int opcode, BasicType elem_bt,
                  XMMRegister dst, XMMRegister a, XMMRegister b,
                  XMMRegister tmp, XMMRegister atmp, XMMRegister btmp,
                  int vlen_enc);
  void evminmax_fp(int opcode, BasicType elem_bt,
                   XMMRegister dst, XMMRegister a, XMMRegister b,
                   KRegister ktmp, XMMRegister atmp, XMMRegister btmp,
                   int vlen_enc);

  // Math.signum for a scalar FP value; 'zero' and 'one' hold the constants 0.0 and 1.0.
  void signum_fp(int opcode, XMMRegister dst, XMMRegister zero, XMMRegister one);

  // Masked vector compress/expand ('opcode' selects which) under 'mask'.
  void vector_compress_expand(int opcode, XMMRegister dst, XMMRegister src, KRegister mask,
                              bool merge, BasicType bt, int vec_enc);

  void vector_mask_compress(KRegister dst, KRegister src, Register rtmp1, Register rtmp2, int mask_len);

  // Sign/zero extension ('sign' selects) of byte->word, byte->dword, word->dword elements.
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
  void vextendbd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendwd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);

  // Vector shifts by a shared (scalar-in-xmm) or immediate count;
  // d/w/q suffix gives the element width, 'opcode' the shift kind.
  void vshiftd(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftd_imm(int opcode, XMMRegister dst, int shift);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftd_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftq_imm(int opcode, XMMRegister dst, int shift);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftq_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  // Vector rotates by an immediate or per-element variable count.
  void vprotate_imm(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vprotate_var(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Variable (per-element) vector shifts.
  void varshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void varshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void varshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc, XMMRegister vtmp = xnoreg);
  void varshiftbw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp);
  void evarshiftb(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp);

  // Lane insert and gather/scatter through index vector 'idx'.
  void insert(BasicType typ, XMMRegister dst, Register val, int idx);
  void vinsert(BasicType typ, XMMRegister dst, XMMRegister src, Register val, int idx);
  void vgather(BasicType typ, XMMRegister dst, Register base, XMMRegister idx, XMMRegister mask, int vector_len);
  void evgather(BasicType typ, XMMRegister dst, KRegister mask, Register base, XMMRegister idx, int vector_len);
  void evscatter(BasicType typ, Register base, XMMRegister idx, KRegister mask, XMMRegister src, int vector_len);

  // Masked vector load/store; 'merge' selects merge- vs zero-masking.
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
  // extract: read lane 'idx'/'elemindex' of a vector into a GP or XMM register.
  void extract(BasicType typ, Register dst, XMMRegister src, int idx);
  // Returns the register that ends up holding the requested lane (may be src itself).
  XMMRegister get_lane(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex);
  void get_elem(BasicType typ, Register dst, XMMRegister src, int elemindex);
  void get_elem(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex, XMMRegister vtmp = xnoreg);

  // vector test
  void vectortest(int bt, int vlen, XMMRegister src1, XMMRegister src2,
                  XMMRegister vtmp1 = xnoreg, XMMRegister vtmp2 = xnoreg, KRegister mask = knoreg);

 // Convert B2X: widen byte vector elements to the element type 'to_elem_bt'.
 void vconvert_b2x(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vlen_enc);
#ifdef _LP64
 void vpbroadcast(BasicType elem_bt, XMMRegister dst, Register src, int vlen_enc);
#endif

  // blend: masked compare producing a KRegister mask, and mask-driven blend.
  void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister    src2, int comparison, int vector_len);
  void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral src2, int comparison, int vector_len, Register rscratch = noreg);
  void evpblend(BasicType typ, XMMRegister dst, KRegister kmask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len);

  // Load 'vlen_in_bytes' bytes into a vector register; the AddressLiteral
  // variant may need 'rscratch' to materialize the address.
  void load_vector(XMMRegister dst, Address        src, int vlen_in_bytes);
  void load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch = noreg);

  void load_vector_mask(XMMRegister dst, XMMRegister src, int vlen_in_bytes, BasicType elem_bt, bool is_legacy);
  void load_vector_mask(KRegister   dst, XMMRegister src, XMMRegister xtmp, bool novlbwdq, int vlen_enc);

  void load_constant_vector(BasicType bt, XMMRegister dst, InternalAddress src, int vlen);
  // Load the [0, 1, 2, ...] index vector used by shuffle/rearrange operations.
  void load_iota_indices(XMMRegister dst, int vlen_in_bytes);
164 
  // Reductions for vectors of bytes, shorts, ints, longs, floats, and doubles.
  // In all of these, vtmp1/vtmp2 (and tmp/atmp/btmp) are scratch registers
  // that the emitted sequence clobbers.

  // dst = src1  reduce(op, src2) using vtmp as temps
  void reduceI(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
#ifdef _LP64
  void reduceL(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  // Build a KRegister mask covering the first 'len' lanes.
  void genmask(KRegister dst, Register len, Register temp);
#endif // _LP64

  // dst = reduce(op, src2) using vtmp as temps
  void reduce_fp(int opcode, int vlen,
                 XMMRegister dst, XMMRegister src,
                 XMMRegister vtmp1, XMMRegister vtmp2 = xnoreg);
  void reduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceS(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  // FP min/max reductions; when is_dst_valid, dst also participates as an input.
  void reduceFloatMinMax(int opcode, int vlen, bool is_dst_valid,
                         XMMRegister dst, XMMRegister src,
                         XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
  void reduceDoubleMinMax(int opcode, int vlen, bool is_dst_valid,
                          XMMRegister dst, XMMRegister src,
                          XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
 private:
  // Per-type dispatchers that select the width-specific helper below from 'vlen'.
  void reduceF(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceD(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Int Reduction
  void reduce2I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce4I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16I(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Byte Reduction
  void reduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Short Reduction
  void reduce4S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce32S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Long Reduction
#ifdef _LP64
  void reduce2L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce4L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
#endif // _LP64

  // Float Reduction
  void reduce2F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce4F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce8F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16F(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Double Reduction
  void reduce2D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce4D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Base reduction instruction
  void reduce_operation_128(BasicType typ, int opcode, XMMRegister dst, XMMRegister src);
  void reduce_operation_256(BasicType typ, int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2);
 public:
#ifdef _LP64
  // Vector-mask queries (e.g. true-count style ops selected by 'opc') on a
  // KRegister or XMMRegister mask; results land in the GP register 'dst'.
  void vector_mask_operation_helper(int opc, Register dst, Register tmp, int masklen);

  void vector_mask_operation(int opc, Register dst, KRegister mask, Register tmp, int masklen, int masksize, int vec_enc);

  void vector_mask_operation(int opc, Register dst, XMMRegister mask, XMMRegister xtmp,
                             Register tmp, int masklen, BasicType bt, int vec_enc);
  // Expand the low mask_len bits of GP register 'src' into a vector mask.
  void vector_long_to_maskvec(XMMRegister dst, Register src, Register rtmp1,
                              Register rtmp2, XMMRegister xtmp, int mask_len, int vec_enc);
#endif

  // Set/clear all mask_len lanes of 'dst' based on 'src'.
  void vector_maskall_operation(KRegister dst, Register src, int mask_len);

#ifndef _LP64
  // 32-bit VM variant for masks wider than a 32-bit GP register.
  void vector_maskall_operation32(KRegister dst, Register src, KRegister ktmp, int mask_len);
#endif

  // Find the first occurrence of char 'ch' in str1[0..cnt1); index in 'result'.
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // Same as above for Latin-1 (byte) strings.
  void stringL_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through stack if they cross page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  // This is the smallest-code variant: no stack staging, just a check of the
  // string tail.
  void string_indexofC8(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae, KRegister mask = knoreg);

  // Search for Non-ASCII character (Negative byte value) in a byte array,
  // return index of the first such character, otherwise len.
  void count_positives(Register ary1, Register len,
                       Register result, Register tmp1,
                       XMMRegister vec1, XMMRegister vec2, KRegister mask1 = knoreg, KRegister mask2 = knoreg);
  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char, KRegister mask = knoreg);
297 
  // Dispatch a masked vector operation identified by the C2 ideal opcode
  // 'ideal_opc'; 'merge' selects merge- vs zero-masking of inactive lanes.
  void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
                   XMMRegister dst, XMMRegister src1, XMMRegister src2,
                   bool merge, int vlen_enc, bool is_varshift = false);

  void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
                   XMMRegister dst, XMMRegister src1, Address src2,
                   bool merge, int vlen_enc);

  void evmasked_op(int ideal_opc, BasicType eType, KRegister mask, XMMRegister dst,
                   XMMRegister src1, int imm8, bool merge, int vlen_enc);

  // Mask-register to mask-register operation (and/or/xor etc. per 'ideal_opc').
  void masked_op(int ideal_opc, int mask_len, KRegister dst,
                 KRegister src1, KRegister src2);

  // FP -> integral vector casts. The *_sign_flip literals and the xtmp/ktmp
  // scratch registers are used by the special-case fixups below to give Java
  // semantics for NaN and out-of-range inputs.
  void vector_castF2I_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc,
                          XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, Register rscratch = noreg);

  void vector_castF2I_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc,
                           XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg);

  void vector_castF2L_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc,
                           XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg);

  void vector_castD2L_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc,
                           XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg );

  void vector_castD2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc,
                           XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg);

  // Widening cast between unsigned integral element types.
  void vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc, BasicType from_elem_bt, BasicType to_elem_bt);

  // Post-cast fixups handling NaN / overflow lanes of the conversions above.
  void vector_cast_double_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc,
                            XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg );

  void vector_cast_float_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc,
                                            XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg);

  void vector_cast_float_to_long_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc,
                                                    XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2,
                                                    Register rscratch = noreg);

  void vector_cast_float_special_cases_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc,
                                           XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4,
                                           Register rscratch = noreg);

#ifdef _LP64
  // Vector Math.round; 'new_mxcsr' points at an MXCSR image the sequence uses
  // to control rounding mode.
  void vector_round_double_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc,
                                Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2);

  void vector_round_float_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc,
                               Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2);

  void vector_round_float_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, AddressLiteral new_mxcsr, int vec_enc,
                              Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4);
#endif // _LP64
353 
  // Unsigned 32-bit divide/modulo/both; operands are pinned to rax/rdx as the
  // parameter names indicate (x86 div semantics).
  void udivI(Register rax, Register divisor, Register rdx);
  void umodI(Register rax, Register divisor, Register rdx);
  void udivmodI(Register rax, Register divisor, Register rdx, Register tmp);

#ifdef _LP64
  // Integer.reverse / Long.reverse on GP registers, using xmm scratch.
  void reverseI(Register dst, Register src, XMMRegister xtmp1,
                XMMRegister xtmp2, Register rtmp);
  void reverseL(Register dst, Register src, XMMRegister xtmp1,
                XMMRegister xtmp2, Register rtmp1, Register rtmp2);
  // Unsigned 64-bit divide/modulo/both (rax/rdx pinned as above).
  void udivL(Register rax, Register divisor, Register rdx);
  void umodL(Register rax, Register divisor, Register rdx);
  void udivmodL(Register rax, Register divisor, Register rdx, Register tmp);
#endif

  // Masked ternary-logic (vpternlog) with truth table 'func'; dst is also the
  // first source operand.
  void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, XMMRegister src3,
                  bool merge, BasicType bt, int vlen_enc);

  void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, Address src3,
                  bool merge, BasicType bt, int vlen_enc);

  // Per-element bit reversal; the gfni variant uses a GF2P8AFFINE constant
  // loaded from 'mask'.
  void vector_reverse_bit(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                          XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, AddressLiteral mask, int vec_enc,
                               XMMRegister xtmp, Register rscratch = noreg);

  // Per-element byte swap (reverseBytes).
  void vector_reverse_byte(BasicType bt, XMMRegister dst, XMMRegister src, int vec_enc);

  // Per-element population count, one entry per element width plus the
  // BasicType-dispatching front ends below.
  void vector_popcount_int(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                           XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_long(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                            XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_short(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                             XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_byte(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                            XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_integral(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_integral_evex(BasicType bt, XMMRegister dst, XMMRegister src,
                                     KRegister mask, bool merge, int vec_enc);

  // Broadcast the immediate 'imm32' to all lanes, staged through 'rtmp'.
  void vbroadcast(BasicType bt, XMMRegister dst, int imm32, Register rtmp, int vec_enc);

  void vector_reverse_byte64(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                             XMMRegister xtmp2, Register rtmp, int vec_enc);

  // Count-leading-zeros per element: EVEX dispatcher, per-width AVX helpers,
  // and the AVX BasicType dispatcher.
  void vector_count_leading_zeros_evex(BasicType bt, XMMRegister dst, XMMRegister src,
                                       XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3,
                                       KRegister ktmp, Register rtmp, bool merge, int vec_enc);

  void vector_count_leading_zeros_byte_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                           XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  void vector_count_leading_zeros_short_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                            XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  void vector_count_leading_zeros_int_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                          XMMRegister xtmp2, XMMRegister xtmp3, int vec_enc);

  void vector_count_leading_zeros_long_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                           XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  void vector_count_leading_zeros_avx(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                      XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  // Element-wise add/subtract dispatched on element type.
  void vpadd(BasicType bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vec_enc);

  void vpsub(BasicType bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vec_enc);

  // Count-trailing-zeros per element (EVEX and AVX variants).
  void vector_count_trailing_zeros_evex(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                        XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, KRegister ktmp,
                                        Register rtmp, int vec_enc);

  // Swap adjacent 'nbits'-wide bit groups selected by 'bitmask' in each element.
  void vector_swap_nbits(int nbits, int bitmask, XMMRegister dst, XMMRegister src,
                         XMMRegister xtmp1, Register rtmp, int vec_enc);

  void vector_count_trailing_zeros_avx(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                       XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  // Vector Math.signum; 'zero'/'one' hold the constants 0.0 and 1.0.
  void vector_signum_avx(int opcode, XMMRegister dst, XMMRegister src, XMMRegister zero, XMMRegister one,
                         XMMRegister xtmp1, int vec_enc);

  void vector_signum_evex(int opcode, XMMRegister dst, XMMRegister src, XMMRegister zero, XMMRegister one,
                          KRegister ktmp1, int vec_enc);

  // Masked vector load/store driven by an XMM (not K) mask register.
  void vmovmask(BasicType elem_bt, XMMRegister dst, Address src, XMMRegister mask, int vec_enc);

  void vmovmask(BasicType elem_bt, Address dst, XMMRegister src, XMMRegister mask, int vec_enc);

  // Byte-granularity shuffle of 'src' under the 'shuffle' index vector.
  void rearrange_bytes(XMMRegister dst, XMMRegister shuffle, XMMRegister src, XMMRegister xtmp1,
                       XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, KRegister ktmp, int vlen_enc);
450 
451 #endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP