/*
 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

25 #ifndef CPU_X86_C2_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_C2_MACROASSEMBLER_X86_HPP
27
28 // C2_MacroAssembler contains high-level macros for C2
29
30 public:
31 // C2 compiled method's prolog code.
32 void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);
33
34 Assembler::AvxVectorLen vector_length_encoding(int vlen_in_bytes);
35
36 // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
37 // See full description in macroAssembler_x86.cpp.
38 void fast_lock(Register obj, Register box, Register tmp,
39 Register scr, Register cx1, Register cx2, Register thread,
40 RTMLockingCounters* rtm_counters,
41 RTMLockingCounters* stack_rtm_counters,
42 Metadata* method_data,
43 bool use_rtm, bool profile_rtm);
44 void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
45
#if INCLUDE_RTM_OPT
// Helpers for RTM (Restricted Transactional Memory) locking: abort-counter
// maintenance, profiling-driven abort-ratio checks, bounded retry loops, and
// the stack-locked / inflated transactional lock fast paths.
void rtm_counters_update(Register abort_status, Register rtm_counters);
// Branch to brLabel roughly once every 'count' calls, using rdtsc as a cheap
// pseudo-random source.
void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                 RTMLockingCounters* rtm_counters,
                                 Metadata* method_data);
void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                   RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
void rtm_stack_locking(Register obj, Register tmp, Register scr,
                       Register retry_on_abort_count,
                       RTMLockingCounters* stack_rtm_counters,
                       Metadata* method_data, bool profile_rtm,
                       Label& DONE_LABEL, Label& IsInflated);
void rtm_inflated_locking(Register obj, Register box, Register tmp,
                          Register scr, Register retry_on_busy_count,
                          Register retry_on_abort_count,
                          RTMLockingCounters* rtm_counters,
                          Metadata* method_data, bool profile_rtm,
                          Label& DONE_LABEL);
#endif
68
69 // Generic instructions support for use in .ad files C2 code generation
70 void vabsnegd(int opcode, XMMRegister dst, XMMRegister src);
71 void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len);
72 void vabsnegf(int opcode, XMMRegister dst, XMMRegister src);
73 void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len);
74
75 void pminmax(int opcode, BasicType elem_bt, XMMRegister dst, XMMRegister src,
76 XMMRegister tmp = xnoreg);
77 void vpminmax(int opcode, BasicType elem_bt,
78 XMMRegister dst, XMMRegister src1, XMMRegister src2,
79 int vlen_enc);
80
81 void vminmax_fp(int opcode, BasicType elem_bt,
82 XMMRegister dst, XMMRegister a, XMMRegister b,
83 XMMRegister tmp, XMMRegister atmp, XMMRegister btmp,
84 int vlen_enc);
85 void evminmax_fp(int opcode, BasicType elem_bt,
86 XMMRegister dst, XMMRegister a, XMMRegister b,
87 KRegister ktmp, XMMRegister atmp, XMMRegister btmp,
88 int vlen_enc);
89
90 void signum_fp(int opcode, XMMRegister dst, XMMRegister zero, XMMRegister one);
91
92 void vector_compress_expand(int opcode, XMMRegister dst, XMMRegister src, KRegister mask,
93 bool merge, BasicType bt, int vec_enc);
94
95 void vector_mask_compress(KRegister dst, KRegister src, Register rtmp1, Register rtmp2, int mask_len);
96
97 void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
98 void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
99 void vextendbd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
100 void vextendwd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
101
102 void vshiftd(int opcode, XMMRegister dst, XMMRegister shift);
103 void vshiftd_imm(int opcode, XMMRegister dst, int shift);
104 void vshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
105 void vshiftd_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
106 void vshiftw(int opcode, XMMRegister dst, XMMRegister shift);
107 void vshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
108 void vshiftq(int opcode, XMMRegister dst, XMMRegister shift);
109 void vshiftq_imm(int opcode, XMMRegister dst, int shift);
110 void vshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
111 void vshiftq_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
112
113 void vprotate_imm(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, int shift, int vector_len);
114 void vprotate_var(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
115
116 void varshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
117 void varshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
118 void varshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc, XMMRegister vtmp = xnoreg);
119 void varshiftbw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp);
120 void evarshiftb(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp);
121
122 void insert(BasicType typ, XMMRegister dst, Register val, int idx);
123 void vinsert(BasicType typ, XMMRegister dst, XMMRegister src, Register val, int idx);
124 void vgather(BasicType typ, XMMRegister dst, Register base, XMMRegister idx, XMMRegister mask, int vector_len);
125 void evgather(BasicType typ, XMMRegister dst, KRegister mask, Register base, XMMRegister idx, int vector_len);
126 void evscatter(BasicType typ, Register base, XMMRegister idx, KRegister mask, XMMRegister src, int vector_len);
127
128 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
129 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
130
131 // extract
132 void extract(BasicType typ, Register dst, XMMRegister src, int idx);
133 XMMRegister get_lane(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex);
134 void get_elem(BasicType typ, Register dst, XMMRegister src, int elemindex);
135 void get_elem(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex, XMMRegister vtmp = xnoreg);
136 void movsxl(BasicType typ, Register dst);
137
138 // vector test
139 void vectortest(BasicType bt, XMMRegister src1, XMMRegister src2, XMMRegister vtmp, int vlen_in_bytes);
140
141 // Covert B2X
142 void vconvert_b2x(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vlen_enc);
143 #ifdef _LP64
144 void vpbroadcast(BasicType elem_bt, XMMRegister dst, Register src, int vlen_enc);
145 #endif
146
147 // blend
148 void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister src2, int comparison, int vector_len);
149 void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral src2, int comparison, int vector_len, Register rscratch = noreg);
150 void evpblend(BasicType typ, XMMRegister dst, KRegister kmask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len);
151
152 void load_vector(XMMRegister dst, Address src, int vlen_in_bytes);
153 void load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch = noreg);
154
155 void load_vector_mask(XMMRegister dst, XMMRegister src, int vlen_in_bytes, BasicType elem_bt, bool is_legacy);
156 void load_vector_mask(KRegister dst, XMMRegister src, XMMRegister xtmp, bool novlbwdq, int vlen_enc);
157
158 void load_constant_vector(BasicType bt, XMMRegister dst, InternalAddress src, int vlen);
159 void load_iota_indices(XMMRegister dst, int vlen_in_bytes, BasicType bt);
160
161 // Reductions for vectors of bytes, shorts, ints, longs, floats, and doubles.
162
163 // dst = src1 reduce(op, src2) using vtmp as temps
164 void reduceI(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
165 #ifdef _LP64
166 void reduceL(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
167 void genmask(KRegister dst, Register len, Register temp);
168 #endif // _LP64
169
170 // dst = reduce(op, src2) using vtmp as temps
171 void reduce_fp(int opcode, int vlen,
172 XMMRegister dst, XMMRegister src,
173 XMMRegister vtmp1, XMMRegister vtmp2 = xnoreg);
174 void reduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
175 void mulreduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
176 void reduceS(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
177 void reduceFloatMinMax(int opcode, int vlen, bool is_dst_valid,
178 XMMRegister dst, XMMRegister src,
179 XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
180 void reduceDoubleMinMax(int opcode, int vlen, bool is_dst_valid,
181 XMMRegister dst, XMMRegister src,
182 XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
183 private:
184 void reduceF(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
185 void reduceD(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
186
187 // Int Reduction
188 void reduce2I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
189 void reduce4I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
190 void reduce8I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
191 void reduce16I(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
192
193 // Byte Reduction
194 void reduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
195 void reduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
196 void reduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
197 void reduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
198 void mulreduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
199 void mulreduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
200 void mulreduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
201 void mulreduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
202
203 // Short Reduction
204 void reduce4S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
205 void reduce8S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
206 void reduce16S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
207 void reduce32S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
208
209 // Long Reduction
210 #ifdef _LP64
211 void reduce2L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
212 void reduce4L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
213 void reduce8L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
214 #endif // _LP64
215
216 // Float Reduction
217 void reduce2F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
218 void reduce4F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
219 void reduce8F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
220 void reduce16F(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
221
222 // Double Reduction
223 void reduce2D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
224 void reduce4D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
225 void reduce8D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
226
227 // Base reduction instruction
228 void reduce_operation_128(BasicType typ, int opcode, XMMRegister dst, XMMRegister src);
229 void reduce_operation_256(BasicType typ, int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2);
230
231 public:
232 #ifdef _LP64
233 void vector_mask_operation_helper(int opc, Register dst, Register tmp, int masklen);
234
235 void vector_mask_operation(int opc, Register dst, KRegister mask, Register tmp, int masklen, int masksize, int vec_enc);
236
237 void vector_mask_operation(int opc, Register dst, XMMRegister mask, XMMRegister xtmp,
238 Register tmp, int masklen, BasicType bt, int vec_enc);
239 void vector_long_to_maskvec(XMMRegister dst, Register src, Register rtmp1,
240 Register rtmp2, XMMRegister xtmp, int mask_len, int vec_enc);
241 #endif
242
243 void vector_maskall_operation(KRegister dst, Register src, int mask_len);
244
245 #ifndef _LP64
246 void vector_maskall_operation32(KRegister dst, Register src, KRegister ktmp, int mask_len);
247 #endif
248
249 void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
250 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
251
252 void stringL_indexof_char(Register str1, Register cnt1, Register ch, Register result,
253 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
254
255 // IndexOf strings.
256 // Small strings are loaded through stack if they cross page boundary.
257 void string_indexof(Register str1, Register str2,
258 Register cnt1, Register cnt2,
259 int int_cnt2, Register result,
260 XMMRegister vec, Register tmp,
261 int ae);
262
263 // IndexOf for constant substrings with size >= 8 elements
264 // which don't need to be loaded through stack.
265 void string_indexofC8(Register str1, Register str2,
266 Register cnt1, Register cnt2,
267 int int_cnt2, Register result,
268 XMMRegister vec, Register tmp,
269 int ae);
270
271 // Smallest code: we don't need to load through stack,
272 // check string tail.
273
274 // helper function for string_compare
275 void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
276 Address::ScaleFactor scale, Address::ScaleFactor scale1,
277 Address::ScaleFactor scale2, Register index, int ae);
278 // Compare strings.
279 void string_compare(Register str1, Register str2,
280 Register cnt1, Register cnt2, Register result,
281 XMMRegister vec1, int ae, KRegister mask = knoreg);
282
283 // Search for Non-ASCII character (Negative byte value) in a byte array,
284 // return index of the first such character, otherwise len.
285 void count_positives(Register ary1, Register len,
286 Register result, Register tmp1,
287 XMMRegister vec1, XMMRegister vec2, KRegister mask1 = knoreg, KRegister mask2 = knoreg);
288 // Compare char[] or byte[] arrays.
289 void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
290 Register limit, Register result, Register chr,
291 XMMRegister vec1, XMMRegister vec2, bool is_char, KRegister mask = knoreg);
292
293 void arrays_hashcode(Register str1, Register cnt1, Register result,
294 Register tmp1, Register tmp2, Register tmp3, XMMRegister vnext,
295 XMMRegister vcoef0, XMMRegister vcoef1, XMMRegister vcoef2, XMMRegister vcoef3,
296 XMMRegister vresult0, XMMRegister vresult1, XMMRegister vresult2, XMMRegister vresult3,
297 XMMRegister vtmp0, XMMRegister vtmp1, XMMRegister vtmp2, XMMRegister vtmp3,
298 BasicType eltype);
299
300 // helper functions for arrays_hashcode
301 int arrays_hashcode_elsize(BasicType eltype);
302 void arrays_hashcode_elload(Register dst, Address src, BasicType eltype);
303 void arrays_hashcode_elvload(XMMRegister dst, Address src, BasicType eltype);
304 void arrays_hashcode_elvload(XMMRegister dst, AddressLiteral src, BasicType eltype);
305 void arrays_hashcode_elvcast(XMMRegister dst, BasicType eltype);
306
307 #ifdef _LP64
308 void convertF2I(BasicType dst_bt, BasicType src_bt, Register dst, XMMRegister src);
309 #endif
310
311 void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
312 XMMRegister dst, XMMRegister src1, XMMRegister src2,
313 bool merge, int vlen_enc, bool is_varshift = false);
314
315 void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
316 XMMRegister dst, XMMRegister src1, Address src2,
317 bool merge, int vlen_enc);
318
319 void evmasked_op(int ideal_opc, BasicType eType, KRegister mask, XMMRegister dst,
320 XMMRegister src1, int imm8, bool merge, int vlen_enc);
321
322 void masked_op(int ideal_opc, int mask_len, KRegister dst,
323 KRegister src1, KRegister src2);
324
325 void vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc,
326 BasicType from_elem_bt, BasicType to_elem_bt);
327
328 void vector_signed_cast(XMMRegister dst, XMMRegister src, int vlen_enc,
329 BasicType from_elem_bt, BasicType to_elem_bt);
330
331 void vector_cast_int_to_subword(BasicType to_elem_bt, XMMRegister dst, XMMRegister zero,
332 XMMRegister xtmp, Register rscratch, int vec_enc);
333
334 void vector_castF2X_avx(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
335 XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4,
336 AddressLiteral float_sign_flip, Register rscratch, int vec_enc);
337
338 void vector_castF2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
339 XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, AddressLiteral float_sign_flip,
340 Register rscratch, int vec_enc);
341
342 void vector_castF2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
343 KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip,
344 Register rscratch, int vec_enc);
345
346 void vector_castD2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
347 XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, AddressLiteral sign_flip,
348 Register rscratch, int vec_enc);
349
350 void vector_castD2X_avx(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
351 XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, XMMRegister xtmp5,
352 AddressLiteral float_sign_flip, Register rscratch, int vec_enc);
353
354
355 void vector_cast_double_to_int_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
356 XMMRegister xtmp3, XMMRegister xtmp4, XMMRegister xtmp5, Register rscratch,
357 AddressLiteral float_sign_flip, int vec_enc);
358
359 void vector_cast_double_to_int_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
360 KRegister ktmp1, KRegister ktmp2, Register rscratch, AddressLiteral float_sign_flip,
361 int vec_enc);
362
363 void vector_cast_double_to_long_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
364 KRegister ktmp1, KRegister ktmp2, Register rscratch, AddressLiteral double_sign_flip,
365 int vec_enc);
366
367 void vector_cast_float_to_int_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
368 KRegister ktmp1, KRegister ktmp2, Register rscratch, AddressLiteral float_sign_flip,
369 int vec_enc);
370
371 void vector_cast_float_to_long_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
372 KRegister ktmp1, KRegister ktmp2, Register rscratch, AddressLiteral double_sign_flip,
373 int vec_enc);
374
375 void vector_cast_float_to_int_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3,
376 XMMRegister xtmp4, Register rscratch, AddressLiteral float_sign_flip,
377 int vec_enc);
378
379 void vector_crosslane_doubleword_pack_avx(XMMRegister dst, XMMRegister src, XMMRegister zero,
380 XMMRegister xtmp, int index, int vec_enc);
381
382 void vector_mask_cast(XMMRegister dst, XMMRegister src, BasicType dst_bt, BasicType src_bt, int vlen);
383
384 #ifdef _LP64
385 void vector_round_double_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc,
386 Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2);
387
388 void vector_round_float_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc,
389 Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2);
390
391 void vector_round_float_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, AddressLiteral new_mxcsr, int vec_enc,
392 Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4);
393 #endif // _LP64
394
395 void udivI(Register rax, Register divisor, Register rdx);
396 void umodI(Register rax, Register divisor, Register rdx);
397 void udivmodI(Register rax, Register divisor, Register rdx, Register tmp);
398
399 #ifdef _LP64
400 void reverseI(Register dst, Register src, XMMRegister xtmp1,
401 XMMRegister xtmp2, Register rtmp);
402 void reverseL(Register dst, Register src, XMMRegister xtmp1,
403 XMMRegister xtmp2, Register rtmp1, Register rtmp2);
404 void udivL(Register rax, Register divisor, Register rdx);
405 void umodL(Register rax, Register divisor, Register rdx);
406 void udivmodL(Register rax, Register divisor, Register rdx, Register tmp);
407 #endif
408
409 void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, XMMRegister src3,
410 bool merge, BasicType bt, int vlen_enc);
411
412 void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, Address src3,
413 bool merge, BasicType bt, int vlen_enc);
414
415 void vector_reverse_bit(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
416 XMMRegister xtmp2, Register rtmp, int vec_enc);
417
418 void vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, AddressLiteral mask, int vec_enc,
419 XMMRegister xtmp, Register rscratch = noreg);
420
421 void vector_reverse_byte(BasicType bt, XMMRegister dst, XMMRegister src, int vec_enc);
422
423 void vector_popcount_int(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
424 XMMRegister xtmp2, Register rtmp, int vec_enc);
425
426 void vector_popcount_long(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
427 XMMRegister xtmp2, Register rtmp, int vec_enc);
428
429 void vector_popcount_short(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
430 XMMRegister xtmp2, Register rtmp, int vec_enc);
431
432 void vector_popcount_byte(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
433 XMMRegister xtmp2, Register rtmp, int vec_enc);
434
435 void vector_popcount_integral(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
436 XMMRegister xtmp2, Register rtmp, int vec_enc);
437
438 void vector_popcount_integral_evex(BasicType bt, XMMRegister dst, XMMRegister src,
439 KRegister mask, bool merge, int vec_enc);
440
441 void vbroadcast(BasicType bt, XMMRegister dst, int imm32, Register rtmp, int vec_enc);
442
443 void vector_reverse_byte64(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
444 XMMRegister xtmp2, Register rtmp, int vec_enc);
445
446 void vector_count_leading_zeros_evex(BasicType bt, XMMRegister dst, XMMRegister src,
447 XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3,
448 KRegister ktmp, Register rtmp, bool merge, int vec_enc);
449
450 void vector_count_leading_zeros_byte_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
451 XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);
452
453 void vector_count_leading_zeros_short_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
454 XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);
455
456 void vector_count_leading_zeros_int_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
457 XMMRegister xtmp2, XMMRegister xtmp3, int vec_enc);
458
459 void vector_count_leading_zeros_long_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
460 XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);
461
462 void vector_count_leading_zeros_avx(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
463 XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);
464
465 void vpadd(BasicType bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vec_enc);
466
467 void vpsub(BasicType bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vec_enc);
468
469 void vector_count_trailing_zeros_evex(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
470 XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, KRegister ktmp,
471 Register rtmp, int vec_enc);
472
473 void vector_swap_nbits(int nbits, int bitmask, XMMRegister dst, XMMRegister src,
474 XMMRegister xtmp1, Register rtmp, int vec_enc);
475
476 void vector_count_trailing_zeros_avx(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
477 XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);
478
479 void vector_signum_avx(int opcode, XMMRegister dst, XMMRegister src, XMMRegister zero, XMMRegister one,
480 XMMRegister xtmp1, int vec_enc);
481
482 void vector_signum_evex(int opcode, XMMRegister dst, XMMRegister src, XMMRegister zero, XMMRegister one,
483 KRegister ktmp1, int vec_enc);
484
485 void vmovmask(BasicType elem_bt, XMMRegister dst, Address src, XMMRegister mask, int vec_enc);
486
487 void vmovmask(BasicType elem_bt, Address dst, XMMRegister src, XMMRegister mask, int vec_enc);
488
489 void rearrange_bytes(XMMRegister dst, XMMRegister shuffle, XMMRegister src, XMMRegister xtmp1,
490 XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, KRegister ktmp, int vlen_enc);
491
492 void vector_rearrange_int_float(BasicType bt, XMMRegister dst, XMMRegister shuffle,
493 XMMRegister src, int vlen_enc);
494
495 #endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP