/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_C2_MACROASSEMBLER_X86_HPP
#define CPU_X86_C2_MACROASSEMBLER_X86_HPP

// C2_MacroAssembler contains high-level macros for C2
// NOTE(review): this header is declaration-only; all definitions live in the
// corresponding .cpp. It is included into the body of a class declaration,
// hence the bare access specifiers below.

public:
  // Map a vector length in bytes to the Assembler's AVX vector-length encoding.
  Assembler::AvxVectorLen vector_length_encoding(int vlen_in_bytes);

  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);

#if INCLUDE_RTM_OPT
  // Restricted Transactional Memory (RTM) locking support, compiled in only
  // when RTM is enabled. Helpers below cover abort-status bookkeeping,
  // retry/back-off logic, and the stack-locked vs. inflated lock paths.
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif

  // Generic instructions support for use in .ad files C2 code generation

  // Vector absolute-value / negate helpers for double (d) and float (f) lanes;
  // the 'opcode' selects the ideal operation, 'scr' is a scratch register.
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, Register scr);
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);

  // Packed integer min/max; element type selected by 'elem_bt'.
  void pminmax(int opcode, BasicType elem_bt,
               XMMRegister dst, XMMRegister src,
               XMMRegister tmp = xnoreg);
  void vpminmax(int opcode, BasicType elem_bt,
                XMMRegister dst, XMMRegister src1, XMMRegister src2,
                int vlen_enc);

  // Floating-point vector min/max (AVX and EVEX/masked variants).
  void vminmax_fp(int opcode, BasicType elem_bt,
                  XMMRegister dst, XMMRegister a, XMMRegister b,
                  XMMRegister tmp, XMMRegister atmp, XMMRegister btmp,
                  int vlen_enc);
  void evminmax_fp(int opcode, BasicType elem_bt,
                   XMMRegister dst, XMMRegister a, XMMRegister b,
                   KRegister ktmp, XMMRegister atmp, XMMRegister btmp,
                   int vlen_enc);

  // Floating-point signum; 'zero' and 'one' hold the constant operands.
  void signum_fp(int opcode, XMMRegister dst,
                 XMMRegister zero, XMMRegister one,
                 Register scratch);

  void vector_compress_expand(int opcode, XMMRegister dst, XMMRegister src, KRegister mask,
                              bool merge, BasicType bt, int vec_enc);

  void vector_mask_compress(KRegister dst, KRegister src, Register rtmp1, Register rtmp2, int mask_len);

  // Sign/zero extension of packed elements: byte->word, byte->dword, word->dword.
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
  void vextendbd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendwd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);

  // Vector shifts for dword/word/qword lanes; *_imm variants take an
  // immediate shift count, the others a shift-count register.
  void vshiftd(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftd_imm(int opcode, XMMRegister dst, int shift);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftd_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister shift);
  void vshiftq_imm(int opcode, XMMRegister dst, int shift);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void vshiftq_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  // Vector rotate by immediate (_imm) or per-element variable count (_var).
  void vprotate_imm(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vprotate_var(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Variable (per-element) shift counts held in the 'shift' vector register.
  void varshiftd(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void varshiftw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc);
  void varshiftq(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vlen_enc, XMMRegister vtmp = xnoreg);
  void varshiftbw(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp, Register scratch);
  void evarshiftb(int opcode, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len, XMMRegister vtmp, Register scratch);

  // Lane insert, and gather/scatter through an index vector (EVEX variants
  // take a KRegister mask).
  void insert(BasicType typ, XMMRegister dst, Register val, int idx);
  void vinsert(BasicType typ, XMMRegister dst, XMMRegister src, Register val, int idx);
  void vgather(BasicType typ, XMMRegister dst, Register base, XMMRegister idx, XMMRegister mask, int vector_len);
  void evgather(BasicType typ, XMMRegister dst, KRegister mask, Register base, XMMRegister idx, int vector_len);
  void evscatter(BasicType typ, Register base, XMMRegister idx, KRegister mask, XMMRegister src, int vector_len);

  // Masked vector load/store.
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);

  // extract
  void extract(BasicType typ, Register dst, XMMRegister src, int idx);
  XMMRegister get_lane(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex);
  void get_elem(BasicType typ, Register dst, XMMRegister src, int elemindex);
  void get_elem(BasicType typ, XMMRegister dst, XMMRegister src, int elemindex, Register tmp = noreg, XMMRegister vtmp = xnoreg);

  // vector test
  void vectortest(int bt, int vlen, XMMRegister src1, XMMRegister src2,
                  XMMRegister vtmp1 = xnoreg, XMMRegister vtmp2 = xnoreg, KRegister mask = knoreg);

  // blend
  void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral adr, int comparison, int vector_len, Register scratch = rscratch1);
  void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister src2, int comparison, int vector_len);
  void evpblend(BasicType typ, XMMRegister dst, KRegister kmask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len);

  // Materialize a vector/opmask-register mask from a source vector.
  void load_vector_mask(XMMRegister dst, XMMRegister src, int vlen_in_bytes, BasicType elem_bt, bool is_legacy);
  void load_vector_mask(KRegister dst, XMMRegister src, XMMRegister xtmp, Register tmp, bool novlbwdq, int vlen_enc);

  void load_vector(XMMRegister dst, Address src, int vlen_in_bytes);
  void load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch = rscratch1);
  void load_iota_indices(XMMRegister dst, Register scratch, int vlen_in_bytes);

  // Reductions for vectors of bytes, shorts, ints, longs, floats, and doubles.

  // dst = src1 reduce(op, src2) using vtmp as temps
  void reduceI(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
#ifdef _LP64
  void reduceL(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void genmask(KRegister dst, Register len, Register temp);
#endif // _LP64

  // dst = reduce(op, src2) using vtmp as temps
  void reduce_fp(int opcode, int vlen,
                 XMMRegister dst, XMMRegister src,
                 XMMRegister vtmp1, XMMRegister vtmp2 = xnoreg);
  void reduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduceB(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceS(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceFloatMinMax(int opcode, int vlen, bool is_dst_valid,
                         XMMRegister dst, XMMRegister src,
                         XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
  void reduceDoubleMinMax(int opcode, int vlen, bool is_dst_valid,
                          XMMRegister dst, XMMRegister src,
                          XMMRegister tmp, XMMRegister atmp, XMMRegister btmp, XMMRegister xmm_0, XMMRegister xmm_1 = xnoreg);
private:
  // Internal per-width reduction steps; the public reduce* entry points above
  // dispatch to these based on 'vlen'.
  void reduceF(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduceD(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Int Reduction
  void reduce2I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce4I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8I (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16I(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Byte Reduction
  void reduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce8B (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce16B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce32B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void mulreduce64B(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Short Reduction
  void reduce4S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8S (int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce32S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);

  // Long Reduction
#ifdef _LP64
  void reduce2L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce4L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2);
#endif // _LP64

  // Float Reduction
  void reduce2F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce4F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce8F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce16F(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Double Reduction
  void reduce2D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp);
  void reduce4D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);
  void reduce8D(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2);

  // Base reduction instruction
  void reduce_operation_128(BasicType typ, int opcode, XMMRegister dst, XMMRegister src);
  void reduce_operation_256(BasicType typ, int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2);

public:
#ifdef _LP64
  // Operations on opmask/vector masks (e.g. converting masks to scalar results).
  void vector_mask_operation_helper(int opc, Register dst, Register tmp, int masklen);

  void vector_mask_operation(int opc, Register dst, KRegister mask, Register tmp, int masklen, int masksize, int vec_enc);

  void vector_mask_operation(int opc, Register dst, XMMRegister mask, XMMRegister xtmp,
                             Register tmp, int masklen, BasicType bt, int vec_enc);
  void vector_long_to_maskvec(XMMRegister dst, Register src, Register rtmp1,
                              Register rtmp2, XMMRegister xtmp, int mask_len, int vec_enc);
#endif

  void vector_maskall_operation(KRegister dst, Register src, int mask_len);

#ifndef _LP64
  // 32-bit-only variant needing an extra opmask temporary.
  void vector_maskall_operation32(KRegister dst, Register src, KRegister ktmp, int mask_len);
#endif

  // Intrinsic: find the first occurrence of character 'ch' in a string
  // (char-string and Latin1 byte-string variants).
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  void stringL_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through stack if they cross page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2, Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2, Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

  // Smallest code: we don't need to load through stack,
  // check string tail.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae, KRegister mask = knoreg);

  // Search for Non-ASCII character (Negative byte value) in a byte array,
  // return index of the first such character, otherwise len.
  void count_positives(Register ary1, Register len,
                       Register result, Register tmp1,
                       XMMRegister vec1, XMMRegister vec2, KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char, KRegister mask = knoreg);

  // Masked (opmask-predicated) variants of ideal vector operations; the
  // overloads cover register, memory, and immediate second operands.
  void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
                   XMMRegister dst, XMMRegister src1, XMMRegister src2,
                   bool merge, int vlen_enc, bool is_varshift = false);

  void evmasked_op(int ideal_opc, BasicType eType, KRegister mask,
                   XMMRegister dst, XMMRegister src1, Address src2,
                   bool merge, int vlen_enc);

  void evmasked_op(int ideal_opc, BasicType eType, KRegister mask, XMMRegister dst,
                   XMMRegister src1, int imm8, bool merge, int vlen_enc);

  void masked_op(int ideal_opc, int mask_len, KRegister dst,
                 KRegister src1, KRegister src2);

  // Vector float->int and double->long casts (AVX and EVEX code paths);
  // *_special_cases_* helpers handle NaN/overflow inputs.
  void vector_castF2I_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                          XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4,
                          AddressLiteral float_sign_flip, Register scratch, int vec_enc);

  void vector_castF2I_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                           KRegister ktmp1, KRegister ktmp2, AddressLiteral float_sign_flip,
                           Register scratch, int vec_enc);

  void vector_castD2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                           KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip,
                           Register scratch, int vec_enc);

  void vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc,
                            BasicType from_elem_bt, BasicType to_elem_bt);

  void vector_cast_double_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                                             KRegister ktmp1, KRegister ktmp2, Register scratch, AddressLiteral double_sign_flip,
                                             int vec_enc);

  void vector_cast_float_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                                            KRegister ktmp1, KRegister ktmp2, Register scratch, AddressLiteral float_sign_flip,
                                            int vec_enc);

  void vector_cast_float_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                           XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4,
                                           Register scratch, AddressLiteral float_sign_flip,
                                           int vec_enc);

#ifdef _LP64
  // Vector rounding; 'new_mxcsr' points to an MXCSR image used to control
  // the rounding mode during the operation.
  void vector_round_double_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                                KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip,
                                AddressLiteral new_mxcsr, Register scratch, int vec_enc);

  void vector_round_float_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                               KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip,
                               AddressLiteral new_mxcsr, Register scratch, int vec_enc);

  void vector_round_float_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
                              XMMRegister xtmp3, XMMRegister xtmp4, AddressLiteral float_sign_flip,
                              AddressLiteral new_mxcsr, Register scratch, int vec_enc);
#endif

  // Ternary logic (vpternlog) with the truth table given by 'func'.
  void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, XMMRegister src3,
                  bool merge, BasicType bt, int vlen_enc);

  void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, Address src3,
                  bool merge, BasicType bt, int vlen_enc);

  // Reverse the bits / bytes of each vector element.
  void vector_reverse_bit(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                          XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp,
                               AddressLiteral mask, Register rtmp, int vec_enc);

  void vector_reverse_byte(BasicType bt, XMMRegister dst, XMMRegister src, Register rtmp, int vec_enc);

  // Unsigned division/modulus; parameter names reflect the fixed rax/rdx
  // operand registers of the x86 div instruction.
  void udivI(Register rax, Register divisor, Register rdx);
  void umodI(Register rax, Register divisor, Register rdx);
  void udivmodI(Register rax, Register divisor, Register rdx, Register tmp);

#ifdef _LP64
  void udivL(Register rax, Register divisor, Register rdx);
  void umodL(Register rax, Register divisor, Register rdx);
  void udivmodL(Register rax, Register divisor, Register rdx, Register tmp);
#endif

  // Vector population count, per element width, plus dispatch-by-type
  // (integral) and EVEX masked variants.
  void vector_popcount_int(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                           XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_long(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                            XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_short(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                             XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_byte(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                            XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_integral(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                XMMRegister xtmp2, Register rtmp, int vec_enc);

  void vector_popcount_integral_evex(BasicType bt, XMMRegister dst, XMMRegister src,
                                     KRegister mask, bool merge, int vec_enc);

  // Broadcast an immediate to all lanes.
  void vbroadcast(BasicType bt, XMMRegister dst, int imm32, Register rtmp, int vec_enc);

  void vector_reverse_byte64(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                             XMMRegister xtmp2, Register rtmp, int vec_enc);

  // Count-leading-zeros per element: EVEX path, AVX paths per element width,
  // and an AVX dispatcher selecting by 'bt'.
  void vector_count_leading_zeros_evex(BasicType bt, XMMRegister dst, XMMRegister src,
                                       XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3,
                                       KRegister ktmp, Register rtmp, bool merge, int vec_enc);

  void vector_count_leading_zeros_byte_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                           XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  void vector_count_leading_zeros_short_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                            XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  void vector_count_leading_zeros_int_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                          XMMRegister xtmp2, XMMRegister xtmp3, int vec_enc);

  void vector_count_leading_zeros_long_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                           XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  void vector_count_leading_zeros_avx(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                      XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

  // Element-wise vector add/sub helpers (element type selected by 'bt').
  void vpadd(BasicType bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vec_enc);

  void vpsub(BasicType bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vec_enc);

  // Count-trailing-zeros per element (EVEX and AVX variants).
  void vector_count_trailing_zeros_evex(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                        XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, KRegister ktmp,
                                        Register rtmp, int vec_enc);

  void vector_count_trailing_zeros_avx(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1,
                                       XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, int vec_enc);

#endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP