/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement (disp8) on EVEX-enabled platforms.
static const unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  1,  1,  1,   // EVEX_NOSCALE(0)
  0,  0,  0    // EVEX_ETUP
};
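
// Worked example of the table indexing (illustrative only): an EVEX_T1S
// instruction with a 32-bit input size selects row EVEX_T1S + 2, so the
// displacement scale factor is 4 at every vector length; a raw displacement
// of 0x100 then compresses to the disp8 value 0x40 (0x100 / 4), which fits
// the one-byte encoding.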

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section,
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use relocInfo::none so that we get a
    // literal address, which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64-bit machines.
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  _xmmindex = xnoreg;
  _isxmmindex = false;
  switch (rtype) {
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(loc);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(loc);
    break;
  case relocInfo::runtime_call_type:
    // HMM
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
  }
}


// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec = RelocationHolder::none;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

void Assembler::init_attributes(void) {
  _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
  _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
  _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
  _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
  _attributes = nullptr;
}

void Assembler::set_attributes(InstructionAttr* attributes) {
  // Record the assembler in the attributes, so the attributes destructor can
  // clear the assembler's attributes, cleaning up the otherwise dangling
  // pointer. gcc13 has a false positive warning, because it doesn't tie that
  // cleanup to the assignment of _attributes here.
  attributes->set_current_assembler(this);
  PRAGMA_DIAG_PUSH
  PRAGMA_DANGLING_POINTER_IGNORED
  _attributes = attributes;
  PRAGMA_DIAG_POP
}

void Assembler::membar(Membar_mask_bits order_constraint) {
  // We only have to handle StoreLoad
  if (order_constraint & StoreLoad) {
    // All usable chips support "locked" instructions which suffice
    // as barriers, and are much faster than the alternative of
    // using the cpuid instruction. Here we use a locked add of
    // [esp-C], 0. Conveniently, this is otherwise a no-op except
    // for blowing the flags and introducing a false dependency on
    // the target memory location. We can't do anything with the
    // flags, but we can avoid memory dependencies in the current
    // method by locked-adding somewhere else on the stack. Doing
    // [esp+C] would collide with something on the stack in the
    // current method, hence we go for [esp-C]. It is convenient
    // since it is almost always in the data cache, for any small C.
    // We need to step back from SP to avoid data dependencies with
    // other things below SP (callee-saves, for example). Without a
    // clear way to figure out the minimal safe distance from SP, it
    // makes sense to step back a complete cache line, as this will
    // also avoid possible second-order effects with locked ops
    // against the cache line. Our choice of offset is bounded by
    // x86 operand encoding, which should stay within [-128; +127]
    // to keep the 8-bit displacement encoding.
    //
    // Any change to this code may need to revisit other places in
    // the code where this idiom is used, in particular the
    // orderAccess code.

    int offset = -VM_Version::L1_line_size();
    if (offset < -128) {
      offset = -128;
    }

    lock();
    addl(Address(rsp, offset), 0); // Assert the lock# signal here
  }
}
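
// Sketch of the bytes this emits, assuming a 64-byte L1 line (so offset = -64):
//   lock addl [rsp - 64], 0   =>   F0 83 44 24 C0 00
// F0 is the lock prefix, 83 /0 ib is the sign-extended-imm8 add, 44 24 is the
// ModRM/SIB pair for [rsp + disp8], C0 is -64, and 00 is the immediate.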

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != nullptr, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // Hack: call32 is too wide for the mask, so use disp32.
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  return r->encoding() & 7;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int24(op1, (op2 | encode(dst)), imm8);
}

void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32, bool optimize_rax_dst) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(op1 == 0x81, "Unexpected opcode");
  if (is8bit(imm32)) {
    emit_int24(op1 | 0x02,        // set sign-extend bit
               op2 | encode(dst),
               imm32 & 0xFF);
  } else if (optimize_rax_dst && dst == rax) {
    switch (op2) {
    case 0xD0: emit_int8(0x15); break; // adc
    case 0xC0: emit_int8(0x05); break; // add
    case 0xE0: emit_int8(0x25); break; // and
    case 0xF8: emit_int8(0x3D); break; // cmp
    case 0xC8: emit_int8(0x0D); break; // or
    case 0xD8: emit_int8(0x1D); break; // sbb
    case 0xE8: emit_int8(0x2D); break; // sub
    case 0xF0: emit_int8(0x35); break; // xor
    default: ShouldNotReachHere();
    }
    emit_int32(imm32);
  } else {
    emit_int16(op1, (op2 | encode(dst)));
    emit_int32(imm32);
  }
}
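
// Encoding notes, a small illustration of the two shortcuts above: for an
// imm32 that fits in 8 bits, "addl rbx, 16" becomes 83 C3 10 (opcode
// 0x81 | 0x02 selects the sign-extended imm8 form). For rax with a full
// imm32 and the shortcut enabled, "addl rax, 0x12345678" uses the one-byte
// accumulator form 05 78 56 34 12 instead of 81 C0 78 56 34 12.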

// Force generation of a 4-byte immediate value even if it fits into 8 bits
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int16(op1, (op2 | encode(dst)));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign-extend bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}

void Assembler::emit_arith_operand_imm32(int op1, Register rm, Address adr, int32_t imm32) {
  assert(op1 == 0x81, "unexpected opcode");
  emit_int8(op1);
  emit_operand(rm, adr, 4);
  emit_int32(imm32);
}

void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int16(op1, (op2 | encode(dst) << 3 | encode(src)));
}


bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8-bit.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    case EVEX_NOSCALE:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if ((-0x80 <= new_disp && new_disp < 0x80)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}


bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8-bit.
  if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    case EVEX_NOSCALE:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}
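
// Illustrative example: a full-vector (EVEX_FVM) 512-bit access, e.g. one of
// the evmovdqu forms, has a scale factor of 64 here, so a displacement of 128
// is emitted as the compressed disp8 value 2 and the instruction avoids a
// four-byte disp32.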

bool Assembler::needs_rex2(Register reg1, Register reg2, Register reg3) {
  bool rex2 = (reg1->is_valid() && reg1->encoding() >= 16) ||
              (reg2->is_valid() && reg2->encoding() >= 16) ||
              (reg3->is_valid() && reg3->encoding() >= 16);
  assert(!rex2 || UseAPX, "extended gpr use requires UseAPX");
  return rex2;
}

#ifndef PRODUCT
bool Assembler::needs_evex(XMMRegister reg1, XMMRegister reg2, XMMRegister reg3) {
  return (reg1->is_valid() && reg1->encoding() >= 16) ||
         (reg2->is_valid() && reg2->encoding() >= 16) ||
         (reg3->is_valid() && reg3->encoding() >= 16);
}
#endif

bool Assembler::needs_eevex(Register reg1, Register reg2, Register reg3) {
  return needs_rex2(reg1, reg2, reg3);
}

bool Assembler::needs_eevex(int enc1, int enc2, int enc3) {
  bool eevex = enc1 >= 16 || enc2 >= 16 || enc3 >= 16;
  assert(!eevex || UseAPX, "extended gpr use requires UseAPX");
  return eevex;
}

static bool is_valid_encoding(int reg_enc) {
  return reg_enc >= 0;
}

static int raw_encode(Register reg) {
  assert(reg == noreg || reg->is_valid(), "sanity");
  int reg_enc = reg->raw_encoding();
  assert(reg_enc == -1 || is_valid_encoding(reg_enc), "sanity");
  return reg_enc;
}

static int raw_encode(XMMRegister xmmreg) {
  assert(xmmreg == xnoreg || xmmreg->is_valid(), "sanity");
  int xmmreg_enc = xmmreg->raw_encoding();
  assert(xmmreg_enc == -1 || is_valid_encoding(xmmreg_enc), "sanity");
  return xmmreg_enc;
}

static int raw_encode(KRegister kreg) {
  assert(kreg == knoreg || kreg->is_valid(), "sanity");
  int kreg_enc = kreg->raw_encoding();
  assert(kreg_enc == -1 || is_valid_encoding(kreg_enc), "sanity");
  return kreg_enc;
}

static int modrm_encoding(int mod, int dst_enc, int src_enc) {
  return (mod & 3) << 6 | (dst_enc & 7) << 3 | (src_enc & 7);
}

static int sib_encoding(Address::ScaleFactor scale, int index_enc, int base_enc) {
  return (scale & 3) << 6 | (index_enc & 7) << 3 | (base_enc & 7);
}

inline void Assembler::emit_modrm(int mod, int dst_enc, int src_enc) {
  assert((mod & 3) != 0b11, "forbidden");
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int8(modrm);
}

inline void Assembler::emit_modrm_disp8(int mod, int dst_enc, int src_enc,
                                        int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int16(modrm, disp & 0xFF);
}

inline void Assembler::emit_modrm_sib(int mod, int dst_enc, int src_enc,
                                      Address::ScaleFactor scale, int index_enc, int base_enc) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int16(modrm, sib);
}

inline void Assembler::emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc,
                                            Address::ScaleFactor scale, int index_enc, int base_enc,
                                            int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int24(modrm, sib, disp & 0xFF);
}
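
// Packing example, for orientation: emit_modrm_sib_disp8(0b01, 0, 0b100,
// Address::times_1, 0b100, 0b100, -64) produces 44 24 C0. The ModRM byte
// [01 000 100] says "disp8 form, SIB follows", the SIB byte [00 100 100]
// says "base rsp, no index", and C0 is the disp8 value -64.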

void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
                                    Address::ScaleFactor scale, int disp,
                                    RelocationHolder const& rspec,
                                    int post_addr_length) {
  bool no_relocation = (rspec.type() == relocInfo::none);

  if (is_valid_encoding(base_enc)) {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && no_relocation && ((base_enc & 0x7) != 5)) {
        // [base + index*scale]
        // !(rbp | r13 | r21 | r29)
        // [00 reg 100][ss index base]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       scale, index_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             scale, index_enc, base_enc,
                             disp);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       scale, index_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if ((base_enc & 0x7) == 4) {
      // rsp | r12 | r20 | r28
      // [rsp + disp]
      if (disp == 0 && no_relocation) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             Address::times_1, 0b100, 0b100,
                             disp);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      // rsp | r12 | r20 | r28 were handled above
      assert(((base_enc & 0x7) != 4), "illegal addressing mode");
      if (disp == 0 && no_relocation && ((base_enc & 0x7) != 5)) {
        // [base]
        // !(rbp | r13 | r21 | r29)
        // [00 reg base]
        emit_modrm(0, reg_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_modrm_disp8(0b01, reg_enc, base_enc,
                         disp);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_modrm(0b10, reg_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // base == noreg
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100,
                     scale, index_enc, 0b101 /* no base */);
      emit_data(disp, rspec, disp32_operand);
    } else if (!no_relocation) {
      // base == noreg, index == noreg
      // [disp] 64-bit: RIP-relative; 32-bit: absolute
      // [00 reg 101] disp32

      emit_modrm(0b00, reg_enc, 0b101 /* no base */);
      // Note that the RIP-relative correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was computed as the target address minus the pc at the start
      // of the instruction; it needs further correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != nullptr, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + post_addr_length;
      int64_t adjusted = disp;
      // Do rip-rel adjustment
      adjusted -= (next_ip - inst_mark());
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // base == noreg, index == noreg, no_relocation == true
      // 32-bit never did this; it did everything via the rip-rel/disp code above.
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100 /* no base */,
                     Address::times_1, 0b100, 0b101);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
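
// Worked example of the RIP-relative correction above (illustrative numbers):
// suppose inst_mark() = 0x1000, the disp32 field starts at pc() = 0x1002, one
// trailing imm8 follows (post_addr_length == 1), and the target is 0x2000.
// The incoming disp is target - inst_mark() = 0x1000, next_ip = 0x1007, and
// the emitted field is 0x1000 - 7 = 0xFF9 = target - next_ip, so the CPU's
// next-instruction-pointer-plus-disp32 addressing resolves to the target.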

void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(reg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, XMMRegister xmmindex,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  assert(xmmindex->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(xmmindex),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(KRegister kreg, Address adr,
                             int post_addr_length) {
  emit_operand(kreg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               post_addr_length);
}

void Assembler::emit_operand(KRegister kreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(kreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  DEBUG_ONLY(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    assert(false, "shouldn't have that prefix");
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    goto again_after_prefix;

  case REX2:
    if ((0xFF & *ip++) & REX2BIT_W) {
      is_64bit = true;
    }
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    DEBUG_ONLY(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
   again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      goto again_after_size_prefix2;

    case REX2:
      if ((0xFF & *ip++) & REX2BIT_W) {
        is_64bit = true;
      }
      goto again_after_size_prefix2;

    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      DEBUG_ONLY(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      DEBUG_ONLY(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
    assert(((which == call32_operand || which == imm_operand) && is_64bit) ||
           (which == narrow_oop_operand && !is_64bit),
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
      // fall through: shares the extra opcode byte skip below
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
      // fall through: both take a trailing imm8
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x10: // movups
    case 0x11: // movups
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x29: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0x6F: // movdq
    case 0x7F: // movdq
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xD6: // movq
    case 0xFE: // paddd
      DEBUG_ONLY(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      DEBUG_ONLY(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      DEBUG_ONLY(has_disp32 = true);
      // fall through: the imm8 is handled below
    case 0xC5: // pextrw r, r, #8
      tail_size = 1; // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      DEBUG_ONLY(has_disp32 = true);
      tail_size = 1; // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      fatal("not handled: 0x0F%2X", 0xFF & *(ip-1));
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32-bit, in the case of cmpl, the imm might be an oop
    tail_size = 4;
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x15: // adc rax, #32
  case 0x05: // add rax, #32
  case 0x25: // and rax, #32
  case 0x3D: // cmp rax, #32
  case 0x0D: // or  rax, #32
  case 0x1D: // sbb rax, #32
  case 0x2D: // sub rax, #32
  case 0x35: // xor rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      DEBUG_ONLY(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xA8: // testb rax, #8
    return which == end_pc_operand ? ip + 1 : ip;
  case 0xA9: // testl/testq rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for the PINSRW and PEXTRW instructions,
    // but those have the 0x0F prefix and are handled when 0x0F is processed above.
    //
    // In 32-bit mode the VEX first bytes C4 and C5 alias onto the LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them, bits [7:6] are set in the VEX second byte, since a
    // ModRM byte cannot be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits, the REX and vvvv bits are stored inverted.
    //
    // Fortunately C2 doesn't generate these instructions, so we don't need
    // to check for them in the product version.

    // Examine the second byte; the first byte (C4 vs C5) selects the form.
    int vex_opcode;
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of the instruction (which == end_pc_operand).
    switch (vex_opcode) {
    case VEX_OPCODE_0F:
      switch (0xFF & *ip) {
      case 0x70: // pshufd r, r/a, #8
      case 0x71: // ps[rl|ra|ll]w r, #8
      case 0x72: // ps[rl|ra|ll]d r, #8
      case 0x73: // ps[rl|ra|ll]q r, #8
      case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
      case 0xC4: // pinsrw r, r, r/a, #8
      case 0xC5: // pextrw r/a, r, #8
      case 0xC6: // shufp[s|d] r, r, r/a, #8
        tail_size = 1; // the imm8
        break;
      }
      break;
    case VEX_OPCODE_0F_3A:
      tail_size = 1;
      break;
    }
    ip++; // skip opcode
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    break;
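
  // Decoding sketch for the VEX case above: for a 2-byte form such as
  // C5 F8 28 C1 (vmovaps xmm0, xmm1), the outer switch has consumed C5 on
  // entry, the VEX payload byte F8 is skipped, the opcode map defaults to 0F,
  // and the opcode 28 is skipped; a 3-byte C4 form instead reads the opcode
  // map from byte 2 and VEX.W from byte 3 before reaching the opcode.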

  case 0x62: // EVEX_4bytes
    assert(VM_Version::cpu_supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // No EVEX collisions: all instructions with the 0x62 opcode have EVEX
    // versions and are subopcodes of 0x66.
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of the instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x22: // pinsrd r, r/a, #8
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
    case 0x1F: // evpcmpd/evpcmpq
    case 0x3F: // evpcmpb/evpcmpw
      tail_size = 1; // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    DEBUG_ONLY(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
    case REX2:
      ip++;
      // fall-through
    default:
      ip++;
    }
    DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07; // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg 100][ss index base]
    // [00 reg 100][00 100 esp]
    // [00 reg base]
    // [00 reg 100][ss index 101][disp32]
    // [00 reg 101]              [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip; // caller wants the disp32
      ip += 4;     // skip the disp32
    }
    break;

  case 1:
    // [01 reg 100][ss index base][disp8]
    // [01 reg 100][00 100 esp]   [disp8]
    // [01 reg base]              [disp8]
    ip += 1; // skip the disp8
    break;

  case 2:
    // [10 reg 100][ss index base][disp32]
    // [10 reg 100][00 100 esp]   [disp32]
    // [10 reg base]              [disp32]
    if (which == disp32_operand)
      return ip; // caller wants the disp32
    ip += 4;     // skip the disp32
    break;

  case 3:
    // [11 reg base] (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
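
// Usage sketch: given the five bytes E8 12 34 56 78 (call rel32) at inst,
// locate_operand(inst, call32_operand) returns inst + 1 (the start of the
// displacement), and locate_next_instruction(inst) returns inst + 5 via the
// end_pc_operand walk above.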


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != nullptr && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand || format == narrow_oop_operand, "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand(Register reg, Address adr, int post_addr_length) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister reg, Address adr, int post_addr_length) {
  if (adr.isxmmindex()) {
    emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec, post_addr_length);
  } else {
    emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
  }
}

void Assembler::emit_opcode_prefix_and_encoding(int byte1, int byte2, int ocp_and_encoding, int byte3) {
  int opcode_prefix = (ocp_and_encoding & 0xFF00) >> 8;
  if (opcode_prefix != 0) {
    emit_int32(opcode_prefix, (unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF), byte3);
  } else {
    emit_int24((unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF), byte3);
  }
}

void Assembler::emit_opcode_prefix_and_encoding(int byte1, int byte2, int ocp_and_encoding) {
  int opcode_prefix = (ocp_and_encoding & 0xFF00) >> 8;
  if (opcode_prefix != 0) {
    emit_int24(opcode_prefix, (unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF));
  } else {
    emit_int16((unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF));
  }
}

void Assembler::emit_opcode_prefix_and_encoding(int byte1, int ocp_and_encoding) {
  int opcode_prefix = (ocp_and_encoding & 0xFF00) >> 8;
  if (opcode_prefix != 0) {
    emit_int16(opcode_prefix, (unsigned char)byte1 | (ocp_and_encoding & 0xFF));
  } else {
    emit_int8((unsigned char)byte1 | (ocp_and_encoding & 0xFF));
  }
}
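
// These three overloads share one packing convention (as read from the code
// above, not a documented contract): prefix_and_encode()-style helpers may
// place an optional escape byte, typically 0x0F for legacy map-1 opcodes, in
// bits 15:8 of the returned value, with the register encoding in the low
// byte. A zero high byte means "no extra prefix", so e.g. bswapl on a low
// register emits just 0F C8+reg.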

// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst, 0);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src, 0);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::eaddl(Register dst, Address src, int32_t imm32, bool no_flags) {
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
  eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
  emit_arith_operand(0x81, rax, src, imm32);
}

void Assembler::addb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::addb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x00);
  emit_operand(src, dst, 0);
}

void Assembler::addb(Register dst, int imm8) {
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0x80, 0xC0, dst, imm8);
}

void Assembler::addw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::addw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst, 0);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst, 0);
}

void Assembler::eaddl(Register dst, Address src1, Register src2, bool no_flags) {
  InstructionMark im(this);
  emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x01, no_flags, false /* is_map1 */, true /* is_commutative */);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::eaddl(Register dst, Register src, int32_t imm32, bool no_flags) {
  emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x81, 0xC0, no_flags);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src, 0);
}

void Assembler::eaddl(Register dst, Register src1, Address src2, bool no_flags) {
  InstructionMark im(this);
  emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x03, no_flags);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::eaddl(Register dst, Register src1, Register src2, bool no_flags) {
  emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x03, 0xC0, no_flags, true /* is_commutative */);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int32(0x0F,
             0x1F,
             0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
             0);   // 8-bit offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
  emit_int32(0x0F,
             0x1F,
             0x44,  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bit offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
  emit_int24(0x0F,
             0x1F,
             (unsigned char)0x80);
                 // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0); // 32-bit offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
  emit_int32(0x0F,
             0x1F,
             (unsigned char)0x84,
                    // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);    // 32-bit offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addss(XMMRegister dst, Address src) {
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src, 0);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (0xC0 | encode));
}

void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (0xC0 | encode));
}

void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src, 0);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src, 0);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, 0xC0 | encode);
}

void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, (0xC0 | encode));
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src, 0);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (0xC0 | encode));
}

void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (0xC0 | encode));
}

void Assembler::andb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8(0x20);
  emit_operand(src, dst, 0);
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, as_Register(4), dst, imm32);
}

void Assembler::eandl(Register dst, Address src, int32_t imm32, bool no_flags) {
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
  eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
  emit_arith_operand(0x81, rsp, src, imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::eandl(Register dst, Register src, int32_t imm32, bool no_flags) {
  emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x81, 0xE0, no_flags);
}

void Assembler::andl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x21);
  emit_operand(src, dst, 0);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src, 0);
}

void Assembler::eandl(Register dst, Register src1, Address src2, bool no_flags) {
  InstructionMark im(this);
  emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x23, no_flags);
}

void Assembler::eandl(Register dst, Address src1, Register src2, bool no_flags) {
  InstructionMark im(this);
  emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x21, no_flags, false /* is_map1 */, true /* is_commutative */);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::eandl(Register dst, Register src1, Register src2, bool no_flags) {
  emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x23, 0xC0, no_flags, true /* is_commutative */);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2, 0);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
  emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
  emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding(), false, true /* is_map1 */);
  emit_opcode_prefix_and_encoding((unsigned char)0xC8, encode);
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src, 0);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1718 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
1721 }
1722
1723 void Assembler::blsmskl(Register dst, Address src) {
1724 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1725 InstructionMark im(this);
1726 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1727 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
1728 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
1729 emit_int8((unsigned char)0xF3);
1730 emit_operand(rdx, src, 0);
1731 }
1732
1733 void Assembler::blsrl(Register dst, Register src) {
1734 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1735 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1736 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
1737 emit_int16((unsigned char)0xF3, (0xC0 | encode));
1738 }
1739
1740 void Assembler::blsrl(Register dst, Address src) {
1741 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1742 InstructionMark im(this);
1743 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1744 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
1745 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
1746 emit_int8((unsigned char)0xF3);
1747 emit_operand(rcx, src, 0);
1748 }
1749
1750 void Assembler::call(Label& L, relocInfo::relocType rtype) {
1751 if (L.is_bound()) {
1752 const int long_size = 5;
    int offs = (int)(target(L) - pc());
    assert(offs <= 0, "assembler error: bound label must be backward");
1755 InstructionMark im(this);
1756 // 1110 1000 #32-bit disp
1757 emit_int8((unsigned char)0xE8);
1758 emit_data(offs - long_size, rtype, disp32_operand);
1759 } else {
1760 InstructionMark im(this);
1761 // 1110 1000 #32-bit disp
1762 L.add_patch_at(code(), locator());
1763
1764 emit_int8((unsigned char)0xE8);
1765 emit_data(int(0), rtype, disp32_operand);
1766 }
1767 }
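
// A minimal usage sketch (label name hypothetical): a bound label emits its
// displacement directly, while an unbound one records a patch site that is
// resolved when the label binds.
//
//   Label target;
//   __ call(target, relocInfo::none);  // forward: E8 00 00 00 00, patched later
//   ...
//   __ bind(target);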
1768
1769 void Assembler::call(Register dst) {
1770 int encode = prefix_and_encode(dst->encoding());
1771 emit_int16((unsigned char)0xFF, (0xD0 | encode));
1772 }
1773
1775 void Assembler::call(Address adr) {
1776 assert(!adr._rspec.reloc()->is_data(), "should not use ExternalAddress for call");
1777 InstructionMark im(this);
1778 prefix(adr);
1779 emit_int8((unsigned char)0xFF);
1780 emit_operand(rdx, adr, 0);
1781 }
1782
1783 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
1784 InstructionMark im(this);
1785 emit_int8((unsigned char)0xE8);
1786 intptr_t disp = entry - (pc() + sizeof(int32_t));
1787 // Entry is null in case of a scratch emit.
1788 assert(entry == nullptr || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
  // Technically, we should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.
1791
1792 emit_data((int) disp, rspec, disp32_operand);
1793 }
1794
1795 void Assembler::cdql() {
1796 emit_int8((unsigned char)0x99);
1797 }
1798
1799 void Assembler::cld() {
1800 emit_int8((unsigned char)0xFC);
1801 }
1802
1803 void Assembler::cmovl(Condition cc, Register dst, Register src) {
1804 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
1805 emit_opcode_prefix_and_encoding(0x40 | cc, 0xC0, encode);
1806 }
1807
1808 void Assembler::ecmovl(Condition cc, Register dst, Register src1, Register src2) {
1809 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x40 | cc, false /* no_flags */, true /* is_map1 */, true /* swap */);
1810 }
1811
1812 void Assembler::cmovl(Condition cc, Register dst, Address src) {
1813 InstructionMark im(this);
1814 prefix(src, dst, false, true /* is_map1 */);
1815 emit_int8((0x40 | cc));
1816 emit_operand(dst, src, 0);
1817 }
1818
1819 void Assembler::ecmovl(Condition cc, Register dst, Register src1, Address src2) {
1820 InstructionMark im(this);
  emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, (0x40 | cc), false /* no_flags */, true /* is_map1 */);
1822 }
1823
1824 void Assembler::cmpb(Address dst, Register reg) {
1825 assert(reg->has_byte_register(), "must have byte register");
1826 InstructionMark im(this);
1827 prefix(dst, reg, true);
1828 emit_int8((unsigned char)0x38);
1829 emit_operand(reg, dst, 0);
1830 }
1831
1832 void Assembler::cmpb(Register reg, Address dst) {
1833 assert(reg->has_byte_register(), "must have byte register");
1834 InstructionMark im(this);
1835 prefix(dst, reg, true);
1836 emit_int8((unsigned char)0x3a);
1837 emit_operand(reg, dst, 0);
1838 }
1839
1840 void Assembler::cmpb(Address dst, int imm8) {
1841 InstructionMark im(this);
1842 prefix(dst);
1843 emit_int8((unsigned char)0x80);
1844 emit_operand(rdi, dst, 1);
1845 emit_int8(imm8);
1846 }
1847
1848 void Assembler::cmpb(Register dst, int imm8) {
1849 prefix(dst);
1850 emit_arith_b(0x80, 0xF8, dst, imm8);
1851 }
1852
1853 void Assembler::cmpl(Address dst, int32_t imm32) {
1854 InstructionMark im(this);
1855 prefix(dst);
1856 emit_arith_operand(0x81, as_Register(7), dst, imm32);
1857 }
1858
1859 void Assembler::cmpl(Register dst, int32_t imm32) {
1860 prefix(dst);
1861 emit_arith(0x81, 0xF8, dst, imm32);
1862 }
1863
1864 void Assembler::cmpl(Register dst, Register src) {
1865 (void) prefix_and_encode(dst->encoding(), src->encoding());
1866 emit_arith(0x3B, 0xC0, dst, src);
1867 }
1868
1869 void Assembler::cmpl(Register dst, Address src) {
1870 InstructionMark im(this);
1871 prefix(src, dst);
1872 emit_int8(0x3B);
1873 emit_operand(dst, src, 0);
1874 }
1875
1876 void Assembler::cmpl(Address dst, Register reg) {
1877 InstructionMark im(this);
1878 prefix(dst, reg);
1879 emit_int8(0x39);
1880 emit_operand(reg, dst, 0);
1881 }
1882
1883 void Assembler::cmpl_imm32(Address dst, int32_t imm32) {
1884 InstructionMark im(this);
1885 prefix(dst);
1886 emit_arith_operand_imm32(0x81, as_Register(7), dst, imm32);
1887 }
1888
1889 void Assembler::cmpw(Address dst, int imm16) {
1890 InstructionMark im(this);
1891 emit_int8(0x66);
1892 prefix(dst);
1893 emit_int8((unsigned char)0x81);
1894 emit_operand(rdi, dst, 2);
1895 emit_int16(imm16);
1896 }
1897
1898 void Assembler::cmpw(Address dst, Register reg) {
1899 InstructionMark im(this);
1900 emit_int8(0x66);
1901 prefix(dst, reg);
1902 emit_int8((unsigned char)0x39);
1903 emit_operand(reg, dst, 0);
1904 }
1905
// The 32-bit cmpxchg compares the value at adr with the contents of rax;
// if they are equal, reg is stored into adr, otherwise the value at adr is
// loaded into rax. The ZF is set if the compared values were equal, and
// cleared otherwise.
1909 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
1910 InstructionMark im(this);
1911 prefix(adr, reg, false, true /* is_map1 */);
1912 emit_int8((unsigned char)0xB1);
1913 emit_operand(reg, adr, 0);
1914 }
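
// A compare-and-swap sketch (register roles hypothetical; the lock prefix,
// required for atomicity, is emitted separately via lock()):
//
//   __ movl(rax, expected);              // rax holds the value to compare
//   __ lock();
//   __ cmpxchgl(new_value, field_addr);  // on success ZF=1 and adr is updated
//   __ jcc(Assembler::notEqual, retry);  // on failure rax holds the old value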
1915
1916 void Assembler::cmpxchgw(Register reg, Address adr) { // cmpxchg
1917 InstructionMark im(this);
1918 size_prefix();
1919 prefix(adr, reg, false, true /* is_map1 */);
1920 emit_int8((unsigned char)0xB1);
1921 emit_operand(reg, adr, 0);
1922 }
1923
// The 8-bit cmpxchg compares the value at adr with the contents of rax;
// if they are equal, reg is stored into adr, otherwise the value at adr is
// loaded into rax. The ZF is set if the compared values were equal, and
// cleared otherwise.
1927 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
1928 InstructionMark im(this);
1929 prefix(adr, reg, true, true /* is_map1 */);
1930 emit_int8((unsigned char)0xB0);
1931 emit_operand(reg, adr, 0);
1932 }
1933
1934 void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely, ucomisd comes out correctly.
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1939 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
1940 attributes.set_rex_vex_w_reverted();
1941 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
1942 emit_int8(0x2F);
1943 emit_operand(dst, src, 0);
1944 }
1945
1946 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
1947 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1948 attributes.set_rex_vex_w_reverted();
1949 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
1950 emit_int16(0x2F, (0xC0 | encode));
1951 }
1952
1953 void Assembler::comiss(XMMRegister dst, Address src) {
1954 InstructionMark im(this);
1955 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1956 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
1957 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1958 emit_int8(0x2F);
1959 emit_operand(dst, src, 0);
1960 }
1961
1962 void Assembler::comiss(XMMRegister dst, XMMRegister src) {
1963 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1964 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
1965 emit_int16(0x2F, (0xC0 | encode));
1966 }
1967
1968 void Assembler::cpuid() {
1969 emit_int16(0x0F, (unsigned char)0xA2);
1970 }
1971
1972 void Assembler::serialize() {
1973 assert(VM_Version::supports_serialize(), "");
1974 emit_int24(0x0F, 0x01, 0xE8);
1975 }
1976
// Opcode               / Instruction       Op/En  64-Bit Mode  Compat/Leg Mode  Description                 Implemented
// F2 0F 38 F0 /r       CRC32 r32, r/m8    RM     Valid        Valid            Accumulate CRC32 on r/m8.   v
// F2 REX 0F 38 F0 /r   CRC32 r32, r/m8*   RM     Valid        N.E.             Accumulate CRC32 on r/m8.   -
// F2 REX.W 0F 38 F0 /r CRC32 r64, r/m8    RM     Valid        N.E.             Accumulate CRC32 on r/m8.   -
//
// F2 0F 38 F1 /r       CRC32 r32, r/m16   RM     Valid        Valid            Accumulate CRC32 on r/m16.  v
//
// F2 0F 38 F1 /r       CRC32 r32, r/m32   RM     Valid        Valid            Accumulate CRC32 on r/m32.  v
//
// F2 REX.W 0F 38 F1 /r CRC32 r64, r/m64   RM     Valid        N.E.             Accumulate CRC32 on r/m64.  v
1987 void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
1988 assert(VM_Version::supports_sse4_2(), "");
1989 if (needs_eevex(crc, v)) {
1990 InstructionAttr attributes(AVX_128bit, /* rex_w */ sizeInBytes == 8, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
1991 int encode = vex_prefix_and_encode(crc->encoding(), 0, v->encoding(), sizeInBytes == 2 ? VEX_SIMD_66 : VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, true);
1992 emit_int16(sizeInBytes == 1 ? (unsigned char)0xF0 : (unsigned char)0xF1, (0xC0 | encode));
1993 } else {
1994 int8_t w = 0x01;
1995 Prefix p = Prefix_EMPTY;
1996
1997 emit_int8((unsigned char)0xF2);
1998 switch (sizeInBytes) {
1999 case 1:
2000 w = 0;
2001 break;
2002 case 2:
2003 case 4:
2004 break;
2005 case 8:
      // Note:
      // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
      //
      // Page B-72 Vol. 2C says:
      //   qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
      //   mem64 to qwreg  1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r/m
      // F0!!!
      // while 3-208 Vol. 2A says:
      //   F2 REX.W 0F 38 F1 /r CRC32 r64, r/m64 RM Valid N.E. Accumulate CRC32 on r/m64.
      //
      // The 0 in the last bit is reserved for a different flavor of this instruction:
      //   F2 REX.W 0F 38 F0 /r CRC32 r64, r/m8 RM Valid N.E. Accumulate CRC32 on r/m8.
2018 p = REX_W;
2019 break;
2020 default:
2021 assert(0, "Unsupported value for a sizeInBytes argument");
2022 break;
2023 }
2024 prefix(crc, v, p);
2025 emit_int32(0x0F,
2026 0x38,
2027 0xF0 | w,
2028 0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
2029 }
2030 }
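
// Worked encoding sketch for the legacy SSE4.2 path (no REX needed):
// crc32(rax, rbx, 4) emits F2 0F 38 F1 C3, i.e. prefix F2, escape 0F 38,
// opcode F1, and ModRM 0xC3 = 0xC0 | (rax << 3) | rbx; for sizeInBytes == 1
// the opcode byte becomes F0.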
2031
2032 void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
2033 assert(VM_Version::supports_sse4_2(), "");
2034 InstructionMark im(this);
2035 if (needs_eevex(crc, adr.base(), adr.index())) {
2036 InstructionAttr attributes(AVX_128bit, /* vex_w */ sizeInBytes == 8, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2037 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
2038 vex_prefix(adr, 0, crc->encoding(), sizeInBytes == 2 ? VEX_SIMD_66 : VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes);
2039 emit_int8(sizeInBytes == 1 ? (unsigned char)0xF0 : (unsigned char)0xF1);
2040 emit_operand(crc, adr, 0);
2041 } else {
2042 int8_t w = 0x01;
2043 Prefix p = Prefix_EMPTY;
2044
2045 emit_int8((uint8_t)0xF2);
2046 switch (sizeInBytes) {
2047 case 1:
2048 w = 0;
2049 break;
2050 case 2:
2051 case 4:
2052 break;
2053 case 8:
2054 p = REX_W;
2055 break;
2056 default:
2057 assert(0, "Unsupported value for a sizeInBytes argument");
2058 break;
2059 }
2060 prefix(crc, adr, p);
2061 emit_int24(0x0F, 0x38, (0xF0 | w));
2062 emit_operand(crc, adr, 0);
2063 }
2064 }
2065
2066 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
2067 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2068 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2069 emit_int16((unsigned char)0xE6, (0xC0 | encode));
2070 }
2071
2072 void Assembler::vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
2073 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
2074 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2075 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2076 emit_int16((unsigned char)0xE6, (0xC0 | encode));
2077 }
2078
2079 void Assembler::vcvtps2ph(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
2080 assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
2081 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /*uses_vl */ true);
2082 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
2083 emit_int24(0x1D, (0xC0 | encode), imm8);
2084 }
2085
2086 void Assembler::evcvtps2ph(Address dst, KRegister mask, XMMRegister src, int imm8, int vector_len) {
2087 assert(VM_Version::supports_evex(), "");
2088 InstructionMark im(this);
2089 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /*uses_vl */ true);
2090 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_64bit);
2091 attributes.reset_is_clear_context();
2092 attributes.set_embedded_opmask_register_specifier(mask);
2093 attributes.set_is_evex_instruction();
2094 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
2095 emit_int8(0x1D);
2096 emit_operand(src, dst, 1);
2097 emit_int8(imm8);
2098 }
2099
2100 void Assembler::vcvtps2ph(Address dst, XMMRegister src, int imm8, int vector_len) {
2101 assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
2102 InstructionMark im(this);
2103 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /*uses_vl */ true);
2104 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
2105 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
2106 emit_int8(0x1D);
2107 emit_operand(src, dst, 1);
2108 emit_int8(imm8);
2109 }
2110
2111 void Assembler::vcvtph2ps(XMMRegister dst, XMMRegister src, int vector_len) {
2112 assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
2113 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */false, /* no_mask_reg */ true, /* uses_vl */ true);
2114 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2115 emit_int16(0x13, (0xC0 | encode));
2116 }
2117
2118 void Assembler::vcvtph2ps(XMMRegister dst, Address src, int vector_len) {
2119 assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
2120 InstructionMark im(this);
2121 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /*uses_vl */ true);
2122 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
2123 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2124 emit_int8(0x13);
2125 emit_operand(dst, src, 0);
2126 }
2127
2128 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
2129 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2130 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2131 emit_int16(0x5B, (0xC0 | encode));
2132 }
2133
2134 void Assembler::vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
2135 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
2136 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2137 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2138 emit_int16(0x5B, (0xC0 | encode));
2139 }
2140
2141 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
2142 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2143 attributes.set_rex_vex_w_reverted();
2144 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2145 emit_int16(0x5A, (0xC0 | encode));
2146 }
2147
2148 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
2149 InstructionMark im(this);
2150 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2151 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2152 attributes.set_rex_vex_w_reverted();
2153 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2154 emit_int8(0x5A);
2155 emit_operand(dst, src, 0);
2156 }
2157
2158 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
2159 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2160 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
2161 emit_int16(0x2A, (0xC0 | encode));
2162 }
2163
2164 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
2165 InstructionMark im(this);
2166 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2167 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
2168 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2169 emit_int8(0x2A);
2170 emit_operand(dst, src, 0);
2171 }
2172
2173 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
2174 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2175 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes, true);
2176 emit_int16(0x2A, (0xC0 | encode));
2177 }
2178
2179 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
2180 InstructionMark im(this);
2181 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2182 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
2183 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2184 emit_int8(0x2A);
2185 emit_operand(dst, src, 0);
2186 }
2187
2188 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
2189 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2190 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes, true);
2191 emit_int16(0x2A, (0xC0 | encode));
2192 }
2193
2194 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
2195 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2196 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2197 emit_int16(0x5A, (0xC0 | encode));
2198 }
2199
2200 void Assembler::cvtss2sd(XMMRegister dst, Address src) {
2201 InstructionMark im(this);
2202 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2203 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
2204 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2205 emit_int8(0x5A);
2206 emit_operand(dst, src, 0);
2207 }
2208
2210 void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
2211 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2212 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2213 emit_int16(0x2C, (0xC0 | encode));
2214 }
2215
2216 void Assembler::cvtss2sil(Register dst, XMMRegister src) {
2217 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2218 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2219 emit_int16(0x2D, (0xC0 | encode));
2220 }
2221
2222 void Assembler::cvttss2sil(Register dst, XMMRegister src) {
2223 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2224 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2225 emit_int16(0x2C, (0xC0 | encode));
2226 }
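
// Note the opcode distinction above: 0x2D (cvtss2si) converts using the
// current MXCSR rounding mode, while 0x2C (cvttss2si/cvttsd2si) truncates
// toward zero.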
2227
2228 void Assembler::evcvttss2sisl(Register dst, XMMRegister src) {
2229 assert(VM_Version::supports_avx10_2(), "");
2230 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2231 attributes.set_is_evex_instruction();
2232 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
2233 emit_int16(0x6D, (0xC0 | encode));
2234 }
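
// The trailing "s" in these AVX10.2 mnemonics denotes the saturating flavor
// of the truncating converts: out-of-range inputs saturate to the destination
// type's minimum/maximum instead of producing the integer-indefinite value.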
2235
2236 void Assembler::evcvttss2sisl(Register dst, Address src) {
2237 assert(VM_Version::supports_avx10_2(), "");
2238 InstructionMark im(this);
2239 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2240 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
2241 attributes.set_is_evex_instruction();
2242 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
2243 emit_int8((unsigned char)0x6D);
2244 emit_operand(dst, src, 0);
2245 }
2246
2247 void Assembler::evcvttss2sisq(Register dst, XMMRegister src) {
2248 assert(VM_Version::supports_avx10_2(), "");
2249 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2250 attributes.set_is_evex_instruction();
2251 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
2252 emit_int16(0x6D, (0xC0 | encode));
2253 }
2254
2255 void Assembler::evcvttss2sisq(Register dst, Address src) {
2256 assert(VM_Version::supports_avx10_2(), "");
2257 InstructionMark im(this);
2258 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2259 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
2260 attributes.set_is_evex_instruction();
2261 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
2262 emit_int8((unsigned char)0x6D);
2263 emit_operand(dst, src, 0);
2264 }
2265
2266 void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
2267 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
2268 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2269 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2270 emit_int16((unsigned char)0xE6, (0xC0 | encode));
2271 }
2272
2273 void Assembler::pabsb(XMMRegister dst, XMMRegister src) {
2274 assert(VM_Version::supports_ssse3(), "");
2275 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2276 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2277 emit_int16(0x1C, (0xC0 | encode));
2278 }
2279
2280 void Assembler::pabsw(XMMRegister dst, XMMRegister src) {
2281 assert(VM_Version::supports_ssse3(), "");
2282 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2283 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2284 emit_int16(0x1D, (0xC0 | encode));
2285 }
2286
2287 void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
2288 assert(VM_Version::supports_ssse3(), "");
2289 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2290 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2291 emit_int16(0x1E, (0xC0 | encode));
2292 }
2293
2294 void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
2295 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
2296 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
2297 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported");
2298 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2299 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2300 emit_int16(0x1C, (0xC0 | encode));
2301 }
2302
2303 void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
2304 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
2305 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
2306 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "");
2307 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
2308 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2309 emit_int16(0x1D, (0xC0 | encode));
2310 }
2311
2312 void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_evex() : false, "");
2316 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2317 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2318 emit_int16(0x1E, (0xC0 | encode));
2319 }
2320
2321 void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
2322 assert(UseAVX > 2, "");
2323 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2324 attributes.set_is_evex_instruction();
2325 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
2326 emit_int16(0x1F, (0xC0 | encode));
2327 }
2328
2329 void Assembler::vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) {
2330 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
2331 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2332 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2333 emit_int16(0x5A, (0xC0 | encode));
2334 }
2335
void Assembler::vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // Mark W as revertible before encoding so that non-EVEX encodings drop it.
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}
2343
2344 void Assembler::vcvttps2dq(XMMRegister dst, XMMRegister src, int vector_len) {
2345 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
2346 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2347 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2348 emit_int16(0x5B, (0xC0 | encode));
2349 }
2350
2351 void Assembler::evcvttps2dqs(XMMRegister dst, XMMRegister src, int vector_len) {
2352 assert(VM_Version::supports_avx10_2(), "");
2353 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2354 attributes.set_is_evex_instruction();
2355 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
2356 emit_int16(0x6D, (0xC0 | encode));
2357 }
2358
2359 void Assembler::evcvttps2dqs(XMMRegister dst, Address src, int vector_len) {
2360 assert(VM_Version::supports_avx10_2(), "");
2361 InstructionMark im(this);
2362 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2363 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
2364 attributes.set_is_evex_instruction();
2365 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
2366 emit_int8((unsigned char)0x6D);
2367 emit_operand(dst, src, 0);
2368 }
2369
2370 void Assembler::vcvttpd2dq(XMMRegister dst, XMMRegister src, int vector_len) {
2371 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
2372 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2373 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2374 emit_int16((unsigned char)0xE6, (0xC0 | encode));
2375 }
2376
2377 void Assembler::evcvttpd2dqs(XMMRegister dst, XMMRegister src, int vector_len) {
2378 assert(VM_Version::supports_avx10_2(), "");
2379 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2380 attributes.set_is_evex_instruction();
2381 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
2382 emit_int16(0x6D, (0xC0 | encode));
2383 }
2384
2385 void Assembler::evcvttpd2dqs(XMMRegister dst, Address src, int vector_len) {
2386 assert(VM_Version::supports_avx10_2(), "");
2387 InstructionMark im(this);
2388 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2389 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
2390 attributes.set_is_evex_instruction();
2391 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
2392 emit_int8((unsigned char)0x6D);
2393 emit_operand(dst, src, 0);
2394 }
2395
2396 void Assembler::vcvtps2dq(XMMRegister dst, XMMRegister src, int vector_len) {
2397 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
2398 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2399 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2400 emit_int16(0x5B, (0xC0 | encode));
2401 }
2402
2403 void Assembler::evcvttps2qq(XMMRegister dst, XMMRegister src, int vector_len) {
2404 assert(VM_Version::supports_avx512dq(), "");
2405 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2406 attributes.set_is_evex_instruction();
2407 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2408 emit_int16(0x7A, (0xC0 | encode));
2409 }
2410
2411 void Assembler::evcvttps2qqs(XMMRegister dst, XMMRegister src, int vector_len) {
2412 assert(VM_Version::supports_avx10_2(), "");
2413 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2414 attributes.set_is_evex_instruction();
2415 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP5, &attributes);
2416 emit_int16(0x6D, (0xC0 | encode));
2417 }
2418
2419 void Assembler::evcvttps2qqs(XMMRegister dst, Address src, int vector_len) {
2420 assert(VM_Version::supports_avx10_2(), "");
2421 InstructionMark im(this);
2422 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2423 attributes.set_address_attributes(/* tuple_type */ EVEX_HV, /* input_size_in_bits */ EVEX_32bit);
2424 attributes.set_is_evex_instruction();
2425 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP5, &attributes);
2426 emit_int8((unsigned char)0x6D);
2427 emit_operand(dst, src, 0);
2428 }
2429
2430 void Assembler::evcvtpd2qq(XMMRegister dst, XMMRegister src, int vector_len) {
2431 assert(VM_Version::supports_avx512dq(), "");
2432 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2433 attributes.set_is_evex_instruction();
2434 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2435 emit_int16(0x7B, (0xC0 | encode));
2436 }
2437
2438 void Assembler::evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
2439 assert(VM_Version::supports_avx512dq(), "");
2440 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2441 attributes.set_is_evex_instruction();
2442 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2443 emit_int16(0x5B, (0xC0 | encode));
2444 }
2445
2446 void Assembler::evcvttpd2qq(XMMRegister dst, XMMRegister src, int vector_len) {
2447 assert(VM_Version::supports_avx512dq(), "");
2448 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2449 attributes.set_is_evex_instruction();
2450 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2451 emit_int16(0x7A, (0xC0 | encode));
2452 }
2453
2454 void Assembler::evcvttpd2qqs(XMMRegister dst, XMMRegister src, int vector_len) {
2455 assert(VM_Version::supports_avx10_2(), "");
2456 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2457 attributes.set_is_evex_instruction();
2458 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP5, &attributes);
2459 emit_int16(0x6D, (0xC0 | encode));
2460 }
2461
2462 void Assembler::evcvttpd2qqs(XMMRegister dst, Address src, int vector_len) {
2463 assert(VM_Version::supports_avx10_2(), "");
2464 InstructionMark im(this);
2465 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2466 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
2467 attributes.set_is_evex_instruction();
2468 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP5, &attributes);
2469 emit_int8((unsigned char)0x6D);
2470 emit_operand(dst, src, 0);
2471 }
2472
2473 void Assembler::evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
2474 assert(VM_Version::supports_avx512dq(), "");
2475 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2476 attributes.set_is_evex_instruction();
2477 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2478 emit_int16((unsigned char)0xE6, (0xC0 | encode));
2479 }
2480
2481 void Assembler::evpmovwb(XMMRegister dst, XMMRegister src, int vector_len) {
2482 assert(VM_Version::supports_avx512bw(), "");
2483 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2484 attributes.set_is_evex_instruction();
2485 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2486 emit_int16(0x30, (0xC0 | encode));
2487 }
2488
2489 void Assembler::evpmovdw(XMMRegister dst, XMMRegister src, int vector_len) {
2490 assert(UseAVX > 2, "");
2491 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2492 attributes.set_is_evex_instruction();
2493 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2494 emit_int16(0x33, (0xC0 | encode));
2495 }
2496
2497 void Assembler::evpmovdb(XMMRegister dst, XMMRegister src, int vector_len) {
2498 assert(UseAVX > 2, "");
2499 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2500 attributes.set_is_evex_instruction();
2501 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2502 emit_int16(0x31, (0xC0 | encode));
2503 }
2504
2505 void Assembler::evpmovqd(XMMRegister dst, XMMRegister src, int vector_len) {
2506 assert(UseAVX > 2, "");
2507 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2508 attributes.set_is_evex_instruction();
2509 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2510 emit_int16(0x35, (0xC0 | encode));
2511 }
2512
2513 void Assembler::evpmovqb(XMMRegister dst, XMMRegister src, int vector_len) {
2514 assert(UseAVX > 2, "");
2515 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2516 attributes.set_is_evex_instruction();
2517 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2518 emit_int16(0x32, (0xC0 | encode));
2519 }
2520
2521 void Assembler::evpmovqw(XMMRegister dst, XMMRegister src, int vector_len) {
2522 assert(UseAVX > 2, "");
2523 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2524 attributes.set_is_evex_instruction();
2525 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2526 emit_int16(0x34, (0xC0 | encode));
2527 }
2528
2529 void Assembler::evpmovsqd(XMMRegister dst, XMMRegister src, int vector_len) {
2530 assert(UseAVX > 2, "");
2531 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2532 attributes.set_is_evex_instruction();
2533 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
2534 emit_int16(0x25, (0xC0 | encode));
2535 }
2536
2537 void Assembler::decl(Address dst) {
2538 // Don't use it directly. Use MacroAssembler::decrement() instead.
2539 InstructionMark im(this);
2540 prefix(dst);
2541 emit_int8((unsigned char)0xFF);
2542 emit_operand(rcx, dst, 0);
2543 }
2544
2545 void Assembler::edecl(Register dst, Address src, bool no_flags) {
2546 InstructionMark im(this);
2547 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2548 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
2549 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2550 emit_int8((unsigned char)0xFF);
2551 emit_operand(rcx, src, 0);
2552 }
2553
2554 void Assembler::divsd(XMMRegister dst, Address src) {
2555 InstructionMark im(this);
2556 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2557 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
2558 attributes.set_rex_vex_w_reverted();
2559 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2560 emit_int8(0x5E);
2561 emit_operand(dst, src, 0);
2562 }
2563
2564 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
2565 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2566 attributes.set_rex_vex_w_reverted();
2567 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2568 emit_int16(0x5E, (0xC0 | encode));
2569 }
2570
2571 void Assembler::divss(XMMRegister dst, Address src) {
2572 InstructionMark im(this);
2573 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2574 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
2575 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2576 emit_int8(0x5E);
2577 emit_operand(dst, src, 0);
2578 }
2579
2580 void Assembler::divss(XMMRegister dst, XMMRegister src) {
2581 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2582 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
2583 emit_int16(0x5E, (0xC0 | encode));
2584 }
2585
2586 void Assembler::hlt() {
2587 emit_int8((unsigned char)0xF4);
2588 }
2589
2590 void Assembler::idivl(Register src) {
2591 int encode = prefix_and_encode(src->encoding());
2592 emit_int16((unsigned char)0xF7, (0xF8 | encode));
2593 }
2594
2595 void Assembler::eidivl(Register src, bool no_flags) { // Signed
2596 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2597 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2598 emit_int16((unsigned char)0xF7, (0xF8 | encode));
2599 }
2600
2601 void Assembler::divl(Register src) { // Unsigned
2602 int encode = prefix_and_encode(src->encoding());
2603 emit_int16((unsigned char)0xF7, (0xF0 | encode));
2604 }
2605
2606 void Assembler::edivl(Register src, bool no_flags) { // Unsigned
2607 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2608 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2609 emit_int16((unsigned char)0xF7, (0xF0 | encode));
2610 }
2611
2612 void Assembler::imull(Register src) {
2613 int encode = prefix_and_encode(src->encoding());
2614 emit_int16((unsigned char)0xF7, (0xE8 | encode));
2615 }
2616
2617 void Assembler::eimull(Register src, bool no_flags) {
2618 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2619 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2620 emit_int16((unsigned char)0xF7, (0xE8 | encode));
2621 }
2622
2623 void Assembler::imull(Register dst, Register src) {
2624 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
2625 emit_opcode_prefix_and_encoding((unsigned char)0xAF, 0xC0, encode);
2626 }
2627
2628 void Assembler::eimull(Register dst, Register src1, Register src2, bool no_flags) {
2629 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0xAF, no_flags, true /* is_map1 */, true /* swap */, true /* is_commutative */);
2630 }
2631
2632 void Assembler::imull(Register dst, Address src, int32_t value) {
2633 InstructionMark im(this);
2634 prefix(src, dst);
2635 if (is8bit(value)) {
2636 emit_int8((unsigned char)0x6B);
2637 emit_operand(dst, src, 1);
2638 emit_int8(value);
2639 } else {
2640 emit_int8((unsigned char)0x69);
2641 emit_operand(dst, src, 4);
2642 emit_int32(value);
2643 }
2644 }
2645
2646 void Assembler::eimull(Register dst, Address src, int32_t value, bool no_flags) {
2647 InstructionMark im(this);
2648 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2649 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
2650 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2651 if (is8bit(value)) {
2652 emit_int8((unsigned char)0x6B);
2653 emit_operand(dst, src, 1);
2654 emit_int8(value);
2655 } else {
2656 emit_int8((unsigned char)0x69);
2657 emit_operand(dst, src, 4);
2658 emit_int32(value);
2659 }
2660 }
2661
2662 void Assembler::imull(Register dst, Register src, int value) {
2663 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2664 if (is8bit(value)) {
2665 emit_int24(0x6B, (0xC0 | encode), value & 0xFF);
2666 } else {
2667 emit_int16(0x69, (0xC0 | encode));
2668 emit_int32(value);
2669 }
2670 }
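
// Worked encoding sketch (registers hypothetical): imull(rax, rbx, 7) fits an
// imm8 and emits 6B C3 07, while imull(rax, rbx, 1000) needs an imm32 and
// emits 69 C3 E8 03 00 00 (ModRM 0xC3 = 0xC0 | (rax << 3) | rbx, immediate
// in little-endian order).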
2671
2672 void Assembler::eimull(Register dst, Register src, int value, bool no_flags) {
2673 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2674 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2675 if (is8bit(value)) {
2676 emit_int24(0x6B, (0xC0 | encode), value & 0xFF);
2677 } else {
2678 emit_int16(0x69, (0xC0 | encode));
2679 emit_int32(value);
2680 }
2681 }
2682
2683 void Assembler::imull(Register dst, Address src) {
2684 InstructionMark im(this);
2685 prefix(src, dst, false, true /* is_map1 */);
2686 emit_int8((unsigned char)0xAF);
2687 emit_operand(dst, src, 0);
2688 }
2689
2690 void Assembler::eimull(Register dst, Register src1, Address src2, bool no_flags) {
2691 InstructionMark im(this);
2692 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, (unsigned char)0xAF, no_flags, true /* is_map1 */);
2693 }
2694
2695 void Assembler::incl(Address dst) {
2696 // Don't use it directly. Use MacroAssembler::increment() instead.
2697 InstructionMark im(this);
2698 prefix(dst);
2699 emit_int8((unsigned char)0xFF);
2700 emit_operand(rax, dst, 0);
2701 }
2702
2703 void Assembler::eincl(Register dst, Address src, bool no_flags) {
2704 // Don't use it directly. Use MacroAssembler::increment() instead.
2705 InstructionMark im(this);
2706 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2707 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
2708 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2709 emit_int8((unsigned char)0xFF);
2710 emit_operand(rax, src, 0);
2711 }
2712
2713 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
2714 InstructionMark im(this);
2715 assert((0 <= cc) && (cc < 16), "illegal cc");
2716 if (L.is_bound()) {
2717 address dst = target(L);
2718 assert(dst != nullptr, "jcc most probably wrong");
2719
2720 const int short_size = 2;
2721 const int long_size = 6;
2722 int offs = checked_cast<int>((intptr_t)dst - (intptr_t)pc());
2723 if (maybe_short && is8bit(offs - short_size)) {
2724 // 0111 tttn #8-bit disp
2725 emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
2726 } else {
2727 // 0000 1111 1000 tttn #32-bit disp
2728 assert(is_simm32(offs - long_size),
2729 "must be 32bit offset (call4)");
2730 emit_int16(0x0F, (0x80 | cc));
2731 emit_int32(offs - long_size);
2732 }
2733 } else {
    // Note: we could eliminate conditional jumps to this jump if the
    // condition is the same; however, that seems to be a rather unlikely case.
    // Note: use jccb() if the label to be bound is very close, to get
    // an 8-bit displacement.
2738 L.add_patch_at(code(), locator());
2739 emit_int16(0x0F, (0x80 | cc));
2740 emit_int32(0);
2741 }
2742 }
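
// Encoding sketch: a bound label within rel8 range may take the 2-byte short
// form, e.g. jcc(Assembler::equal, L) can emit 74 xx, while the long form is
// 0F 84 xx xx xx xx (6 bytes). Unbound forward jumps always reserve the long
// form here, since the final distance is unknown until the label binds.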
2743
2744 void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
2745 if (L.is_bound()) {
2746 const int short_size = 2;
2747 address entry = target(L);
2748 #ifdef ASSERT
2749 int dist = checked_cast<int>((intptr_t)entry - (intptr_t)(pc() + short_size));
2750 int delta = short_branch_delta();
2751 if (delta != 0) {
2752 dist += (dist < 0 ? (-delta) :delta);
2753 }
2754 assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
2755 #endif
2756 int offs = checked_cast<int>((intptr_t)entry - (intptr_t)pc());
2757 // 0111 tttn #8-bit disp
2758 emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
2759 } else {
2760 InstructionMark im(this);
2761 L.add_patch_at(code(), locator(), file, line);
2762 emit_int16(0x70 | cc, 0);
2763 }
2764 }
2765
2766 void Assembler::jmp(Address adr) {
2767 InstructionMark im(this);
2768 prefix(adr);
2769 emit_int8((unsigned char)0xFF);
2770 emit_operand(rsp, adr, 0);
2771 }
2772
2773 void Assembler::jmp(Label& L, bool maybe_short) {
2774 if (L.is_bound()) {
2775 address entry = target(L);
2776 assert(entry != nullptr, "jmp most probably wrong");
2777 InstructionMark im(this);
2778 const int short_size = 2;
2779 const int long_size = 5;
2780 int offs = checked_cast<int>(entry - pc());
2781 if (maybe_short && is8bit(offs - short_size)) {
2782 emit_int16((unsigned char)0xEB, ((offs - short_size) & 0xFF));
2783 } else {
2784 emit_int8((unsigned char)0xE9);
2785 emit_int32(offs - long_size);
2786 }
2787 } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure the
    // forward jump stays within the +127-byte reach of an 8-bit
    // displacement, use jmpb to force the short form.
2792 InstructionMark im(this);
2793 L.add_patch_at(code(), locator());
2794 emit_int8((unsigned char)0xE9);
2795 emit_int32(0);
2796 }
2797 }
2798
2799 void Assembler::jmp(Register entry) {
2800 int encode = prefix_and_encode(entry->encoding());
2801 emit_int16((unsigned char)0xFF, (0xE0 | encode));
2802 }
2803
2804 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
2805 InstructionMark im(this);
2806 emit_int8((unsigned char)0xE9);
2807 assert(dest != nullptr, "must have a target");
2808 intptr_t disp = dest - (pc() + sizeof(int32_t));
2809 assert(is_simm32(disp), "must be 32bit offset (jmp)");
2810 emit_data(checked_cast<int32_t>(disp), rspec, call32_operand);
2811 }
2812
2813 void Assembler::jmpb_0(Label& L, const char* file, int line) {
2814 if (L.is_bound()) {
2815 const int short_size = 2;
2816 address entry = target(L);
2817 assert(entry != nullptr, "jmp most probably wrong");
2818 #ifdef ASSERT
2819 int dist = checked_cast<int>((intptr_t)entry - (intptr_t)(pc() + short_size));
2820 int delta = short_branch_delta();
2821 if (delta != 0) {
      dist += (dist < 0 ? -delta : delta);
2823 }
2824 assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
2825 #endif
2826 intptr_t offs = entry - pc();
2827 emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF);
2828 } else {
2829 InstructionMark im(this);
2830 L.add_patch_at(code(), locator(), file, line);
2831 emit_int16((unsigned char)0xEB, 0);
2832 }
2833 }
2834
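// ldmxcsr is encoded as 0F AE /2: the ModRM reg field holds the opcode
// extension 2, which is why as_Register(2) rather than a real register is
// passed to emit_operand() below.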
2835 void Assembler::ldmxcsr( Address src) {
2836 // This instruction should be SSE encoded with the REX2 prefix when an
2837 // extended GPR is present. To be consistent when UseAPX is enabled, use
2838 // this encoding even when an extended GPR is not used.
2839 if (UseAVX > 0 && !UseAPX ) {
2840 InstructionMark im(this);
2841 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
2842 vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2843 emit_int8((unsigned char)0xAE);
2844 emit_operand(as_Register(2), src, 0);
2845 } else {
2846 InstructionMark im(this);
2847 prefix(src, true /* is_map1 */);
2848 emit_int8((unsigned char)0xAE);
2849 emit_operand(as_Register(2), src, 0);
2850 }
2851 }
2852
2853 void Assembler::leal(Register dst, Address src) {
2854 InstructionMark im(this);
2855 prefix(src, dst);
2856 emit_int8((unsigned char)0x8D);
2857 emit_operand(dst, src, 0);
2858 }
2859
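// Loads the address of a label as lea dst, [rip+disp32]. For an unbound label
// the 32-bit displacement is emitted as zero and patched when the label is
// bound. A minimal sketch (hypothetical label and scratch register):
//
//   Label entry;
//   __ lea(rscratch1, entry);  // rscratch1 = &entry once the label is bound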
2860 void Assembler::lea(Register dst, Label& L) {
2861 emit_prefix_and_int8(get_prefixq(Address(), dst), (unsigned char)0x8D);
2862 if (!L.is_bound()) {
2863 // Patch @0x8D opcode
2864 L.add_patch_at(code(), CodeBuffer::locator(offset() - 1, sect()));
2865 // Register and [rip+disp] operand
2866 emit_modrm(0b00, raw_encode(dst), 0b101);
2867 emit_int32(0);
2868 } else {
2869 // Register and [rip+disp] operand
2870 emit_modrm(0b00, raw_encode(dst), 0b101);
2871 // Adjust displacement by sizeof lea instruction
2872 int32_t disp = checked_cast<int32_t>(target(L) - (pc() + sizeof(int32_t)));
2873 assert(is_simm32(disp), "must be 32bit offset [rip+offset]");
2874 emit_int32(disp);
2875 }
2876 }
2877
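// lfence, mfence and sfence (below) are all 0F AE with a fixed ModRM byte:
// E8, F0 and F8 respectively, i.e. mod=11 with opcode extensions /5, /6, /7.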
2878 void Assembler::lfence() {
2879 emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8);
2880 }
2881
2882 void Assembler::lock() {
2883 emit_int8((unsigned char)0xF0);
2884 }
2885
2886 void Assembler::size_prefix() {
2887 emit_int8(0x66);
2888 }
2889
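// The F3 prefix is mandatory for lzcnt; on CPUs without LZCNT the same byte
// sequence decodes as BSR (which yields a bit index, not a zero count),
// hence the asserts below.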
2890 void Assembler::lzcntl(Register dst, Register src) {
2891 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
2892 emit_int8((unsigned char)0xF3);
2893 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
2894 emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
2895 }
2896
2897 void Assembler::elzcntl(Register dst, Register src, bool no_flags) {
2898 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
2899 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2900 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2901 emit_int16((unsigned char)0xF5, (0xC0 | encode));
2902 }
2903
2904 void Assembler::lzcntl(Register dst, Address src) {
2905 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
2906 InstructionMark im(this);
2907 emit_int8((unsigned char)0xF3);
2908 prefix(src, dst, false, true /* is_map1 */);
2909 emit_int8((unsigned char)0xBD);
2910 emit_operand(dst, src, 0);
2911 }
2912
2913 void Assembler::elzcntl(Register dst, Address src, bool no_flags) {
2914 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
2915 InstructionMark im(this);
2916 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
2917 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
2918 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
2919 emit_int8((unsigned char)0xF5);
2920 emit_operand(dst, src, 0);
2921 }
2922
2923 // Emit mfence instruction
2924 void Assembler::mfence() {
2925 emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0);
2926 }
2927
2928 // Emit sfence instruction
2929 void Assembler::sfence() {
2930 emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8);
2931 }
2932
2933 void Assembler::mov(Register dst, Register src) {
2934 movq(dst, src);
2935 }
2936
2937 void Assembler::movapd(XMMRegister dst, Address src) {
2938 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2939 InstructionMark im(this);
2940 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2941 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
2942 attributes.set_rex_vex_w_reverted();
2943 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2944 emit_int8(0x28);
2945 emit_operand(dst, src, 0);
2946 }
2947
2948 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
2949 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
2950 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2951 attributes.set_rex_vex_w_reverted();
2952 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
2953 emit_int16(0x28, (0xC0 | encode));
2954 }
2955
2956 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
2957 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
2958 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2959 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2960 emit_int16(0x28, (0xC0 | encode));
2961 }
2962
2963 void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
2964 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2965 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
2966 emit_int16(0x16, (0xC0 | encode));
2967 }
2968
2969 void Assembler::movb(Register dst, Address src) {
2970 InstructionMark im(this);
2971 prefix(src, dst, true);
2972 emit_int8((unsigned char)0x8A);
2973 emit_operand(dst, src, 0);
2974 }
2975
2976 void Assembler::movddup(XMMRegister dst, XMMRegister src) {
2977 assert(VM_Version::supports_sse3(), "");
2978 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
2979 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2980 attributes.set_rex_vex_w_reverted();
2981 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2982 emit_int16(0x12, 0xC0 | encode);
2983 }
2984
2985 void Assembler::movddup(XMMRegister dst, Address src) {
2986 assert(VM_Version::supports_sse3(), "");
2987 InstructionMark im(this);
2988 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2989 attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
2990 attributes.set_rex_vex_w_reverted();
2991 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2992 emit_int8(0x12);
2993 emit_operand(dst, src, 0);
2994 }
2995
2996 void Assembler::vmovddup(XMMRegister dst, Address src, int vector_len) {
2997 assert(VM_Version::supports_avx(), "");
2998 InstructionMark im(this);
2999 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3000 attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
3001 attributes.set_rex_vex_w_reverted();
3002 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3003 emit_int8(0x12);
3004 emit_operand(dst, src, 0);
3005 }
3006
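// The kmov* family copies opmask registers. The opcode selects the direction
// (0x90 k <- k/mem, 0x91 mem <- k, 0x92 k <- gpr, 0x93 gpr <- k) while the
// SIMD prefix and VEX.W together select the operand width, e.g. 66+W0 for
// the byte forms and F2+W1 for the quadword GPR forms.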
3007 void Assembler::kmovbl(KRegister dst, KRegister src) {
3008 assert(VM_Version::supports_avx512dq(), "");
3009 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3010 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3011 emit_int16((unsigned char)0x90, (0xC0 | encode));
3012 }
3013
3014 void Assembler::kmovbl(KRegister dst, Register src) {
3015 assert(VM_Version::supports_avx512dq(), "");
3016 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3017 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
3018 emit_int16((unsigned char)0x92, (0xC0 | encode));
3019 }
3020
3021 void Assembler::kmovbl(Register dst, KRegister src) {
3022 assert(VM_Version::supports_avx512dq(), "");
3023 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3024 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3025 emit_int16((unsigned char)0x93, (0xC0 | encode));
3026 }
3027
3028 void Assembler::kmovwl(KRegister dst, Register src) {
3029 assert(VM_Version::supports_evex(), "");
3030 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3031 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes, true);
3032 emit_int16((unsigned char)0x92, (0xC0 | encode));
3033 }
3034
3035 void Assembler::kmovwl(Register dst, KRegister src) {
3036 assert(VM_Version::supports_evex(), "");
3037 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3038 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3039 emit_int16((unsigned char)0x93, (0xC0 | encode));
3040 }
3041
3042 void Assembler::kmovwl(KRegister dst, Address src) {
3043 assert(VM_Version::supports_evex(), "");
3044 InstructionMark im(this);
3045 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3046 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
3047 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3048 emit_int8((unsigned char)0x90);
3049 emit_operand(dst, src, 0);
3050 }
3051
3052 void Assembler::kmovwl(Address dst, KRegister src) {
3053 assert(VM_Version::supports_evex(), "");
3054 InstructionMark im(this);
3055 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3056 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
3057 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3058 emit_int8((unsigned char)0x91);
3059 emit_operand(src, dst, 0);
3060 }
3061
3062 void Assembler::kmovwl(KRegister dst, KRegister src) {
3063 assert(VM_Version::supports_evex(), "");
3064 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3065 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3066 emit_int16((unsigned char)0x90, (0xC0 | encode));
3067 }
3068
3069 void Assembler::kmovdl(KRegister dst, Register src) {
3070 assert(VM_Version::supports_avx512bw(), "");
3071 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3072 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
3073 emit_int16((unsigned char)0x92, (0xC0 | encode));
3074 }
3075
3076 void Assembler::kmovdl(Register dst, KRegister src) {
3077 assert(VM_Version::supports_avx512bw(), "");
3078 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3079 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3080 emit_int16((unsigned char)0x93, (0xC0 | encode));
3081 }
3082
3083 void Assembler::kmovql(KRegister dst, KRegister src) {
3084 assert(VM_Version::supports_avx512bw(), "");
3085 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3086 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3087 emit_int16((unsigned char)0x90, (0xC0 | encode));
3088 }
3089
3090 void Assembler::kmovql(KRegister dst, Address src) {
3091 assert(VM_Version::supports_avx512bw(), "");
3092 InstructionMark im(this);
3093 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3094 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
3095 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3096 emit_int8((unsigned char)0x90);
3097 emit_operand(dst, src, 0);
3098 }
3099
3100 void Assembler::kmovql(Address dst, KRegister src) {
3101 assert(VM_Version::supports_avx512bw(), "");
3102 InstructionMark im(this);
3103 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3104 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
3105 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3106 emit_int8((unsigned char)0x91);
3107 emit_operand(src, dst, 0);
3108 }
3109
3110 void Assembler::kmovql(KRegister dst, Register src) {
3111 assert(VM_Version::supports_avx512bw(), "");
3112 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3113 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
3114 emit_int16((unsigned char)0x92, (0xC0 | encode));
3115 }
3116
3117 void Assembler::kmovql(Register dst, KRegister src) {
3118 assert(VM_Version::supports_avx512bw(), "");
3119 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3120 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3121 emit_int16((unsigned char)0x93, (0xC0 | encode));
3122 }
3123
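// Opmask logical operations use one opcode per operation (0x41 kand,
// 0x44 knot, 0x45 kor, 0x46 kxnor, 0x47 kxor); as with kmov, the SIMD
// prefix and VEX.W distinguish the byte/word/dword/qword variants.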
3124 void Assembler::knotwl(KRegister dst, KRegister src) {
3125 assert(VM_Version::supports_evex(), "");
3126 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3127 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3128 emit_int16(0x44, (0xC0 | encode));
3129 }
3130
3131 void Assembler::knotbl(KRegister dst, KRegister src) {
3132 assert(VM_Version::supports_evex(), "");
3133 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3134 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3135 emit_int16(0x44, (0xC0 | encode));
3136 }
3137
3138 void Assembler::korbl(KRegister dst, KRegister src1, KRegister src2) {
3139 assert(VM_Version::supports_avx512dq(), "");
3140 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3141 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3142 emit_int16(0x45, (0xC0 | encode));
3143 }
3144
3145 void Assembler::korwl(KRegister dst, KRegister src1, KRegister src2) {
3146 assert(VM_Version::supports_evex(), "");
3147 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3148 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3149 emit_int16(0x45, (0xC0 | encode));
3150 }
3151
3152 void Assembler::kordl(KRegister dst, KRegister src1, KRegister src2) {
3153 assert(VM_Version::supports_avx512bw(), "");
3154 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3155 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3156 emit_int16(0x45, (0xC0 | encode));
3157 }
3158
3159 void Assembler::korql(KRegister dst, KRegister src1, KRegister src2) {
3160 assert(VM_Version::supports_avx512bw(), "");
3161 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3162 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3163 emit_int16(0x45, (0xC0 | encode));
3164 }
3165
3166 void Assembler::kxorbl(KRegister dst, KRegister src1, KRegister src2) {
3167 assert(VM_Version::supports_avx512dq(), "");
3168 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3169 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3170 emit_int16(0x47, (0xC0 | encode));
3171 }
3172
3173 void Assembler::kxnorwl(KRegister dst, KRegister src1, KRegister src2) {
3174 assert(VM_Version::supports_evex(), "");
3175 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3176 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3177 emit_int16(0x46, (0xC0 | encode));
3178 }
3179
3180 void Assembler::kxorwl(KRegister dst, KRegister src1, KRegister src2) {
3181 assert(VM_Version::supports_evex(), "");
3182 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3183 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3184 emit_int16(0x47, (0xC0 | encode));
3185 }
3186
3187 void Assembler::kxordl(KRegister dst, KRegister src1, KRegister src2) {
3188 assert(VM_Version::supports_avx512bw(), "");
3189 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3190 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3191 emit_int16(0x47, (0xC0 | encode));
3192 }
3193
3194 void Assembler::kxorql(KRegister dst, KRegister src1, KRegister src2) {
3195 assert(VM_Version::supports_avx512bw(), "");
3196 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3197 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3198 emit_int16(0x47, (0xC0 | encode));
3199 }
3200
3201 void Assembler::kandbl(KRegister dst, KRegister src1, KRegister src2) {
3202 assert(VM_Version::supports_avx512dq(), "");
3203 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3204 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3205 emit_int16(0x41, (0xC0 | encode));
3206 }
3207
3208 void Assembler::kandwl(KRegister dst, KRegister src1, KRegister src2) {
3209 assert(VM_Version::supports_evex(), "");
3210 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3211 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3212 emit_int16(0x41, (0xC0 | encode));
3213 }
3214
3215 void Assembler::kanddl(KRegister dst, KRegister src1, KRegister src2) {
3216 assert(VM_Version::supports_avx512bw(), "");
3217 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3218 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3219 emit_int16(0x41, (0xC0 | encode));
3220 }
3221
3222 void Assembler::kandql(KRegister dst, KRegister src1, KRegister src2) {
3223 assert(VM_Version::supports_avx512bw(), "");
3224 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3225 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3226 emit_int16(0x41, (0xC0 | encode));
3227 }
3228
3229 void Assembler::knotdl(KRegister dst, KRegister src) {
3230 assert(VM_Version::supports_avx512bw(), "");
3231 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3232 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3233 emit_int16(0x44, (0xC0 | encode));
3234 }
3235
3236 void Assembler::knotql(KRegister dst, KRegister src) {
3237 assert(VM_Version::supports_avx512bw(), "");
3238 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3239 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3240 emit_int16(0x44, (0xC0 | encode));
3241 }
3242
// Sets ZF if the OR of src1 and src2 is all zeros, CF if it is all ones
3244 void Assembler::kortestbl(KRegister src1, KRegister src2) {
3245 assert(VM_Version::supports_avx512dq(), "");
3246 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3247 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3248 emit_int16((unsigned char)0x98, (0xC0 | encode));
3249 }
3250
// Sets ZF if the OR of src1 and src2 is all zeros, CF if it is all ones
3252 void Assembler::kortestwl(KRegister src1, KRegister src2) {
3253 assert(VM_Version::supports_evex(), "");
3254 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3255 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3256 emit_int16((unsigned char)0x98, (0xC0 | encode));
3257 }
3258
// Sets ZF if the OR of src1 and src2 is all zeros, CF if it is all ones
3260 void Assembler::kortestdl(KRegister src1, KRegister src2) {
3261 assert(VM_Version::supports_avx512bw(), "");
3262 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3263 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3264 emit_int16((unsigned char)0x98, (0xC0 | encode));
3265 }
3266
// Sets ZF if the OR of src1 and src2 is all zeros, CF if it is all ones
3268 void Assembler::kortestql(KRegister src1, KRegister src2) {
3269 assert(VM_Version::supports_avx512bw(), "");
3270 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3271 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3272 emit_int16((unsigned char)0x98, (0xC0 | encode));
3273 }
3274
// Sets ZF if (src1 & src2) is all zeros, CF if (src2 & ~src1) is all zeros
3276 void Assembler::ktestql(KRegister src1, KRegister src2) {
3277 assert(VM_Version::supports_avx512bw(), "");
3278 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3279 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3280 emit_int16((unsigned char)0x99, (0xC0 | encode));
3281 }
3282
3283 void Assembler::ktestdl(KRegister src1, KRegister src2) {
3284 assert(VM_Version::supports_avx512bw(), "");
3285 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3286 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3287 emit_int16((unsigned char)0x99, (0xC0 | encode));
3288 }
3289
3290 void Assembler::ktestwl(KRegister src1, KRegister src2) {
3291 assert(VM_Version::supports_avx512dq(), "");
3292 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3293 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3294 emit_int16((unsigned char)0x99, (0xC0 | encode));
3295 }
3296
3297 void Assembler::ktestbl(KRegister src1, KRegister src2) {
3298 assert(VM_Version::supports_avx512dq(), "");
3299 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3300 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3301 emit_int16((unsigned char)0x99, (0xC0 | encode));
3302 }
3303
3304 void Assembler::ktestq(KRegister src1, KRegister src2) {
3305 assert(VM_Version::supports_avx512bw(), "");
3306 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3307 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3308 emit_int16((unsigned char)0x99, (0xC0 | encode));
3309 }
3310
3311 void Assembler::ktestd(KRegister src1, KRegister src2) {
3312 assert(VM_Version::supports_avx512bw(), "");
3313 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3314 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3315 emit_int16((unsigned char)0x99, (0xC0 | encode));
3316 }
3317
3318 void Assembler::kxnorbl(KRegister dst, KRegister src1, KRegister src2) {
3319 assert(VM_Version::supports_avx512dq(), "");
3320 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3321 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3322 emit_int16(0x46, (0xC0 | encode));
3323 }
3324
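// kshiftl*/kshiftr* shift an opmask by an immediate count (opcodes 0x32/0x33
// for left, 0x30/0x31 for right shifts); every form carries a trailing imm8.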
3325 void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) {
3326 assert(VM_Version::supports_avx512dq(), "");
3327 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3328 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3329 emit_int16(0x32, (0xC0 | encode));
3330 emit_int8(imm8);
3331 }
3332
3333 void Assembler::kshiftlql(KRegister dst, KRegister src, int imm8) {
3334 assert(VM_Version::supports_avx512bw(), "");
3335 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3336 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3337 emit_int16(0x33, (0xC0 | encode));
3338 emit_int8(imm8);
3339 }
3340
3342 void Assembler::kshiftrbl(KRegister dst, KRegister src, int imm8) {
3343 assert(VM_Version::supports_avx512dq(), "");
3344 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3345 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x30, (0xC0 | encode));
  emit_int8(imm8);
}
3348
3349 void Assembler::kshiftrwl(KRegister dst, KRegister src, int imm8) {
3350 assert(VM_Version::supports_evex(), "");
3351 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3352 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3353 emit_int16(0x30, (0xC0 | encode));
3354 emit_int8(imm8);
3355 }
3356
3357 void Assembler::kshiftrdl(KRegister dst, KRegister src, int imm8) {
3358 assert(VM_Version::supports_avx512bw(), "");
3359 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3360 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3361 emit_int16(0x31, (0xC0 | encode));
3362 emit_int8(imm8);
3363 }
3364
3365 void Assembler::kshiftrql(KRegister dst, KRegister src, int imm8) {
3366 assert(VM_Version::supports_avx512bw(), "");
3367 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3368 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3369 emit_int16(0x31, (0xC0 | encode));
3370 emit_int8(imm8);
3371 }
3372
3373 void Assembler::kunpckdql(KRegister dst, KRegister src1, KRegister src2) {
3374 assert(VM_Version::supports_avx512bw(), "");
3375 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3376 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
3377 emit_int16(0x4B, (0xC0 | encode));
3378 }
3379
3380 void Assembler::movb(Address dst, int imm8) {
3381 InstructionMark im(this);
3382 prefix(dst);
3383 emit_int8((unsigned char)0xC6);
3384 emit_operand(rax, dst, 1);
3385 emit_int8(imm8);
3386 }
3387
3389 void Assembler::movb(Address dst, Register src) {
3390 assert(src->has_byte_register(), "must have byte register");
3391 InstructionMark im(this);
3392 prefix(dst, src, true);
3393 emit_int8((unsigned char)0x88);
3394 emit_operand(src, dst, 0);
3395 }
3396
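// movdl moves the low 32 bits between a GPR or memory and an XMM register
// (66 0F 6E loads the XMM register, zeroing its upper bits; 66 0F 7E stores).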
3397 void Assembler::movdl(XMMRegister dst, Register src) {
3398 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3399 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
3400 emit_int16(0x6E, (0xC0 | encode));
3401 }
3402
3403 void Assembler::movdl(Register dst, XMMRegister src) {
3404 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3405 // swap src/dst to get correct prefix
3406 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
3407 emit_int16(0x7E, (0xC0 | encode));
3408 }
3409
3410 void Assembler::movdl(XMMRegister dst, Address src) {
3411 InstructionMark im(this);
3412 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3413 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
3414 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3415 emit_int8(0x6E);
3416 emit_operand(dst, src, 0);
3417 }
3418
3419 void Assembler::movdl(Address dst, XMMRegister src) {
3420 InstructionMark im(this);
3421 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3422 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
3423 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3424 emit_int8(0x7E);
3425 emit_operand(src, dst, 0);
3426 }
3427
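// movdqa (66 0F 6F/7F) requires 16-byte alignment and faults otherwise;
// movdqu (F3 0F 6F/7F) accepts any alignment.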
3428 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
3429 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3430 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3431 emit_int16(0x6F, (0xC0 | encode));
3432 }
3433
3434 void Assembler::movdqa(XMMRegister dst, Address src) {
3435 InstructionMark im(this);
3436 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3437 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3438 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3439 emit_int8(0x6F);
3440 emit_operand(dst, src, 0);
3441 }
3442
3443 void Assembler::movdqu(XMMRegister dst, Address src) {
3444 InstructionMark im(this);
3445 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3446 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3447 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3448 emit_int8(0x6F);
3449 emit_operand(dst, src, 0);
3450 }
3451
3452 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
3453 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3454 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3455 emit_int16(0x6F, (0xC0 | encode));
3456 }
3457
3458 void Assembler::movdqu(Address dst, XMMRegister src) {
3459 InstructionMark im(this);
3460 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3461 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3462 attributes.reset_is_clear_context();
3463 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3464 emit_int8(0x7F);
3465 emit_operand(src, dst, 0);
3466 }
3467
3468 // Move Unaligned 256bit Vector
3469 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
3470 assert(UseAVX > 0, "");
3471 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3472 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3473 emit_int16(0x6F, (0xC0 | encode));
3474 }
3475
3476 void Assembler::vmovw(XMMRegister dst, Register src) {
3477 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
3478 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3479 attributes.set_is_evex_instruction();
3480 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP5, &attributes, true);
3481 emit_int16(0x6E, (0xC0 | encode));
3482 }
3483
3484 void Assembler::vmovw(Register dst, XMMRegister src) {
3485 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
3486 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3487 attributes.set_is_evex_instruction();
3488 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP5, &attributes, true);
3489 emit_int16(0x7E, (0xC0 | encode));
3490 }
3491
3492 void Assembler::vmovdqu(XMMRegister dst, Address src) {
3493 assert(UseAVX > 0, "");
3494 InstructionMark im(this);
3495 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3496 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3497 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3498 emit_int8(0x6F);
3499 emit_operand(dst, src, 0);
3500 }
3501
3502 void Assembler::vmovdqu(Address dst, XMMRegister src) {
3503 assert(UseAVX > 0, "");
3504 InstructionMark im(this);
3505 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3506 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3507 attributes.reset_is_clear_context();
3508 // swap src<->dst for encoding
3509 assert(src != xnoreg, "sanity");
3510 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3511 emit_int8(0x7F);
3512 emit_operand(src, dst, 0);
3513 }
3514
3515 // Move Aligned 256bit Vector
3516 void Assembler::vmovdqa(XMMRegister dst, Address src) {
3517 assert(UseAVX > 0, "");
3518 InstructionMark im(this);
3519 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3520 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3521 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3522 emit_int8(0x6F);
3523 emit_operand(dst, src, 0);
3524 }
3525
3526 void Assembler::vmovdqa(Address dst, XMMRegister src) {
3527 assert(UseAVX > 0, "");
3528 InstructionMark im(this);
3529 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3530 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3531 attributes.reset_is_clear_context();
3532 // swap src<->dst for encoding
3533 assert(src != xnoreg, "sanity");
3534 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3535 emit_int8(0x7F);
3536 emit_operand(src, dst, 0);
3537 }
3538
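// AVX/AVX2 masked moves: an element is transferred only if the sign bit of
// the corresponding mask element is set; on loads the unselected destination
// elements are zeroed, on stores the unselected memory locations are left
// untouched.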
3539 void Assembler::vpmaskmovd(XMMRegister dst, XMMRegister mask, Address src, int vector_len) {
3540 assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), "");
3541 InstructionMark im(this);
3542 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
3543 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3544 emit_int8((unsigned char)0x8C);
3545 emit_operand(dst, src, 0);
3546 }
3547
3548 void Assembler::vpmaskmovq(XMMRegister dst, XMMRegister mask, Address src, int vector_len) {
3549 assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), "");
3550 InstructionMark im(this);
3551 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
3552 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3553 emit_int8((unsigned char)0x8C);
3554 emit_operand(dst, src, 0);
3555 }
3556
3557 void Assembler::vmaskmovps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
3558 assert(UseAVX > 0, "requires some form of AVX");
3559 InstructionMark im(this);
3560 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3561 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3562 emit_int8(0x2C);
3563 emit_operand(dst, src, 0);
3564 }
3565
3566 void Assembler::vmaskmovpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
3567 assert(UseAVX > 0, "requires some form of AVX");
3568 InstructionMark im(this);
3569 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3570 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3571 emit_int8(0x2D);
3572 emit_operand(dst, src, 0);
3573 }
3574
3575 void Assembler::vmaskmovps(Address dst, XMMRegister src, XMMRegister mask, int vector_len) {
3576 assert(UseAVX > 0, "");
3577 InstructionMark im(this);
3578 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3579 vex_prefix(dst, mask->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3580 emit_int8(0x2E);
3581 emit_operand(src, dst, 0);
3582 }
3583
3584 void Assembler::vmaskmovpd(Address dst, XMMRegister src, XMMRegister mask, int vector_len) {
3585 assert(UseAVX > 0, "");
3586 InstructionMark im(this);
3587 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3588 vex_prefix(dst, mask->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
3589 emit_int8(0x2F);
3590 emit_operand(src, dst, 0);
3591 }
3592
// Move Unaligned EVEX enabled Vector (element size selectable: 8, 16, 32 or 64 bits)
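// With a mask other than k0 these are masked moves: merge=true keeps the
// destination elements whose mask bit is clear (EVEX merge-masking, selected
// via reset_is_clear_context()), while merge=false zeroes them. A minimal
// sketch (hypothetical operands):
//
//   __ evmovdqul(xmm0, k1, Address(rsi, 0), /*merge*/ false, Assembler::AVX_512bit);
//   // loads dwords where k1[i] = 1 and zeroes the elements where k1[i] = 0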
3594 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3595 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
3596 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
3597 attributes.set_embedded_opmask_register_specifier(mask);
3598 attributes.set_is_evex_instruction();
3599 if (merge) {
3600 attributes.reset_is_clear_context();
3601 }
3602 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3603 emit_int16(0x6F, (0xC0 | encode));
3604 }
3605
3606 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
3607 // Unmasked instruction
3608 evmovdqub(dst, k0, src, /*merge*/ false, vector_len);
3609 }
3610
3611 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
3612 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
3613 InstructionMark im(this);
3614 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
3615 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3616 attributes.set_embedded_opmask_register_specifier(mask);
3617 attributes.set_is_evex_instruction();
3618 if (merge) {
3619 attributes.reset_is_clear_context();
3620 }
3621 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3622 emit_int8(0x6F);
3623 emit_operand(dst, src, 0);
3624 }
3625
3626 void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
3627 // Unmasked instruction
3628 evmovdqub(dst, k0, src, /*merge*/ false, vector_len);
3629 }
3630
3631 void Assembler::evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3632 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
3633 assert(src != xnoreg, "sanity");
3634 InstructionMark im(this);
3635 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
3636 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3637 attributes.set_embedded_opmask_register_specifier(mask);
3638 attributes.set_is_evex_instruction();
3639 if (merge) {
3640 attributes.reset_is_clear_context();
3641 }
3642 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3643 emit_int8(0x7F);
3644 emit_operand(src, dst, 0);
3645 }
3646
3647 void Assembler::evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) {
3648 // Unmasked instruction
3649 evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
3650 }
3651
3652 void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
3653 // Unmasked instruction
3654 evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
3655 }
3656
3657 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
3658 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
3659 InstructionMark im(this);
3660 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
3661 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3662 attributes.set_embedded_opmask_register_specifier(mask);
3663 attributes.set_is_evex_instruction();
3664 if (merge) {
3665 attributes.reset_is_clear_context();
3666 }
3667 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3668 emit_int8(0x6F);
3669 emit_operand(dst, src, 0);
3670 }
3671
3672 void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
3673 // Unmasked instruction
3674 evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
3675 }
3676
3677 void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3678 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
3679 assert(src != xnoreg, "sanity");
3680 InstructionMark im(this);
3681 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
3682 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3683 attributes.set_embedded_opmask_register_specifier(mask);
3684 attributes.set_is_evex_instruction();
3685 if (merge) {
3686 attributes.reset_is_clear_context();
3687 }
3688 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3689 emit_int8(0x7F);
3690 emit_operand(src, dst, 0);
3691 }
3692
3693 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3694 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
3695 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
3696 attributes.set_embedded_opmask_register_specifier(mask);
3697 attributes.set_is_evex_instruction();
3698 if (merge) {
3699 attributes.reset_is_clear_context();
3700 }
3701 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3702 emit_int16(0x6F, (0xC0 | encode));
3703 }
3704
3706 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
3707 // Unmasked instruction
3708 evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
3709 }
3710
3711 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3712 assert(VM_Version::supports_evex(), "");
3713 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3714 attributes.set_embedded_opmask_register_specifier(mask);
3715 attributes.set_is_evex_instruction();
3716 if (merge) {
3717 attributes.reset_is_clear_context();
3718 }
3719 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3720 emit_int16(0x6F, (0xC0 | encode));
3721 }
3722
3723 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
3724 // Unmasked instruction
3725 evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
3726 }
3727
3728 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
3729 assert(VM_Version::supports_evex(), "");
3730 InstructionMark im(this);
3731 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ true);
3732 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3733 attributes.set_embedded_opmask_register_specifier(mask);
3734 attributes.set_is_evex_instruction();
3735 if (merge) {
3736 attributes.reset_is_clear_context();
3737 }
3738 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3739 emit_int8(0x6F);
3740 emit_operand(dst, src, 0);
3741 }
3742
3743 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
3745 evmovdqul(dst, k0, src, /*merge*/ true, vector_len);
3746 }
3747
3748 void Assembler::evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3749 assert(VM_Version::supports_evex(), "");
3750 assert(src != xnoreg, "sanity");
3751 InstructionMark im(this);
3752 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3753 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3754 attributes.set_embedded_opmask_register_specifier(mask);
3755 attributes.set_is_evex_instruction();
3756 if (merge) {
3757 attributes.reset_is_clear_context();
3758 }
3759 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3760 emit_int8(0x7F);
3761 emit_operand(src, dst, 0);
3762 }
3763
3764 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
3765 // Unmasked instruction
3766 evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
3767 }
3768
3769 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3770 assert(VM_Version::supports_evex(), "");
3771 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3772 attributes.set_embedded_opmask_register_specifier(mask);
3773 attributes.set_is_evex_instruction();
3774 if (merge) {
3775 attributes.reset_is_clear_context();
3776 }
3777 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3778 emit_int16(0x6F, (0xC0 | encode));
3779 }
3780
3781 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
3782 // Unmasked instruction
3783 evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
3784 }
3785
3786 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
3787 assert(VM_Version::supports_evex(), "");
3788 InstructionMark im(this);
3789 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3790 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3791 attributes.set_embedded_opmask_register_specifier(mask);
3792 attributes.set_is_evex_instruction();
3793 if (merge) {
3794 attributes.reset_is_clear_context();
3795 }
3796 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3797 emit_int8(0x6F);
3798 emit_operand(dst, src, 0);
3799 }
3800
3801 // Move Aligned 512bit Vector
3802 void Assembler::evmovdqaq(XMMRegister dst, Address src, int vector_len) {
3803 // Unmasked instruction
3804 evmovdqaq(dst, k0, src, /*merge*/ false, vector_len);
3805 }
3806
3807 void Assembler::evmovdqaq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
3808 assert(VM_Version::supports_evex(), "");
3809 InstructionMark im(this);
3810 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3811 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3812 attributes.set_embedded_opmask_register_specifier(mask);
3813 attributes.set_is_evex_instruction();
3814 if (merge) {
3815 attributes.reset_is_clear_context();
3816 }
3817 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3818 emit_int8(0x6F);
3819 emit_operand(dst, src, 0);
3820 }
3821
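// evmovntdquq is the non-temporal (streaming) store form: it bypasses the
// cache hierarchy and is weakly ordered, so producers typically follow a run
// of these stores with sfence.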
3822 void Assembler::evmovntdquq(Address dst, XMMRegister src, int vector_len) {
3823 // Unmasked instruction
3824 evmovntdquq(dst, k0, src, /*merge*/ true, vector_len);
3825 }
3826
3827 void Assembler::evmovntdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3828 assert(VM_Version::supports_evex(), "");
3829 assert(src != xnoreg, "sanity");
3830 InstructionMark im(this);
3831 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3832 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3833 attributes.set_embedded_opmask_register_specifier(mask);
3834 if (merge) {
3835 attributes.reset_is_clear_context();
3836 }
3837 attributes.set_is_evex_instruction();
3838 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE7);
3840 emit_operand(src, dst, 0);
3841 }
3842
3843 void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
3844 // Unmasked instruction
3845 evmovdquq(dst, k0, src, /*merge*/ true, vector_len);
3846 }
3847
3848 void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
3849 assert(VM_Version::supports_evex(), "");
3850 assert(src != xnoreg, "sanity");
3851 InstructionMark im(this);
3852 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
3853 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3854 attributes.set_embedded_opmask_register_specifier(mask);
3855 if (merge) {
3856 attributes.reset_is_clear_context();
3857 }
3858 attributes.set_is_evex_instruction();
3859 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3860 emit_int8(0x7F);
3861 emit_operand(src, dst, 0);
3862 }
3863
// Uses zero extension on 64bit: a 32-bit write clears the upper half of the register.
3866 void Assembler::movl(Register dst, int32_t imm32) {
3867 int encode = prefix_and_encode(dst->encoding());
3868 emit_int8(0xB8 | encode);
3869 emit_int32(imm32);
3870 }
3871
3872 void Assembler::movl(Register dst, Register src) {
3873 int encode = prefix_and_encode(dst->encoding(), src->encoding());
3874 emit_int16((unsigned char)0x8B, (0xC0 | encode));
3875 }
3876
3877 void Assembler::movl(Register dst, Address src) {
3878 InstructionMark im(this);
3879 prefix(src, dst);
3880 emit_int8((unsigned char)0x8B);
3881 emit_operand(dst, src, 0);
3882 }
3883
3884 void Assembler::movl(Address dst, int32_t imm32) {
3885 InstructionMark im(this);
3886 prefix(dst);
3887 emit_int8((unsigned char)0xC7);
3888 emit_operand(rax, dst, 4);
3889 emit_int32(imm32);
3890 }
3891
3892 void Assembler::movl(Address dst, Register src) {
3893 InstructionMark im(this);
3894 prefix(dst, src);
3895 emit_int8((unsigned char)0x89);
3896 emit_operand(src, dst, 0);
3897 }
3898
// Newer CPUs require the use of movsd and movss to avoid a partial register
// stall when loading from memory. But for the old Opteron, use movlpd instead
// of movsd. The selection is done in MacroAssembler::movdbl() and movflt();
// a sketch of that selection follows movlpd() below.
3902 void Assembler::movlpd(XMMRegister dst, Address src) {
3903 InstructionMark im(this);
3904 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3905 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3906 attributes.set_rex_vex_w_reverted();
3907 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3908 emit_int8(0x12);
3909 emit_operand(dst, src, 0);
3910 }
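
// A minimal sketch of that selection (an assumption about its shape, keyed
// off the UseXmmLoadAndClearUpper flag, not a copy of the MacroAssembler
// code):
//
//   void MacroAssembler::movdbl(XMMRegister dst, Address src) {
//     if (UseXmmLoadAndClearUpper) {
//       movsd(dst, src);   // also clears the upper 64 bits of dst
//     } else {
//       movlpd(dst, src);  // leaves the upper 64 bits of dst unchanged
//     }
//   }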
3911
3912 void Assembler::movq(XMMRegister dst, Address src) {
3913 InstructionMark im(this);
3914 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3915 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3916 attributes.set_rex_vex_w_reverted();
3917 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
3918 emit_int8(0x7E);
3919 emit_operand(dst, src, 0);
3920 }
3921
3922 void Assembler::movq(Address dst, XMMRegister src) {
3923 InstructionMark im(this);
3924 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3925 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3926 attributes.set_rex_vex_w_reverted();
3927 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3928 emit_int8((unsigned char)0xD6);
3929 emit_operand(src, dst, 0);
3930 }
3931
3932 void Assembler::movq(XMMRegister dst, XMMRegister src) {
3933 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3934 attributes.set_rex_vex_w_reverted();
3935 int encode = simd_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
3936 emit_int16((unsigned char)0xD6, (0xC0 | encode));
3937 }
3938
3939 void Assembler::movq(Register dst, XMMRegister src) {
3940 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3941 // swap src/dst to get correct prefix
3942 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
3943 emit_int16(0x7E, (0xC0 | encode));
3944 }
3945
3946 void Assembler::movq(XMMRegister dst, Register src) {
3947 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3948 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
3949 emit_int16(0x6E, (0xC0 | encode));
3950 }
3951
3952 void Assembler::movsbl(Register dst, Address src) { // movsxb
3953 InstructionMark im(this);
3954 prefix(src, dst, false, true /* is_map1 */);
3955 emit_int8((unsigned char)0xBE);
3956 emit_operand(dst, src, 0);
3957 }
3958
3959 void Assembler::movsbl(Register dst, Register src) { // movsxb
3960 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true, true /* is_map1 */);
3961 emit_opcode_prefix_and_encoding((unsigned char)0xBE, 0xC0, encode);
3962 }
3963
3964 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
3965 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3966 attributes.set_rex_vex_w_reverted();
3967 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3968 emit_int16(0x10, (0xC0 | encode));
3969 }
3970
3971 void Assembler::movsd(XMMRegister dst, Address src) {
3972 InstructionMark im(this);
3973 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3974 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3975 attributes.set_rex_vex_w_reverted();
3976 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3977 emit_int8(0x10);
3978 emit_operand(dst, src, 0);
3979 }
3980
3981 void Assembler::movsd(Address dst, XMMRegister src) {
3982 InstructionMark im(this);
3983 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3984 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
3985 attributes.reset_is_clear_context();
3986 attributes.set_rex_vex_w_reverted();
3987 simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3988 emit_int8(0x11);
3989 emit_operand(src, dst, 0);
3990 }
3991
3992 void Assembler::vmovsd(XMMRegister dst, XMMRegister src, XMMRegister src2) {
3993 assert(UseAVX > 0, "Requires some form of AVX");
3994 InstructionMark im(this);
3995 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
3996 int encode = vex_prefix_and_encode(src2->encoding(), src->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
3997 emit_int16(0x11, (0xC0 | encode));
3998 }
3999
4000 void Assembler::movss(XMMRegister dst, XMMRegister src) {
4001 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4002 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
4003 emit_int16(0x10, (0xC0 | encode));
4004 }
4005
4006 void Assembler::movss(XMMRegister dst, Address src) {
4007 InstructionMark im(this);
4008 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4009 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
4010 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
4011 emit_int8(0x10);
4012 emit_operand(dst, src, 0);
4013 }
4014
4015 void Assembler::movss(Address dst, XMMRegister src) {
4016 InstructionMark im(this);
4017 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4018 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
4019 attributes.reset_is_clear_context();
4020 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
4021 emit_int8(0x11);
4022 emit_operand(src, dst, 0);
4023 }
4024
4025 void Assembler::movswl(Register dst, Address src) { // movsxw
4026 InstructionMark im(this);
4027 prefix(src, dst, false, true /* is_map1 */);
4028 emit_int8((unsigned char)0xBF);
4029 emit_operand(dst, src, 0);
4030 }
4031
4032 void Assembler::movswl(Register dst, Register src) { // movsxw
4033 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
4034 emit_opcode_prefix_and_encoding((unsigned char)0xBF, 0xC0, encode);
4035 }
4036
4037 void Assembler::movups(XMMRegister dst, Address src) {
4038 InstructionMark im(this);
4039 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4040 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4041 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
4042 emit_int8(0x10);
4043 emit_operand(dst, src, 0);
4044 }
4045
4046 void Assembler::vmovups(XMMRegister dst, Address src, int vector_len) {
4047 assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
4048 InstructionMark im(this);
4049 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4050 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4051 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
4052 emit_int8(0x10);
4053 emit_operand(dst, src, 0);
4054 }
4055
4056 void Assembler::movups(Address dst, XMMRegister src) {
4057 InstructionMark im(this);
4058 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4059 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4060 simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
4061 emit_int8(0x11);
4062 emit_operand(src, dst, 0);
4063 }
4064
4065 void Assembler::vmovups(Address dst, XMMRegister src, int vector_len) {
4066 assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
4067 InstructionMark im(this);
4068 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4069 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4070 simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
4071 emit_int8(0x11);
4072 emit_operand(src, dst, 0);
4073 }
4074
4075 void Assembler::movw(Address dst, int imm16) {
4076 InstructionMark im(this);
4077
  emit_int8(0x66); // operand-size override prefix: switch to 16-bit operand size
4079 prefix(dst);
4080 emit_int8((unsigned char)0xC7);
4081 emit_operand(rax, dst, 2);
4082 emit_int16(imm16);
4083 }
4084
4085 void Assembler::movw(Register dst, Address src) {
4086 InstructionMark im(this);
  emit_int8(0x66); // operand-size override prefix
4088 prefix(src, dst);
4089 emit_int8((unsigned char)0x8B);
4090 emit_operand(dst, src, 0);
4091 }
4092
4093 void Assembler::movw(Address dst, Register src) {
4094 InstructionMark im(this);
  emit_int8(0x66); // operand-size override prefix
4096 prefix(dst, src);
4097 emit_int8((unsigned char)0x89);
4098 emit_operand(src, dst, 0);
4099 }
4100
4101 void Assembler::movzbl(Register dst, Address src) { // movzxb
4102 InstructionMark im(this);
4103 prefix(src, dst, false, true /* is_map1 */);
4104 emit_int8((unsigned char)0xB6);
4105 emit_operand(dst, src, 0);
4106 }
4107
4108 void Assembler::movzbl(Register dst, Register src) { // movzxb
4109 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true, true /* is_map1 */);
4110 emit_opcode_prefix_and_encoding((unsigned char)0xB6, 0xC0, encode);
4111 }
4112
4113 void Assembler::movzwl(Register dst, Address src) { // movzxw
4114 InstructionMark im(this);
4115 prefix(src, dst, false, true /* is_map1 */);
4116 emit_int8((unsigned char)0xB7);
4117 emit_operand(dst, src, 0);
4118 }
4119
4120 void Assembler::movzwl(Register dst, Register src) { // movzxw
4121 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
4122 emit_opcode_prefix_and_encoding((unsigned char)0xB7, 0xC0, encode);
4123 }
4124
4125 void Assembler::mull(Address src) {
4126 InstructionMark im(this);
4127 prefix(src);
4128 emit_int8((unsigned char)0xF7);
4129 emit_operand(rsp, src, 0);
4130 }
4131
4132 void Assembler::emull(Address src, bool no_flags) {
4133 InstructionMark im(this);
4134 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4135 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
4136 eevex_prefix_nf(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
4137 emit_int8((unsigned char)0xF7);
4138 emit_operand(rsp, src, 0);
4139 }
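
// Note: the e-prefixed forms in this file (emull, enegl, eorl, ...) are the
// APX EVEX-promoted encodings (MAP4); the no_flags argument selects the NF
// variant, which suppresses the update of the arithmetic status flags.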
4140
4141 void Assembler::mull(Register src) {
4142 int encode = prefix_and_encode(src->encoding());
4143 emit_int16((unsigned char)0xF7, (0xE0 | encode));
4144 }
4145
4146 void Assembler::emull(Register src, bool no_flags) {
4147 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4148 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
4149 emit_int16((unsigned char)0xF7, (0xE0 | encode));
4150 }
4151
4152 void Assembler::mulsd(XMMRegister dst, Address src) {
4153 InstructionMark im(this);
4154 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4155 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
4156 attributes.set_rex_vex_w_reverted();
4157 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4158 emit_int8(0x59);
4159 emit_operand(dst, src, 0);
4160 }
4161
4162 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
4163 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4164 attributes.set_rex_vex_w_reverted();
4165 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
4166 emit_int16(0x59, (0xC0 | encode));
4167 }
4168
4169 void Assembler::mulss(XMMRegister dst, Address src) {
4170 InstructionMark im(this);
4171 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4172 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
4173 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
4174 emit_int8(0x59);
4175 emit_operand(dst, src, 0);
4176 }
4177
4178 void Assembler::mulss(XMMRegister dst, XMMRegister src) {
4179 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4180 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
4181 emit_int16(0x59, (0xC0 | encode));
4182 }
4183
4184 void Assembler::negl(Register dst) {
4185 int encode = prefix_and_encode(dst->encoding());
4186 emit_int16((unsigned char)0xF7, (0xD8 | encode));
4187 }
4188
4189 void Assembler::enegl(Register dst, Register src, bool no_flags) {
4190 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4191 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
4192 emit_int16((unsigned char)0xF7, (0xD8 | encode));
4193 }
4194
4195 void Assembler::negl(Address dst) {
4196 InstructionMark im(this);
4197 prefix(dst);
4198 emit_int8((unsigned char)0xF7);
  emit_operand(as_Register(3), dst, 0); // ModRM /3 is the opcode extension for neg
4200 }
4201
4202 void Assembler::enegl(Register dst, Address src, bool no_flags) {
4203 InstructionMark im(this);
4204 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4205 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
4206 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
4207 emit_int8((unsigned char)0xF7);
  emit_operand(as_Register(3), src, 0); // ModRM /3 is the opcode extension for neg
4209 }
4210
4211 void Assembler::nop(uint i) {
4212 #ifdef ASSERT
4213 assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers, making it a
  // pain to disassemble code while debugging. If asserts are on, speed is
  // clearly not an issue, so simply use the traditional single-byte nop
  // to do alignment.
4218
4219 for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
4220 return;
4221
4222 #endif // ASSERT
4223
4224 if (UseAddressNop && VM_Version::is_intel()) {
4225 //
    // Using multi-byte nops "0x0F 0x1F [address]" for Intel
4227 // 1: 0x90
4228 // 2: 0x66 0x90
4229 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
4230 // 4: 0x0F 0x1F 0x40 0x00
4231 // 5: 0x0F 0x1F 0x44 0x00 0x00
4232 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
4233 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
4234 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4235 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4236 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4237 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4238
    // The remaining encodings are Intel-specific - don't use consecutive address nops
4240
4241 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4242 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4243 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4244 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4245
    while (i >= 15) {
4247 // For Intel don't generate consecutive address nops (mix with regular nops)
4248 i -= 15;
4249 emit_int24(0x66, 0x66, 0x66);
4250 addr_nop_8();
4251 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
4252 }
4253 switch (i) {
4254 case 14:
4255 emit_int8(0x66); // size prefix
4256 case 13:
4257 emit_int8(0x66); // size prefix
4258 case 12:
4259 addr_nop_8();
4260 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
4261 break;
4262 case 11:
4263 emit_int8(0x66); // size prefix
4264 case 10:
4265 emit_int8(0x66); // size prefix
4266 case 9:
4267 emit_int8(0x66); // size prefix
4268 case 8:
4269 addr_nop_8();
4270 break;
4271 case 7:
4272 addr_nop_7();
4273 break;
4274 case 6:
4275 emit_int8(0x66); // size prefix
4276 case 5:
4277 addr_nop_5();
4278 break;
4279 case 4:
4280 addr_nop_4();
4281 break;
4282 case 3:
4283 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
4284 emit_int8(0x66); // size prefix
4285 case 2:
4286 emit_int8(0x66); // size prefix
4287 case 1:
4288 emit_int8((unsigned char)0x90);
4289 // nop
4290 break;
4291 default:
4292 assert(i == 0, " ");
4293 }
4294 return;
4295 }
4296 if (UseAddressNop && VM_Version::is_amd_family()) {
4297 //
    // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
4299 // 1: 0x90
4300 // 2: 0x66 0x90
4301 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
4302 // 4: 0x0F 0x1F 0x40 0x00
4303 // 5: 0x0F 0x1F 0x44 0x00 0x00
4304 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
4305 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
4306 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4307 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4308 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4309 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4310
    // The remaining encodings are AMD-specific - use consecutive address nops
4312
4313 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
4314 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
4315 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
4316 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
4317 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4318 // Size prefixes (0x66) are added for larger sizes
4319
    while (i >= 22) {
4321 i -= 11;
4322 emit_int24(0x66, 0x66, 0x66);
4323 addr_nop_8();
4324 }
    // Generate the first nop for sizes between 12 and 21
4326 switch (i) {
4327 case 21:
4328 i -= 1;
4329 emit_int8(0x66); // size prefix
4330 case 20:
4331 case 19:
4332 i -= 1;
4333 emit_int8(0x66); // size prefix
4334 case 18:
4335 case 17:
4336 i -= 1;
4337 emit_int8(0x66); // size prefix
4338 case 16:
4339 case 15:
4340 i -= 8;
4341 addr_nop_8();
4342 break;
4343 case 14:
4344 case 13:
4345 i -= 7;
4346 addr_nop_7();
4347 break;
4348 case 12:
4349 i -= 6;
4350 emit_int8(0x66); // size prefix
4351 addr_nop_5();
4352 break;
4353 default:
4354 assert(i < 12, " ");
4355 }
4356
    // Generate the second nop for sizes between 1 and 11
4358 switch (i) {
4359 case 11:
4360 emit_int8(0x66); // size prefix
4361 case 10:
4362 emit_int8(0x66); // size prefix
4363 case 9:
4364 emit_int8(0x66); // size prefix
4365 case 8:
4366 addr_nop_8();
4367 break;
4368 case 7:
4369 addr_nop_7();
4370 break;
4371 case 6:
4372 emit_int8(0x66); // size prefix
4373 case 5:
4374 addr_nop_5();
4375 break;
4376 case 4:
4377 addr_nop_4();
4378 break;
4379 case 3:
4380 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
4381 emit_int8(0x66); // size prefix
4382 case 2:
4383 emit_int8(0x66); // size prefix
4384 case 1:
4385 emit_int8((unsigned char)0x90);
4386 // nop
4387 break;
4388 default:
4389 assert(i == 0, " ");
4390 }
4391 return;
4392 }
4393
4394 if (UseAddressNop && VM_Version::is_zx()) {
4395 //
    // Using multi-byte nops "0x0F 0x1F [address]" for ZX
4397 // 1: 0x90
4398 // 2: 0x66 0x90
4399 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
4400 // 4: 0x0F 0x1F 0x40 0x00
4401 // 5: 0x0F 0x1F 0x44 0x00 0x00
4402 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
4403 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
4404 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4405 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4406 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4407 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
4408
    // The remaining encodings are ZX-specific - don't use consecutive address nops
4410
4411 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4412 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4413 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4414 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
4415
4416 while (i >= 15) {
4417 // For ZX don't generate consecutive address nops (mix with regular nops)
4418 i -= 15;
4419 emit_int24(0x66, 0x66, 0x66);
4420 addr_nop_8();
4421 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
4422 }
4423 switch (i) {
4424 case 14:
4425 emit_int8(0x66); // size prefix
4426 case 13:
4427 emit_int8(0x66); // size prefix
4428 case 12:
4429 addr_nop_8();
4430 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
4431 break;
4432 case 11:
4433 emit_int8(0x66); // size prefix
4434 case 10:
4435 emit_int8(0x66); // size prefix
4436 case 9:
4437 emit_int8(0x66); // size prefix
4438 case 8:
4439 addr_nop_8();
4440 break;
4441 case 7:
4442 addr_nop_7();
4443 break;
4444 case 6:
4445 emit_int8(0x66); // size prefix
4446 case 5:
4447 addr_nop_5();
4448 break;
4449 case 4:
4450 addr_nop_4();
4451 break;
4452 case 3:
4453 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
4454 emit_int8(0x66); // size prefix
4455 case 2:
4456 emit_int8(0x66); // size prefix
4457 case 1:
4458 emit_int8((unsigned char)0x90);
4459 // nop
4460 break;
4461 default:
4462 assert(i == 0, " ");
4463 }
4464 return;
4465 }
4466
4467 // Using nops with size prefixes "0x66 0x90".
4468 // From AMD Optimization Guide:
4469 // 1: 0x90
4470 // 2: 0x66 0x90
4471 // 3: 0x66 0x66 0x90
4472 // 4: 0x66 0x66 0x66 0x90
4473 // 5: 0x66 0x66 0x90 0x66 0x90
4474 // 6: 0x66 0x66 0x90 0x66 0x66 0x90
4475 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
4476 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
4477 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
4478 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
4479 //
4480 while (i > 12) {
4481 i -= 4;
4482 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
4483 }
4484 // 1 - 12 nops
4485 if (i > 8) {
4486 if (i > 9) {
4487 i -= 1;
4488 emit_int8(0x66);
4489 }
4490 i -= 3;
4491 emit_int24(0x66, 0x66, (unsigned char)0x90);
4492 }
4493 // 1 - 8 nops
4494 if (i > 4) {
4495 if (i > 6) {
4496 i -= 1;
4497 emit_int8(0x66);
4498 }
4499 i -= 3;
4500 emit_int24(0x66, 0x66, (unsigned char)0x90);
4501 }
4502 switch (i) {
4503 case 4:
4504 emit_int8(0x66);
4505 case 3:
4506 emit_int8(0x66);
4507 case 2:
4508 emit_int8(0x66);
4509 case 1:
4510 emit_int8((unsigned char)0x90);
4511 break;
4512 default:
4513 assert(i == 0, " ");
4514 }
4515 }
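
// A worked example of the byte accounting above (AMD path, nop(18)):
// the first switch enters at case 18 and falls through to case 17 (one 0x66
// prefix) and then case 16/15 (addr_nop_8()), emitting a 9-byte instruction
// and leaving i == 9; the second switch enters at case 9 (one 0x66 prefix)
// and falls through to case 8 (addr_nop_8()), emitting another 9-byte
// instruction. Total: 18 bytes in two instructions.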
4516
4517 void Assembler::notl(Register dst) {
4518 int encode = prefix_and_encode(dst->encoding());
4519 emit_int16((unsigned char)0xF7, (0xD0 | encode));
4520 }
4521
4522 void Assembler::enotl(Register dst, Register src) {
4523 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4524 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes);
4525 emit_int16((unsigned char)0xF7, (0xD0 | encode));
4526 }
4527
4528 void Assembler::eorw(Register dst, Register src1, Register src2, bool no_flags) {
4529 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_16bit, 0x0B, 0xC0, no_flags, true /* is_commutative */);
4530 }
4531
4532 void Assembler::orl(Address dst, int32_t imm32) {
4533 InstructionMark im(this);
4534 prefix(dst);
4535 emit_arith_operand(0x81, rcx, dst, imm32);
4536 }
4537
4538 void Assembler::eorl(Register dst, Address src, int32_t imm32, bool no_flags) {
4539 InstructionMark im(this);
4540 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4541 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
4542 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
4543 emit_arith_operand(0x81, rcx, src, imm32);
4544 }
4545
4546 void Assembler::orl(Register dst, int32_t imm32) {
4547 prefix(dst);
4548 emit_arith(0x81, 0xC8, dst, imm32);
4549 }
4550
4551 void Assembler::eorl(Register dst, Register src, int32_t imm32, bool no_flags) {
4552 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x81, 0xC8, no_flags);
4553 }
4554
4555 void Assembler::orl(Register dst, Address src) {
4556 InstructionMark im(this);
4557 prefix(src, dst);
4558 emit_int8(0x0B);
4559 emit_operand(dst, src, 0);
4560 }
4561
4562 void Assembler::eorl(Register dst, Register src1, Address src2, bool no_flags) {
4563 InstructionMark im(this);
4564 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x0B, no_flags);
4565 }
4566
4567 void Assembler::orl(Register dst, Register src) {
4568 (void) prefix_and_encode(dst->encoding(), src->encoding());
4569 emit_arith(0x0B, 0xC0, dst, src);
4570 }
4571
4572 void Assembler::eorl(Register dst, Register src1, Register src2, bool no_flags) {
4573 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x0B, 0xC0, no_flags, true /* is_commutative */);
4574 }
4575
4576 void Assembler::orl(Address dst, Register src) {
4577 InstructionMark im(this);
4578 prefix(dst, src);
4579 emit_int8(0x09);
4580 emit_operand(src, dst, 0);
4581 }
4582
4583 void Assembler::eorl(Register dst, Address src1, Register src2, bool no_flags) {
4584 InstructionMark im(this);
4585 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x09, no_flags, false /* is_map1 */, true /* is_commutative */);
4586 }
4587
4588 void Assembler::orb(Address dst, int imm8) {
4589 InstructionMark im(this);
4590 prefix(dst);
4591 emit_int8((unsigned char)0x80);
4592 emit_operand(rcx, dst, 1);
4593 emit_int8(imm8);
4594 }
4595
4596 void Assembler::eorb(Register dst, Address src, int imm8, bool no_flags) {
4597 InstructionMark im(this);
4598 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4599 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
4600 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
4601 emit_int8((unsigned char)0x80);
4602 emit_operand(rcx, src, 1);
4603 emit_int8(imm8);
4604 }
4605
4606 void Assembler::orb(Address dst, Register src) {
4607 InstructionMark im(this);
4608 prefix(dst, src, true);
4609 emit_int8(0x08);
4610 emit_operand(src, dst, 0);
4611 }
4612
4613 void Assembler::eorb(Register dst, Address src1, Register src2, bool no_flags) {
4614 InstructionMark im(this);
4615 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_8bit, 0x08, no_flags, false /* is_map1 */, true /* is_commutative */);
4616 }
4617
4618 void Assembler::packsswb(XMMRegister dst, XMMRegister src) {
4619 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4620 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4621 emit_int16(0x63, (0xC0 | encode));
4622 }
4623
4624 void Assembler::vpacksswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4625 assert(UseAVX > 0, "some form of AVX must be enabled");
4626 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4627 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4628 emit_int16(0x63, (0xC0 | encode));
4629 }
4630
4631 void Assembler::packssdw(XMMRegister dst, XMMRegister src) {
4632 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4633 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4634 emit_int16(0x6B, (0xC0 | encode));
4635 }
4636
4637 void Assembler::vpackssdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4638 assert(UseAVX > 0, "some form of AVX must be enabled");
4639 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4640 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4641 emit_int16(0x6B, (0xC0 | encode));
4642 }
4643
4644 void Assembler::packuswb(XMMRegister dst, Address src) {
4645 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
4646 InstructionMark im(this);
4647 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4648 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4649 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4650 emit_int8(0x67);
4651 emit_operand(dst, src, 0);
4652 }
4653
4654 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
4655 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4656 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4657 emit_int16(0x67, (0xC0 | encode));
4658 }
4659
4660 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4661 assert(UseAVX > 0, "some form of AVX must be enabled");
4662 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4663 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4664 emit_int16(0x67, (0xC0 | encode));
4665 }
4666
4667 void Assembler::packusdw(XMMRegister dst, XMMRegister src) {
4668 assert(VM_Version::supports_sse4_1(), "");
4669 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4670 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4671 emit_int16(0x2B, (0xC0 | encode));
4672 }
4673
4674 void Assembler::vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4675 assert(UseAVX > 0, "some form of AVX must be enabled");
4676 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4677 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4678 emit_int16(0x2B, (0xC0 | encode));
4679 }
4680
4681 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
4682 assert(VM_Version::supports_avx2(), "");
4683 assert(vector_len != AVX_128bit, "");
4684 // VEX.256.66.0F3A.W1 00 /r ib
4685 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4686 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4687 emit_int24(0x00, (0xC0 | encode), imm8);
4688 }
4689
4690 void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4691 assert(vector_len == AVX_256bit ? VM_Version::supports_avx512vl() :
4692 vector_len == AVX_512bit ? VM_Version::supports_evex() : false, "not supported");
4693 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4694 attributes.set_is_evex_instruction();
4695 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4696 emit_int16(0x36, (0xC0 | encode));
4697 }
4698
4699 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4700 assert(VM_Version::supports_avx512_vbmi(), "");
4701 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4702 attributes.set_is_evex_instruction();
4703 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4704 emit_int16((unsigned char)0x8D, (0xC0 | encode));
4705 }
4706
4707 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4708 assert(VM_Version::supports_avx512_vbmi(), "");
4709 InstructionMark im(this);
4710 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4711 attributes.set_is_evex_instruction();
4712 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4713 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4714 emit_int8((unsigned char)0x8D);
4715 emit_operand(dst, src, 0);
4716 }
4717
4718 void Assembler::vpermw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4719 assert(vector_len == AVX_128bit ? VM_Version::supports_avx512vlbw() :
4720 vector_len == AVX_256bit ? VM_Version::supports_avx512vlbw() :
4721 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported");
4722 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4723 attributes.set_is_evex_instruction();
4724 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4725 emit_int16((unsigned char)0x8D, (0xC0 | encode));
4726 }
4727
4728 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4729 assert((vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
4730 (vector_len == AVX_512bit && VM_Version::supports_evex()), "");
4731 // VEX.NDS.256.66.0F38.W0 36 /r
4732 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4733 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4734 emit_int16(0x36, (0xC0 | encode));
4735 }
4736
4737 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4738 assert((vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
4739 (vector_len == AVX_512bit && VM_Version::supports_evex()), "");
4740 // VEX.NDS.256.66.0F38.W0 36 /r
4741 InstructionMark im(this);
4742 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4743 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
4744 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4745 emit_int8(0x36);
4746 emit_operand(dst, src, 0);
4747 }
4748
4749 void Assembler::vpermps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4750 assert((vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
4751 (vector_len == AVX_512bit && VM_Version::supports_evex()), "");
4752 // VEX.NDS.XXX.66.0F38.W0 16 /r
4753 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4754 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4755 emit_int16(0x16, (0xC0 | encode));
4756 }
4757
4758 void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
4759 assert(VM_Version::supports_avx2(), "");
4760 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4761 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4762 emit_int24(0x46, (0xC0 | encode), imm8);
4763 }
4764
4765 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
4766 assert(VM_Version::supports_avx(), "");
4767 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4768 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4769 emit_int24(0x06, (0xC0 | encode), imm8);
4770 }
4771
4772 void Assembler::vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
4773 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
4774 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
4775 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4776 emit_int24(0x04, (0xC0 | encode), imm8);
4777 }
4778
4779 void Assembler::vpermilps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4780 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
4781 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4782 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4783 emit_int16(0x0C, (0xC0 | encode));
4784 }
4785
4786 void Assembler::vpermilpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
4787 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
4788 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(),/* legacy_mode */ false,/* no_mask_reg */ true, /* uses_vl */ false);
4789 attributes.set_rex_vex_w_reverted();
4790 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4791 emit_int24(0x05, (0xC0 | encode), imm8);
4792 }
4793
4794 void Assembler::vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
4795 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
4796 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */false, /* no_mask_reg */ true, /* uses_vl */ false);
4797 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4798 emit_int24(0x01, (0xC0 | encode), imm8);
4799 }
4800
4801 void Assembler::evpmultishiftqb(XMMRegister dst, XMMRegister ctl, XMMRegister src, int vector_len) {
4802 assert(VM_Version::supports_avx512_vbmi(), "");
4803 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4804 attributes.set_is_evex_instruction();
4805 int encode = vex_prefix_and_encode(dst->encoding(), ctl->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4806 emit_int16((unsigned char)0x83, (unsigned char)(0xC0 | encode));
4807 }
4808
4809 void Assembler::pause() {
4810 emit_int16((unsigned char)0xF3, (unsigned char)0x90);
4811 }
4812
4813 void Assembler::ud2() {
4814 emit_int16(0x0F, 0x0B);
4815 }
4816
4817 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
4818 assert(VM_Version::supports_sse4_2(), "");
4819 assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs as BASE or INDEX of address operand");
4820 InstructionMark im(this);
4821 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4822 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4823 emit_int8(0x61);
4824 emit_operand(dst, src, 1);
4825 emit_int8(imm8);
4826 }
4827
4828 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
4829 assert(VM_Version::supports_sse4_2(), "");
4830 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4831 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4832 emit_int24(0x61, (0xC0 | encode), imm8);
4833 }
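
// Note: pcmpestri takes its two string lengths implicitly in RAX and RDX and
// returns the match index in ECX; imm8 selects the element size, comparison
// mode, and polarity. Callers must load RAX/RDX before issuing it.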
4834
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
4836 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
4837 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4838 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4839 emit_int16(0x74, (0xC0 | encode));
4840 }
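
// A semantics-only sketch with hypothetical byte lanes:
//
//   dst = { 1, 2, 3, 4, ... }   src = { 1, 9, 3, 9, ... }
//   pcmpeqb(dst, src)  =>  dst = { 0xFF, 0x00, 0xFF, 0x00, ... }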
4841
4842 void Assembler::vpcmpCCbwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
4843 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
4844 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
4845 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4846 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4847 emit_int16(cond_encoding, (0xC0 | encode));
4848 }
4849
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
4851 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4852 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
4853 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
4854 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4855 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4856 emit_int16(0x74, (0xC0 | encode));
4857 }
4858
4859 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
4860 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
4861 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
4862 InstructionMark im(this);
4863 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4864 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4865 emit_int8(0x74);
4866 emit_operand(dst, src2, 0);
4867 }
4868
// In this context, kdst receives the mask used to process the equal components
4870 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
4871 assert(VM_Version::supports_avx512bw(), "");
4872 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4873 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4874 attributes.set_is_evex_instruction();
4875 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4876 emit_int16(0x74, (0xC0 | encode));
4877 }
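
// Usage sketch (hypothetical registers): after
//
//   evpcmpeqb(k1, xmm0, xmm1, Assembler::AVX_512bit);
//
// bit n of k1 is set iff byte lane n of xmm0 equals byte lane n of xmm1, so
// k1 can steer a subsequent merge-masked operation over just the equal lanes.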
4878
4879 void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
4880 assert(VM_Version::supports_avx512bw(), "");
4881 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4882 InstructionMark im(this);
4883 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4884 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4885 attributes.set_is_evex_instruction();
4886 int dst_enc = kdst->encoding();
4887 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4888 emit_int8(0x64);
4889 emit_operand(as_Register(dst_enc), src, 0);
4890 }
4891
4892 void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
4893 assert(VM_Version::supports_avx512bw(), "");
4894 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4895 InstructionMark im(this);
4896 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
4897 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4898 attributes.reset_is_clear_context();
4899 attributes.set_embedded_opmask_register_specifier(mask);
4900 attributes.set_is_evex_instruction();
4901 int dst_enc = kdst->encoding();
4902 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4903 emit_int8(0x64);
4904 emit_operand(as_Register(dst_enc), src, 0);
4905 }
4906
4907 void Assembler::evpcmpub(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
4908 assert(VM_Version::supports_avx512bw(), "");
4909 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4910 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4911 attributes.set_is_evex_instruction();
4912 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4913 emit_int24(0x3E, (0xC0 | encode), vcc);
4914 }
4915
4916 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
4917 assert(VM_Version::supports_avx512bw(), "");
4918 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4919 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4920 attributes.set_is_evex_instruction();
4921 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4922 emit_int24(0x3E, (0xC0 | encode), vcc);
4923 }
4924
4925 void Assembler::evpcmpud(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
4926 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4927 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4928 attributes.set_is_evex_instruction();
4929 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4930 emit_int24(0x1E, (0xC0 | encode), vcc);
4931 }
4932
4933 void Assembler::evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
4934 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4935 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4936 attributes.set_is_evex_instruction();
4937 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4938 emit_int24(0x1E, (0xC0 | encode), vcc);
4939 }
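
// Usage sketch for the unsigned compares (assumption: Assembler::lt is the
// ComparisonPredicate encoding for unsigned less-than in this context):
//
//   evpcmpud(k1, xmm0, xmm1, Assembler::lt, Assembler::AVX_512bit);
//   // bit n of k1 is set iff dword lane n of xmm0 is below dword lane n of
//   // xmm1, compared as unsigned 32-bit values.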
4940
4941 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
4942 assert(VM_Version::supports_avx512bw(), "");
4943 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4944 InstructionMark im(this);
4945 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4946 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4947 attributes.set_is_evex_instruction();
4948 int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4950 emit_int8(0x3E);
4951 emit_operand(as_Register(dst_enc), src, 1);
4952 emit_int8(vcc);
4953 }
4954
4955 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
4956 assert(VM_Version::supports_avx512bw(), "");
4957 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4958 InstructionMark im(this);
4959 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4960 attributes.set_is_evex_instruction();
4961 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4962 int dst_enc = kdst->encoding();
4963 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4964 emit_int8(0x74);
4965 emit_operand(as_Register(dst_enc), src, 0);
4966 }
4967
4968 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
4969 assert(VM_Version::supports_avx512bw(), "");
4970 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
4971 InstructionMark im(this);
4972 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
4973 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4974 attributes.reset_is_clear_context();
4975 attributes.set_embedded_opmask_register_specifier(mask);
4976 attributes.set_is_evex_instruction();
4977 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4978 emit_int8(0x74);
4979 emit_operand(as_Register(kdst->encoding()), src, 0);
4980 }
4981
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
4983 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
4984 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4985 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4986 emit_int16(0x75, (0xC0 | encode));
4987 }
4988
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
4990 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4991 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
4992 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
4993 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4994 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4995 emit_int16(0x75, (0xC0 | encode));
4996 }
4997
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
4999 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5000 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
5001 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
5002 InstructionMark im(this);
5003 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5004 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5005 emit_int8(0x75);
5006 emit_operand(dst, src, 0);
5007 }
5008
// In this context, kdst receives the mask used to process the equal components
5010 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
5011 assert(VM_Version::supports_avx512bw(), "");
5012 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5013 attributes.set_is_evex_instruction();
5014 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5015 emit_int16(0x75, (0xC0 | encode));
5016 }
5017
5018 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
5019 assert(VM_Version::supports_avx512bw(), "");
5020 InstructionMark im(this);
5021 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5022 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
5023 attributes.set_is_evex_instruction();
5024 int dst_enc = kdst->encoding();
5025 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5026 emit_int8(0x75);
5027 emit_operand(as_Register(dst_enc), src, 0);
5028 }
5029
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
5031 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
5032 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5033 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5034 emit_int16(0x76, (0xC0 | encode));
5035 }
5036
// In this context, dst components that compare equal are set to all ones; non-equal components are zeroed in dst
5038 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5039 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
5040 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
5041 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5042 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5043 emit_int16(0x76, (0xC0 | encode));
5044 }
5045
// In this context, kdst receives the mask used to process the equal components
5047 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
5048 assert(VM_Version::supports_evex(), "");
5049 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5050 attributes.set_is_evex_instruction();
5051 attributes.reset_is_clear_context();
5052 attributes.set_embedded_opmask_register_specifier(mask);
5053 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5054 emit_int16(0x76, (0xC0 | encode));
5055 }
5056
5057 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
5058 assert(VM_Version::supports_evex(), "");
5059 InstructionMark im(this);
5060 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5061 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
5062 attributes.set_is_evex_instruction();
5063 attributes.reset_is_clear_context();
5064 attributes.set_embedded_opmask_register_specifier(mask);
5065 int dst_enc = kdst->encoding();
5066 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5067 emit_int8(0x76);
5068 emit_operand(as_Register(dst_enc), src, 0);
5069 }
5070
// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
5072 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
5073 assert(VM_Version::supports_sse4_1(), "");
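  // Encoding: 66 0F 38 29 /r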
5074 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5075 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5076 emit_int16(0x29, (0xC0 | encode));
5077 }
5078
5079 void Assembler::evpcmpeqq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
5080 assert(VM_Version::supports_evex(), "");
5081 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5082 attributes.set_is_evex_instruction();
5083 attributes.reset_is_clear_context();
5084 attributes.set_embedded_opmask_register_specifier(mask);
5085 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5086 emit_int16(0x29, (0xC0 | encode));
5087 }
5088
5089 void Assembler::vpcmpCCq(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
5090 assert(VM_Version::supports_avx(), "");
5091 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5092 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5093 emit_int16(cond_encoding, (0xC0 | encode));
5094 }
5095
// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
5097 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5098 assert(VM_Version::supports_avx(), "");
5099 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5100 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5101 emit_int16(0x29, (0xC0 | encode));
5102 }
5103
// In this context, kdst is written with the mask used to process the equal components
5105 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
5106 assert(VM_Version::supports_evex(), "");
5107 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5108 attributes.reset_is_clear_context();
5109 attributes.set_is_evex_instruction();
5110 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5111 emit_int16(0x29, (0xC0 | encode));
5112 }
5113
// In this context, kdst is written with the mask used to process the equal components
5115 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
5116 assert(VM_Version::supports_evex(), "");
5117 InstructionMark im(this);
5118 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5119 attributes.reset_is_clear_context();
5120 attributes.set_is_evex_instruction();
5121 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
5122 int dst_enc = kdst->encoding();
5123 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5124 emit_int8(0x29);
5125 emit_operand(as_Register(dst_enc), src, 0);
5126 }
5127
5128 void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_2(), ""); // pcmpgtq is an SSE4.2 instruction
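  // Encoding: 66 0F 38 37 /r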
5130 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5131 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5132 emit_int16(0x37, (0xC0 | encode));
5133 }
5134
5135 void Assembler::pmovmskb(Register dst, XMMRegister src) {
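  // Encoding: 66 0F D7 /r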
5136 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5137 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5138 emit_int16((unsigned char)0xD7, (0xC0 | encode));
5139 }
5140
5141 void Assembler::vpmovmskb(Register dst, XMMRegister src, int vec_enc) {
5142 assert((VM_Version::supports_avx() && vec_enc == AVX_128bit) ||
5143 (VM_Version::supports_avx2() && vec_enc == AVX_256bit), "");
5144 InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5145 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5146 emit_int16((unsigned char)0xD7, (0xC0 | encode));
5147 }
5148
5149 void Assembler::vmovmskps(Register dst, XMMRegister src, int vec_enc) {
5150 assert(VM_Version::supports_avx(), "");
5151 InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5152 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5153 emit_int16(0x50, (0xC0 | encode));
5154 }
5155
5156 void Assembler::vmovmskpd(Register dst, XMMRegister src, int vec_enc) {
5157 assert(VM_Version::supports_avx(), "");
5158 InstructionAttr attributes(vec_enc, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
5159 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5160 emit_int16(0x50, (0xC0 | encode));
5161 }
5162
5164 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
5165 assert(VM_Version::supports_sse4_1(), "");
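  // Encoding: 66 0F 3A 16 /r ib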
5166 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5167 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5168 emit_int24(0x16, (0xC0 | encode), imm8);
5169 }
5170
5171 void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
5172 assert(VM_Version::supports_sse4_1(), "");
5173 InstructionMark im(this);
5174 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5175 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
5176 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5177 emit_int8(0x16);
5178 emit_operand(src, dst, 1);
5179 emit_int8(imm8);
5180 }
5181
5182 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
5183 assert(VM_Version::supports_sse4_1(), "");
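  // Encoding: 66 REX.W 0F 3A 16 /r ib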
5184 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5185 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5186 emit_int24(0x16, (0xC0 | encode), imm8);
5187 }
5188
5189 void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
5190 assert(VM_Version::supports_sse4_1(), "");
5191 InstructionMark im(this);
5192 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5193 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
5194 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5195 emit_int8(0x16);
5196 emit_operand(src, dst, 1);
5197 emit_int8(imm8);
5198 }
5199
5200 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
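  // Encoding: 66 0F C5 /r ib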
5201 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5202 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5203 emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8);
5204 }
5205
5206 void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
5207 assert(VM_Version::supports_sse4_1(), "");
5208 InstructionMark im(this);
5209 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5210 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
5211 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5212 emit_int8(0x15);
5213 emit_operand(src, dst, 1);
5214 emit_int8(imm8);
5215 }
5216
5217 void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
5218 assert(VM_Version::supports_sse4_1(), "");
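  // Encoding: 66 0F 3A 14 /r ib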
5219 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5220 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5221 emit_int24(0x14, (0xC0 | encode), imm8);
5222 }
5223
5224 void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
5225 assert(VM_Version::supports_sse4_1(), "");
5226 InstructionMark im(this);
5227 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5228 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
5229 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5230 emit_int8(0x14);
5231 emit_operand(src, dst, 1);
5232 emit_int8(imm8);
5233 }
5234
5235 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
5236 assert(VM_Version::supports_sse4_1(), "");
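  // Encoding: 66 0F 3A 22 /r ib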
5237 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5238 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5239 emit_int24(0x22, (0xC0 | encode), imm8);
5240 }
5241
5242 void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
5243 assert(VM_Version::supports_sse4_1(), "");
5244 InstructionMark im(this);
5245 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5246 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
5247 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5248 emit_int8(0x22);
5249 emit_operand(dst, src, 1);
5250 emit_int8(imm8);
5251 }
5252
5253 void Assembler::vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
5254 assert(VM_Version::supports_avx(), "");
5255 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5256 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5257 emit_int24(0x22, (0xC0 | encode), imm8);
5258 }
5259
5260 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
5261 assert(VM_Version::supports_sse4_1(), "");
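  // Encoding: 66 REX.W 0F 3A 22 /r ib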
5262 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5263 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5264 emit_int24(0x22, (0xC0 | encode), imm8);
5265 }
5266
5267 void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
5268 assert(VM_Version::supports_sse4_1(), "");
5269 InstructionMark im(this);
5270 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5271 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
5272 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5273 emit_int8(0x22);
5274 emit_operand(dst, src, 1);
5275 emit_int8(imm8);
5276 }
5277
5278 void Assembler::vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
5279 assert(VM_Version::supports_avx(), "");
5280 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
5281 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5282 emit_int24(0x22, (0xC0 | encode), imm8);
5283 }
5284
5285 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
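  // Encoding: 66 0F C4 /r ib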
5286 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5287 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
5288 emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
5289 }
5290
5291 void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
5292 InstructionMark im(this);
5293 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5294 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
5295 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5296 emit_int8((unsigned char)0xC4);
5297 emit_operand(dst, src, 1);
5298 emit_int8(imm8);
5299 }
5300
5301 void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
5302 assert(VM_Version::supports_avx(), "");
5303 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5304 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
5305 emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
5306 }
5307
5308 void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
5309 assert(VM_Version::supports_sse4_1(), "");
5310 InstructionMark im(this);
5311 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5312 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
5313 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5314 emit_int8(0x20);
5315 emit_operand(dst, src, 1);
5316 emit_int8(imm8);
5317 }
5318
5319 void Assembler::pinsrb(XMMRegister dst, Register src, int imm8) {
5320 assert(VM_Version::supports_sse4_1(), "");
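  // Encoding: 66 0F 3A 20 /r ib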
5321 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5322 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5323 emit_int24(0x20, (0xC0 | encode), imm8);
5324 }
5325
5326 void Assembler::vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
5327 assert(VM_Version::supports_avx(), "");
5328 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
5329 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
5330 emit_int24(0x20, (0xC0 | encode), imm8);
5331 }
5332
5333 void Assembler::insertps(XMMRegister dst, XMMRegister src, int imm8) {
5334 assert(VM_Version::supports_sse4_1(), "");
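  // Encoding: 66 0F 3A 21 /r ib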
5335 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5336 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5337 emit_int24(0x21, (0xC0 | encode), imm8);
5338 }
5339
5340 void Assembler::vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5341 assert(VM_Version::supports_avx(), "");
5342 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5343 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5344 emit_int24(0x21, (0xC0 | encode), imm8);
5345 }
5346
5347 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
5348 assert(VM_Version::supports_sse4_1(), "");
5349 InstructionMark im(this);
5350 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5351 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
5352 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5353 emit_int8(0x30);
5354 emit_operand(dst, src, 0);
5355 }
5356
5357 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
5358 assert(VM_Version::supports_sse4_1(), "");
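  // Encoding: 66 0F 38 30 /r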
5359 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5360 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5361 emit_int16(0x30, (0xC0 | encode));
5362 }
5363
5364 void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
5365 assert(VM_Version::supports_sse4_1(), "");
5366 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5367 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5368 emit_int16(0x20, (0xC0 | encode));
5369 }
5370
5371 void Assembler::pmovzxdq(XMMRegister dst, XMMRegister src) {
5372 assert(VM_Version::supports_sse4_1(), "");
5373 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5374 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5375 emit_int16(0x35, (0xC0 | encode));
5376 }
5377
5378 void Assembler::pmovsxbd(XMMRegister dst, XMMRegister src) {
5379 assert(VM_Version::supports_sse4_1(), "");
5380 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5381 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5382 emit_int16(0x21, (0xC0 | encode));
5383 }
5384
5385 void Assembler::pmovzxbd(XMMRegister dst, XMMRegister src) {
5386 assert(VM_Version::supports_sse4_1(), "");
5387 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5388 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5389 emit_int16(0x31, (0xC0 | encode));
5390 }
5391
5392 void Assembler::pmovsxbq(XMMRegister dst, XMMRegister src) {
5393 assert(VM_Version::supports_sse4_1(), "");
5394 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5395 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5396 emit_int16(0x22, (0xC0 | encode));
5397 }
5398
5399 void Assembler::pmovsxwd(XMMRegister dst, XMMRegister src) {
5400 assert(VM_Version::supports_sse4_1(), "");
5401 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5402 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5403 emit_int16(0x23, (0xC0 | encode));
5404 }
5405
5406 void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
5407 assert(VM_Version::supports_avx(), "");
5408 InstructionMark im(this);
5409 assert(dst != xnoreg, "sanity");
5410 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5411 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
5412 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5413 emit_int8(0x30);
5414 emit_operand(dst, src, 0);
5415 }
5416
5417 void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
5418 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5419 vector_len == AVX_256bit? VM_Version::supports_avx2() :
5420 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
5421 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5422 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x30, (0xC0 | encode));
5424 }
5425
5426 void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
5427 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5428 vector_len == AVX_256bit? VM_Version::supports_avx2() :
5429 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
5430 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5431 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5432 emit_int16(0x20, (0xC0 | encode));
5433 }
5434
5435 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
5436 assert(VM_Version::supports_avx512vlbw(), "");
5437 assert(dst != xnoreg, "sanity");
5438 InstructionMark im(this);
5439 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
5440 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
5441 attributes.set_embedded_opmask_register_specifier(mask);
5442 attributes.set_is_evex_instruction();
5443 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5444 emit_int8(0x30);
5445 emit_operand(dst, src, 0);
5446 }
5447
5448 void Assembler::evpmovzxbd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
5449 assert(VM_Version::supports_avx512vl(), "");
5450 assert(dst != xnoreg, "sanity");
5451 InstructionMark im(this);
5452 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
5453 attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit);
5454 attributes.set_embedded_opmask_register_specifier(mask);
5455 attributes.set_is_evex_instruction();
5456 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5457 emit_int8(0x31);
5458 emit_operand(dst, src, 0);
5459 }
5460
5461 void Assembler::evpmovzxbd(XMMRegister dst, Address src, int vector_len) {
5462 evpmovzxbd(dst, k0, src, vector_len);
5463 }
5464
5465 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
5466 assert(VM_Version::supports_evex(), "");
5467 // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r
5468 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5469 attributes.set_is_evex_instruction();
5470 attributes.set_embedded_opmask_register_specifier(mask);
5471 if (merge) {
5472 attributes.reset_is_clear_context();
5473 }
5474 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5475 emit_int16((unsigned char)0xDB, (0xC0 | encode));
5476 }
5477
5478 void Assembler::vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len) {
5479 assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
5480 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5481 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5482 emit_int16(0x35, (0xC0 | encode));
5483 }
5484
5485 void Assembler::vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len) {
5486 assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
5487 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5488 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5489 emit_int16(0x31, (0xC0 | encode));
5490 }
5491
5492 void Assembler::vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len) {
5493 assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
5494 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5495 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5496 emit_int16(0x32, (0xC0 | encode));
5497 }
5498
5499 void Assembler::vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len) {
5500 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5501 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5502 VM_Version::supports_evex(), "");
5503 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5504 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5505 emit_int16(0x21, (0xC0 | encode));
5506 }
5507
5508 void Assembler::vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len) {
5509 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5510 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5511 VM_Version::supports_evex(), "");
5512 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5513 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5514 emit_int16(0x22, (0xC0 | encode));
5515 }
5516
5517 void Assembler::vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len) {
5518 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5519 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5520 VM_Version::supports_evex(), "");
5521 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5522 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5523 emit_int16(0x23, (0xC0 | encode));
5524 }
5525
5526 void Assembler::vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len) {
5527 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5528 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5529 VM_Version::supports_evex(), "");
5530 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5531 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5532 emit_int16(0x24, (0xC0 | encode));
5533 }
5534
5535 void Assembler::vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len) {
5536 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5537 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5538 VM_Version::supports_evex(), "");
5539 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5540 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5541 emit_int16(0x25, (0xC0 | encode));
5542 }
5543
5544 void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
5545 assert(VM_Version::supports_avx512vlbw(), "");
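  // Encoding: EVEX.F3.0F38.W0 30 /r (vpmovwb)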
5546 assert(src != xnoreg, "sanity");
5547 InstructionMark im(this);
5548 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5549 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
5550 attributes.set_is_evex_instruction();
5551 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
5552 emit_int8(0x30);
5553 emit_operand(src, dst, 0);
5554 }
5555
5556 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
5557 assert(VM_Version::supports_avx512vlbw(), "");
5558 assert(src != xnoreg, "sanity");
5559 InstructionMark im(this);
5560 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5561 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
5562 attributes.reset_is_clear_context();
5563 attributes.set_embedded_opmask_register_specifier(mask);
5564 attributes.set_is_evex_instruction();
5565 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
5566 emit_int8(0x30);
5567 emit_operand(src, dst, 0);
5568 }
5569
5570 void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) {
5571 assert(VM_Version::supports_evex(), "");
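  // Encoding: EVEX.F3.0F38.W0 31 /r (vpmovdb)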
5572 assert(src != xnoreg, "sanity");
5573 InstructionMark im(this);
5574 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5575 attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit);
5576 attributes.set_is_evex_instruction();
5577 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
5578 emit_int8(0x31);
5579 emit_operand(src, dst, 0);
5580 }
5581
5582 void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) {
5583 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5584 vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, "");
5586 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5587 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5588 emit_int16(0x33, (0xC0 | encode));
5589 }
5590
5591 void Assembler::vpmovzxwq(XMMRegister dst, XMMRegister src, int vector_len) {
5592 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5593 vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, "");
5595 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5596 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5597 emit_int16(0x34, (0xC0 | encode));
5598 }
5599
5600 void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) {
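  // Encoding: 66 0F F5 /r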
5601 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5602 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5603 emit_int16((unsigned char)0xF5, (0xC0 | encode));
5604 }
5605
5606 void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5607 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5608 (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5609 (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), "");
5610 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5611 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5612 emit_int16((unsigned char)0xF5, (0xC0 | encode));
5613 }
5614
5615 void Assembler::vpmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
5616 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5617 vector_len == AVX_256bit? VM_Version::supports_avx2() :
5618 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
5619 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5620 int encode = simd_prefix_and_encode(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5621 emit_int16(0x04, (0xC0 | encode));
5622 }
5623
5624 void Assembler::vpmadd52luq(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
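  // Encoding: VEX.66.0F38.W1 B4 /r (AVX-IFMA) or EVEX.66.0F38.W1 B4 /r (AVX512-IFMA)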
5626
5627 InstructionMark im(this);
5628 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5629
5630 if (VM_Version::supports_avx512ifma()) {
5631 attributes.set_is_evex_instruction();
5632 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
5633 }
5634 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5635 emit_int8((unsigned char)0xB4);
5636 emit_operand(dst, src2, 0);
5637 }
5638
5639 void Assembler::vpmadd52luq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
5641
5642 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5643
5644 if (VM_Version::supports_avx512ifma()) {
5645 attributes.set_is_evex_instruction();
5646 }
5647 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5648 emit_int16((unsigned char)0xB4, (0xC0 | encode));
5649 }
5650
5651 void Assembler::evpmadd52luq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
5652 evpmadd52luq(dst, k0, src1, src2, false, vector_len);
5653 }
5654
5655 void Assembler::evpmadd52luq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
5656 assert(VM_Version::supports_avx512ifma(), "");
5657 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5658 attributes.set_is_evex_instruction();
5659 attributes.set_embedded_opmask_register_specifier(mask);
5660 if (merge) {
5661 attributes.reset_is_clear_context();
5662 }
5663
5664 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5665 emit_int16((unsigned char)0xB4, (0xC0 | encode));
5666 }
5667
5668 void Assembler::vpmadd52huq(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
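  // Encoding: VEX.66.0F38.W1 B5 /r (AVX-IFMA) or EVEX.66.0F38.W1 B5 /r (AVX512-IFMA)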
5670
5671 InstructionMark im(this);
5672 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5673
5674 if (VM_Version::supports_avx512ifma()) {
5675 attributes.set_is_evex_instruction();
5676 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
5677 }
5678 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5679 emit_int8((unsigned char)0xB5);
5680 emit_operand(dst, src2, 0);
5681 }
5682
5683 void Assembler::vpmadd52huq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
5685
5686 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5687
5688 if (VM_Version::supports_avx512ifma()) {
5689 attributes.set_is_evex_instruction();
5690 }
5691 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5692 emit_int16((unsigned char)0xB5, (0xC0 | encode));
5693 }
5694
5695 void Assembler::evpmadd52huq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
5696 evpmadd52huq(dst, k0, src1, src2, false, vector_len);
5697 }
5698
5699 void Assembler::evpmadd52huq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
5700 assert(VM_Version::supports_avx512ifma(), "");
5701 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5702 attributes.set_is_evex_instruction();
5703 attributes.set_embedded_opmask_register_specifier(mask);
5704 if (merge) {
5705 attributes.reset_is_clear_context();
5706 }
5707
5708 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5709 emit_int16((unsigned char)0xB5, (0xC0 | encode));
5710 }
5711
5712 void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5713 assert(VM_Version::supports_evex(), "");
5714 assert(VM_Version::supports_avx512_vnni(), "must support vnni");
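  // Encoding: EVEX.66.0F38.W0 52 /r (vpdpwssd)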
5715 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5716 attributes.set_is_evex_instruction();
5717 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5718 emit_int16(0x52, (0xC0 | encode));
5719 }
5720
5721 // generic
5722 void Assembler::pop(Register dst) {
5723 int encode = prefix_and_encode(dst->encoding());
5724 emit_int8(0x58 | encode);
5725 }
5726
5727 void Assembler::popcntl(Register dst, Address src) {
5728 assert(VM_Version::supports_popcnt(), "must support");
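  // Encoding: F3 0F B8 /r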
5729 InstructionMark im(this);
5730 emit_int8((unsigned char)0xF3);
5731 prefix(src, dst, false, true /* is_map1 */);
5732 emit_int8((unsigned char)0xB8);
5733 emit_operand(dst, src, 0);
5734 }
5735
5736 void Assembler::epopcntl(Register dst, Address src, bool no_flags) {
5737 assert(VM_Version::supports_popcnt(), "must support");
5738 InstructionMark im(this);
5739 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5740 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
5741 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
5742 emit_int8((unsigned char)0x88);
5743 emit_operand(dst, src, 0);
5744 }
5745
5746 void Assembler::popcntl(Register dst, Register src) {
5747 assert(VM_Version::supports_popcnt(), "must support");
5748 emit_int8((unsigned char)0xF3);
5749 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
5750 emit_opcode_prefix_and_encoding((unsigned char)0xB8, 0xC0, encode);
5751 }
5752
5753 void Assembler::epopcntl(Register dst, Register src, bool no_flags) {
5754 assert(VM_Version::supports_popcnt(), "must support");
5755 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5756 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
5757 emit_int16((unsigned char)0x88, (0xC0 | encode));
5758 }
5759
5760 void Assembler::evpopcntb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
5761 assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
5762 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
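  // Encoding: EVEX.66.0F38.W0 54 /r (vpopcntb)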
5763 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5764 attributes.set_embedded_opmask_register_specifier(mask);
5765 attributes.set_is_evex_instruction();
5766 if (merge) {
5767 attributes.reset_is_clear_context();
5768 }
5769 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5770 emit_int16(0x54, (0xC0 | encode));
5771 }
5772
5773 void Assembler::evpopcntw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
5774 assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
5775 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
5776 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5777 attributes.set_is_evex_instruction();
5778 attributes.set_embedded_opmask_register_specifier(mask);
5779 if (merge) {
5780 attributes.reset_is_clear_context();
5781 }
5782 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5783 emit_int16(0x54, (0xC0 | encode));
5784 }
5785
5786 void Assembler::evpopcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
5787 assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
5788 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
5789 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5790 attributes.set_is_evex_instruction();
5791 attributes.set_embedded_opmask_register_specifier(mask);
5792 if (merge) {
5793 attributes.reset_is_clear_context();
5794 }
5795 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5796 emit_int16(0x55, (0xC0 | encode));
5797 }
5798
5799 void Assembler::evpopcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
5800 assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
5801 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
5802 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5803 attributes.set_is_evex_instruction();
5804 attributes.set_embedded_opmask_register_specifier(mask);
5805 if (merge) {
5806 attributes.reset_is_clear_context();
5807 }
5808 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5809 emit_int16(0x55, (0xC0 | encode));
5810 }
5811
5812 void Assembler::popf() {
5813 emit_int8((unsigned char)0x9D);
5814 }
5815
5816 void Assembler::prefetchnta(Address src) {
5817 InstructionMark im(this);
5818 prefix(src, true /* is_map1 */);
5819 emit_int8(0x18);
5820 emit_operand(rax, src, 0); // 0, src
5821 }
5822
5823 void Assembler::prefetchr(Address src) {
5824 assert(VM_Version::supports_3dnow_prefetch(), "must support");
5825 InstructionMark im(this);
5826 prefix(src, true /* is_map1 */);
5827 emit_int8(0x0D);
5828 emit_operand(rax, src, 0); // 0, src
5829 }
5830
5831 void Assembler::prefetcht0(Address src) {
5832 InstructionMark im(this);
5833 prefix(src, true /* is_map1 */);
5834 emit_int8(0x18);
5835 emit_operand(rcx, src, 0); // 1, src
5836 }
5837
5838 void Assembler::prefetcht1(Address src) {
5839 InstructionMark im(this);
5840 prefix(src, true /* is_map1 */);
5841 emit_int8(0x18);
5842 emit_operand(rdx, src, 0); // 2, src
5843 }
5844
5845 void Assembler::prefetcht2(Address src) {
5846 InstructionMark im(this);
5847 prefix(src, true /* is_map1 */);
5848 emit_int8(0x18);
5849 emit_operand(rbx, src, 0); // 3, src
5850 }
5851
5852 void Assembler::prefetchw(Address src) {
5853 assert(VM_Version::supports_3dnow_prefetch(), "must support");
5854 InstructionMark im(this);
5855 prefix(src, true /* is_map1 */);
5856 emit_int8(0x0D);
5857 emit_operand(rcx, src, 0); // 1, src
5858 }
5859
5860 void Assembler::prefix(Prefix p) {
5861 emit_int8(p);
5862 }
5863
5864 void Assembler::prefix16(int prefix) {
5865 assert(UseAPX, "APX features not enabled");
5866 emit_int8((prefix & 0xff00) >> 8);
5867 emit_int8(prefix & 0xff);
5868 }
5869
5870 void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
5871 assert(VM_Version::supports_ssse3(), "");
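  // Encoding: 66 0F 38 00 /r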
5872 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5873 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5874 emit_int16(0x00, (0xC0 | encode));
5875 }
5876
5877 void Assembler::evpshufb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
5878 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
5879 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5880 attributes.set_is_evex_instruction();
5881 attributes.set_embedded_opmask_register_specifier(mask);
5882 if (merge) {
5883 attributes.reset_is_clear_context();
5884 }
5885 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5886 emit_int16(0x00, (0xC0 | encode));
5887 }
5888
5889 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5890 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5891 vector_len == AVX_256bit? VM_Version::supports_avx2() :
5892 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
5893 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5894 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5895 emit_int16(0x00, (0xC0 | encode));
5896 }
5897
5898 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5899 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5900 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5901 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
5902 InstructionMark im(this);
5903 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5904 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
5905 simd_prefix(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5906 emit_int8(0x00);
5907 emit_operand(dst, src, 0);
5908 }
5909
5910 void Assembler::pshufb(XMMRegister dst, Address src) {
5911 assert(VM_Version::supports_ssse3(), "");
5912 InstructionMark im(this);
5913 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5914 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
5915 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5916 emit_int8(0x00);
5917 emit_operand(dst, src, 0);
5918 }
5919
5920 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
5921 assert(isByte(mode), "invalid value");
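  // Encoding: 66 0F 70 /r ib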
5922 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
5923 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5924 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5925 emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
5926 }
5927
5928 void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
5929 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
5930 (vector_len == AVX_256bit? VM_Version::supports_avx2() :
5931 (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
5932 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5933 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5934 emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
5935 }
5936
5937 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
5938 assert(isByte(mode), "invalid value");
5939 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
5940 InstructionMark im(this);
5941 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5942 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
5943 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5944 emit_int8(0x70);
5945 emit_operand(dst, src, 1);
5946 emit_int8(mode & 0xFF);
5947 }
5948
5949 void Assembler::pshufhw(XMMRegister dst, XMMRegister src, int mode) {
5950 assert(isByte(mode), "invalid value");
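  // Encoding: F3 0F 70 /r ib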
5951 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5952 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
5953 emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
5954 }
5955
5956 void Assembler::vpshufhw(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
5957 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5958 (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5959 (vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false)), "");
5960 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5961 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
5962 emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
5963 }
5964
5965 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
5966 assert(isByte(mode), "invalid value");
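  // Encoding: F2 0F 70 /r ib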
5967 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5968 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5969 emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
5970 }
5971
5972 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
5973 assert(isByte(mode), "invalid value");
5974 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
5975 InstructionMark im(this);
5976 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5977 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
5978 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5979 emit_int8(0x70);
5980 emit_operand(dst, src, 1);
5981 emit_int8(mode & 0xFF);
5982 }
5983
5984 void Assembler::vpshuflw(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
5985 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
5986 (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
5987 (vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false)), "");
5988 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5989 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5990 emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
5991 }
5992
5993 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
5994 assert(VM_Version::supports_evex(), "requires EVEX support");
5995 assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
5996 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5997 attributes.set_is_evex_instruction();
5998 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5999 emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
6000 }
6001
6002 void Assembler::shufpd(XMMRegister dst, XMMRegister src, int imm8) {
6003 assert(isByte(imm8), "invalid value");
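  // Encoding: 66 0F C6 /r ib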
6004 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6005 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6006 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
6007 }
6008
6009 void Assembler::vshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
6010 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6011 attributes.set_rex_vex_w_reverted();
6012 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6013 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
6014 }
6015
6016 void Assembler::shufps(XMMRegister dst, XMMRegister src, int imm8) {
6017 assert(isByte(imm8), "invalid value");
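  // Encoding: NP 0F C6 /r ib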
6018 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6019 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6020 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
6021 }
6022
6023 void Assembler::vshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
6024 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6025 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6026 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
6027 }
6028
6029 void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift right 128-bit value in dst XMMRegister by shift number of bytes.
6031 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
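  // XMM3 is for /3 encoding: 66 0F 73 /3 ib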
6032 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6033 emit_int24(0x73, (0xC0 | encode), shift);
6034 }
6035
6036 void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
6037 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6038 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
6039 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
6040 InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6041 int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6042 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
6043 }
6044
6045 void Assembler::pslldq(XMMRegister dst, int shift) {
6046 // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
6047 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6048 // XMM7 is for /7 encoding: 66 0F 73 /7 ib
6049 int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6050 emit_int24(0x73, (0xC0 | encode), shift);
6051 }
6052
6053 void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
6054 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
6055 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
6056 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
6057 InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6058 int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6059 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
6060 }
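
// Usage sketch for the byte shifts above (hypothetical operands): the shift
// amount counts bytes and vacated bytes are zero-filled, per 128-bit lane for
// the wider forms, e.g.:
//   psrldq(xmm0, 8);                      // low half = old high half, high half = 0
//   vpslldq(xmm1, xmm2, 4, AVX_256bit);   // shift each 128-bit lane left by 4 bytes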
6061
6062 void Assembler::ptest(XMMRegister dst, Address src) {
6063 assert(VM_Version::supports_sse4_1(), "");
6064 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
6065 assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs");
6066 InstructionMark im(this);
6067 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6068 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6069 emit_int8(0x17);
6070 emit_operand(dst, src, 0);
6071 }
6072
6073 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
6074 assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), "");
6075 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6076 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6077 emit_int8(0x17);
6078 emit_int8((0xC0 | encode));
6079 }
6080
6081 void Assembler::vptest(XMMRegister dst, Address src) {
6082 assert(VM_Version::supports_avx(), "");
6083 assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs");
6084 InstructionMark im(this);
6085 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6086 assert(dst != xnoreg, "sanity");
6087 // swap src<->dst for encoding
6088 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6089 emit_int8(0x17);
6090 emit_operand(dst, src, 0);
6091 }
6092
6093 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
6094 assert(VM_Version::supports_avx(), "");
6095 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6096 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6097 emit_int16(0x17, (0xC0 | encode));
6098 }
6099
6100 void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
6101 assert(VM_Version::supports_avx(), "");
6102 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6103 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6104 emit_int16(0x17, (0xC0 | encode));
6105 }
6106
6107 void Assembler::vtestps(XMMRegister dst, XMMRegister src, int vector_len) {
6108 assert(VM_Version::supports_avx(), "");
6109 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6110 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6111 emit_int16(0x0E, (0xC0 | encode));
6112 }
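
// The tests above only set flags: ptest/vptest set ZF when (src & dst) == 0
// and CF when (src & ~dst) == 0; vtestps applies the same rule to just the
// sign bits of the packed floats.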
6113
6114 void Assembler::evptestmb(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6115 assert(vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : VM_Version::supports_avx512vlbw(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W0 26 /r
6117 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6118 attributes.set_is_evex_instruction();
6119 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6120 emit_int16(0x26, (0xC0 | encode));
6121 }
6122
6123 void Assembler::evptestmd(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6124 assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx512vl(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W0 27 /r
6126 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6127 attributes.set_is_evex_instruction();
6128 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
6129 emit_int16(0x27, (0xC0 | encode));
6130 }
6131
6132 void Assembler::evptestnmd(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
6133 assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx512vl(), "");
  // Encoding: EVEX.NDS.XXX.F3.0F38.W0 27 /r
6135 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6136 attributes.set_is_evex_instruction();
6137 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
6138 emit_int16(0x27, (0xC0 | encode));
6139 }
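
// Semantics of the mask tests above: evptestmb/evptestmd set mask bit i when
// (nds[i] & src[i]) != 0, while evptestnmd sets it when (nds[i] & src[i]) == 0.
// A common idiom (hypothetical operands) is evptestmd(k1, v, v, vector_len)
// to build a mask of the non-zero elements of v.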
6140
6141 void Assembler::punpcklbw(XMMRegister dst, Address src) {
6142 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
6143 InstructionMark im(this);
6144 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
6145 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
6146 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6147 emit_int8(0x60);
6148 emit_operand(dst, src, 0);
6149 }
6150
6151 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
6152 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
6153 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6154 emit_int16(0x60, (0xC0 | encode));
6155 }
6156
6157 void Assembler::punpckldq(XMMRegister dst, Address src) {
6158 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
6159 InstructionMark im(this);
6160 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6161 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
6162 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6163 emit_int8(0x62);
6164 emit_operand(dst, src, 0);
6165 }
6166
6167 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
6168 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6169 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6170 emit_int16(0x62, (0xC0 | encode));
6171 }
6172
6173 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
6174 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6175 attributes.set_rex_vex_w_reverted();
6176 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6177 emit_int16(0x6C, (0xC0 | encode));
6178 }
6179
6180 void Assembler::evpunpcklqdq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
6181 evpunpcklqdq(dst, k0, src1, src2, false, vector_len);
6182 }
6183
6184 void Assembler::evpunpcklqdq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
6185 assert(VM_Version::supports_evex(), "requires AVX512F");
6186 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
6187 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
6188 attributes.set_is_evex_instruction();
6189 attributes.set_embedded_opmask_register_specifier(mask);
6190 if (merge) {
6191 attributes.reset_is_clear_context();
6192 }
6193
6194 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6195 emit_int16(0x6C, (0xC0 | encode));
6196 }
6197
6198 void Assembler::evpunpckhqdq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
6199 evpunpckhqdq(dst, k0, src1, src2, false, vector_len);
6200 }
6201
6202 void Assembler::evpunpckhqdq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
6203 assert(VM_Version::supports_evex(), "requires AVX512F");
6204 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
6205 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
6206 attributes.set_is_evex_instruction();
6207 attributes.set_embedded_opmask_register_specifier(mask);
6208 if (merge) {
6209 attributes.reset_is_clear_context();
6210 }
6211
6212 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
6213 emit_int16(0x6D, (0xC0 | encode));
6214 }
6215
6216 void Assembler::push2(Register src1, Register src2, bool with_ppx) {
6217 assert(VM_Version::supports_apx_f(), "requires APX");
6218 InstructionAttr attributes(0, /* rex_w */ with_ppx, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6219 /* EVEX.BASE */
6220 int src_enc = src1->encoding();
6221 /* EVEX.VVVV */
6222 int nds_enc = src2->encoding();
6223
6224 bool vex_b = (src_enc & 8) == 8;
6225 bool evex_v = (nds_enc >= 16);
6226 bool evex_b = (src_enc >= 16);
6227
6228 // EVEX.ND = 1;
6229 attributes.set_extended_context();
6230 attributes.set_is_evex_instruction();
6231 set_attributes(&attributes);
6232
6233 evex_prefix(0, vex_b, 0, 0, evex_b, evex_v, false /*eevex_x*/, nds_enc, VEX_SIMD_NONE, /* map4 */ VEX_OPCODE_0F_3C);
6234 emit_int16(0xFF, (0xC0 | (0x6 << 3) | (src_enc & 7)));
6235 }
6236
6237 void Assembler::pop2(Register src1, Register src2, bool with_ppx) {
6238 assert(VM_Version::supports_apx_f(), "requires APX");
6239 InstructionAttr attributes(0, /* rex_w */ with_ppx, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6240 /* EVEX.BASE */
6241 int src_enc = src1->encoding();
6242 /* EVEX.VVVV */
6243 int nds_enc = src2->encoding();
6244
6245 bool vex_b = (src_enc & 8) == 8;
6246 bool evex_v = (nds_enc >= 16);
6247 bool evex_b = (src_enc >= 16);
6248
6249 // EVEX.ND = 1;
6250 attributes.set_extended_context();
6251 attributes.set_is_evex_instruction();
6252 set_attributes(&attributes);
6253
6254 evex_prefix(0, vex_b, 0, 0, evex_b, evex_v, false /*eevex_x*/, nds_enc, VEX_SIMD_NONE, /* map4 */ VEX_OPCODE_0F_3C);
6255 emit_int16(0x8F, (0xC0 | (src_enc & 7)));
6256 }
6257
6258 void Assembler::push2p(Register src1, Register src2) {
6259 push2(src1, src2, true);
6260 }
6261
6262 void Assembler::pop2p(Register src1, Register src2) {
6263 pop2(src1, src2, true);
6264 }
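
// Usage sketch (requires APX; hypothetical registers): push2/pop2 move a pair
// of GPRs to/from the stack in one instruction, keeping rsp 16-byte aligned.
// A push2 is paired with a pop2 of the same registers in mirrored operand
// order (see the Intel APX spec for the exact stack layout):
//   push2(rbp, rbx, false);
//   ...
//   pop2(rbx, rbp, false);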
6265
6266 void Assembler::pushp(Register src) {
6267 assert(VM_Version::supports_apx_f(), "requires APX");
6268 int encode = prefixq_and_encode_rex2(src->encoding());
6269 emit_int8(0x50 | encode);
6270 }
6271
6272 void Assembler::popp(Register dst) {
6273 assert(VM_Version::supports_apx_f(), "requires APX");
6274 int encode = prefixq_and_encode_rex2(dst->encoding());
6275 emit_int8((unsigned char)0x58 | encode);
6276 }
6277
6278 void Assembler::push(int32_t imm32) {
  // In 64-bit mode we push a 64-bit slot onto the stack but only
  // take a 32-bit, sign-extended immediate.
6281 emit_int8(0x68);
6282 emit_int32(imm32);
6283 }
6284
6285 void Assembler::push(Register src) {
6286 int encode = prefix_and_encode(src->encoding());
6287 emit_int8(0x50 | encode);
6288 }
6289
6290 void Assembler::pushf() {
6291 emit_int8((unsigned char)0x9C);
6292 }
6293
6294 void Assembler::rcll(Register dst, int imm8) {
6295 assert(isShiftCount(imm8), "illegal shift count");
6296 int encode = prefix_and_encode(dst->encoding());
6297 if (imm8 == 1) {
6298 emit_int16((unsigned char)0xD1, (0xD0 | encode));
6299 } else {
6300 emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
6301 }
6302 }
6303
6304 void Assembler::ercll(Register dst, Register src, int imm8) {
6305 assert(isShiftCount(imm8), "illegal shift count");
6306 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6307 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes);
6308 if (imm8 == 1) {
6309 emit_int16((unsigned char)0xD1, (0xD0 | encode));
6310 } else {
6311 emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
6312 }
6313 }
6314
6315 void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
6316 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6317 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
6318 emit_int16(0x53, (0xC0 | encode));
6319 }
6320
6321 void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
6322 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6323 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
6324 emit_int16(0x53, (0xC0 | encode));
6325 }
6326
6327 void Assembler::rdtsc() {
6328 emit_int16(0x0F, 0x31);
6329 }
6330
6331 void Assembler::rdtscp() {
6332 emit_int24(0x0F, 0x01, (unsigned char)0xF9);
6333 }
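
// Both read the 64-bit TSC into EDX:EAX; rdtscp additionally loads
// IA32_TSC_AUX into ECX and waits for all prior instructions to execute
// before reading the counter.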
6334
// copies data from [rsi] to [rdi] using rcx pointer-sized words
// generic
6337 void Assembler::rep_mov() {
6338 // REP
6339 // MOVSQ
6340 emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);
6341 }
6342
// sets rcx bytes starting at [rdi] to the byte value in al
6344 void Assembler::rep_stosb() {
6345 // REP
6346 // STOSB
6347 emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);
6348 }
6349
// sets rcx pointer-sized words starting at [rdi] to the value in rax
// generic
6352 void Assembler::rep_stos() {
6353 // REP
6354 // STOSQ
6355 emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);
6356 }
6357
// scans rcx pointer-sized words at [rdi] for an occurrence of rax
// generic
void Assembler::repne_scan() {
6361 // SCASQ
6362 emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);
6363 }
6364
// scans rcx 4-byte words at [rdi] for an occurrence of eax
// generic
void Assembler::repne_scanl() {
6368 // SCASL
6369 emit_int16((unsigned char)0xF2, (unsigned char)0xAF);
6370 }
6371
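// ret(0) emits the one-byte C3 form; a non-zero imm16 emits C2 iw, which pops
// an additional imm16 bytes of arguments after popping the return address.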
6372 void Assembler::ret(int imm16) {
6373 if (imm16 == 0) {
6374 emit_int8((unsigned char)0xC3);
6375 } else {
6376 emit_int8((unsigned char)0xC2);
6377 emit_int16(imm16);
6378 }
6379 }
6380
6381 void Assembler::roll(Register dst, int imm8) {
6382 assert(isShiftCount(imm8), "illegal shift count");
6383 int encode = prefix_and_encode(dst->encoding());
6384 if (imm8 == 1) {
6385 emit_int16((unsigned char)0xD1, (0xC0 | encode));
6386 } else {
    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
6388 }
6389 }
6390
6391 void Assembler::eroll(Register dst, Register src, int imm8, bool no_flags) {
6392 assert(isShiftCount(imm8), "illegal shift count");
6393 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6394 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6395 if (imm8 == 1) {
6396 emit_int16((unsigned char)0xD1, (0xC0 | encode));
6397 } else {
    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
6399 }
6400 }
6401
6402 void Assembler::roll(Register dst) {
6403 int encode = prefix_and_encode(dst->encoding());
6404 emit_int16((unsigned char)0xD3, (0xC0 | encode));
6405 }
6406
6407 void Assembler::eroll(Register dst, Register src, bool no_flags) {
6408 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6409 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6410 emit_int16((unsigned char)0xD3, (0xC0 | encode));
6411 }
6412
6413 void Assembler::rorl(Register dst, int imm8) {
6414 assert(isShiftCount(imm8), "illegal shift count");
6415 int encode = prefix_and_encode(dst->encoding());
6416 if (imm8 == 1) {
6417 emit_int16((unsigned char)0xD1, (0xC8 | encode));
6418 } else {
    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
6420 }
6421 }
6422
6423 void Assembler::erorl(Register dst, Register src, int imm8, bool no_flags) {
6424 assert(isShiftCount(imm8), "illegal shift count");
6425 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6426 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6427 if (imm8 == 1) {
6428 emit_int16((unsigned char)0xD1, (0xC8 | encode));
6429 } else {
    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
6431 }
6432 }
6433
6434 void Assembler::rorl(Register dst) {
6435 int encode = prefix_and_encode(dst->encoding());
6436 emit_int16((unsigned char)0xD3, (0xC8 | encode));
6437 }
6438
6439 void Assembler::erorl(Register dst, Register src, bool no_flags) {
6440 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6441 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6442 emit_int16((unsigned char)0xD3, (0xC8 | encode));
6443 }
6444
6445 void Assembler::rorq(Register dst) {
6446 int encode = prefixq_and_encode(dst->encoding());
6447 emit_int16((unsigned char)0xD3, (0xC8 | encode));
6448 }
6449
6450 void Assembler::erorq(Register dst, Register src, bool no_flags) {
6451 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6452 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
6453 emit_int16((unsigned char)0xD3, (0xC8 | encode));
6454 }
6455
6456 void Assembler::rorq(Register dst, int imm8) {
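  // 64-bit rotates accept shift counts 0-63 while isShiftCount() checks
  // 0-31, so the count is halved for the range check only.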
6457 assert(isShiftCount(imm8 >> 1), "illegal shift count");
6458 int encode = prefixq_and_encode(dst->encoding());
6459 if (imm8 == 1) {
6460 emit_int16((unsigned char)0xD1, (0xC8 | encode));
6461 } else {
    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
6463 }
6464 }
6465
6466 void Assembler::erorq(Register dst, Register src, int imm8, bool no_flags) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
6468 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6469 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
6470 if (imm8 == 1) {
6471 emit_int16((unsigned char)0xD1, (0xC8 | encode));
6472 } else {
6473 emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
6474 }
6475 }
6476
6477 void Assembler::rolq(Register dst) {
6478 int encode = prefixq_and_encode(dst->encoding());
6479 emit_int16((unsigned char)0xD3, (0xC0 | encode));
6480 }
6481
6482 void Assembler::erolq(Register dst, Register src, bool no_flags) {
6483 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6484 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
6485 emit_int16((unsigned char)0xD3, (0xC0 | encode));
6486 }
6487
6488 void Assembler::rolq(Register dst, int imm8) {
6489 assert(isShiftCount(imm8 >> 1), "illegal shift count");
6490 int encode = prefixq_and_encode(dst->encoding());
6491 if (imm8 == 1) {
6492 emit_int16((unsigned char)0xD1, (0xC0 | encode));
6493 } else {
    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
6495 }
6496 }
6497
6498 void Assembler::erolq(Register dst, Register src, int imm8, bool no_flags) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
6500 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6501 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
6502 if (imm8 == 1) {
6503 emit_int16((unsigned char)0xD1, (0xC0 | encode));
6504 } else {
    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
6506 }
6507 }
6508
6509 void Assembler::sall(Address dst, int imm8) {
6510 InstructionMark im(this);
6511 assert(isShiftCount(imm8), "illegal shift count");
6512 prefix(dst);
6513 if (imm8 == 1) {
6514 emit_int8((unsigned char)0xD1);
6515 emit_operand(as_Register(4), dst, 0);
6516 }
6517 else {
6518 emit_int8((unsigned char)0xC1);
6519 emit_operand(as_Register(4), dst, 1);
6520 emit_int8(imm8);
6521 }
6522 }
6523
6524 void Assembler::esall(Register dst, Address src, int imm8, bool no_flags) {
6525 InstructionMark im(this);
6526 assert(isShiftCount(imm8), "illegal shift count");
6527 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6528 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
6529 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6530 if (imm8 == 1) {
6531 emit_int8((unsigned char)0xD1);
6532 emit_operand(as_Register(4), src, 0);
6533 }
6534 else {
6535 emit_int8((unsigned char)0xC1);
6536 emit_operand(as_Register(4), src, 1);
6537 emit_int8(imm8);
6538 }
6539 }
6540
6541 void Assembler::sall(Address dst) {
6542 InstructionMark im(this);
6543 prefix(dst);
6544 emit_int8((unsigned char)0xD3);
6545 emit_operand(as_Register(4), dst, 0);
6546 }
6547
6548 void Assembler::esall(Register dst, Address src, bool no_flags) {
6549 InstructionMark im(this);
6550 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6551 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
6552 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6553 emit_int8((unsigned char)0xD3);
6554 emit_operand(as_Register(4), src, 0);
6555 }
6556
6557 void Assembler::sall(Register dst, int imm8) {
6558 assert(isShiftCount(imm8), "illegal shift count");
6559 int encode = prefix_and_encode(dst->encoding());
6560 if (imm8 == 1) {
6561 emit_int16((unsigned char)0xD1, (0xE0 | encode));
6562 } else {
6563 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
6564 }
6565 }
6566
6567 void Assembler::esall(Register dst, Register src, int imm8, bool no_flags) {
6568 assert(isShiftCount(imm8), "illegal shift count");
6569 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6570 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6571 if (imm8 == 1) {
6572 emit_int16((unsigned char)0xD1, (0xE0 | encode));
6573 } else {
6574 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
6575 }
6576 }
6577
6578 void Assembler::sall(Register dst) {
6579 int encode = prefix_and_encode(dst->encoding());
6580 emit_int16((unsigned char)0xD3, (0xE0 | encode));
6581 }
6582
6583 void Assembler::esall(Register dst, Register src, bool no_flags) {
6584 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6585 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6586 emit_int16((unsigned char)0xD3, (0xE0 | encode));
6587 }
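
// Sketch of the NDD (new data destination) forms above: with hypothetical
// registers, esall(rax, rbx, 3, true) computes rax = rbx << 3 without
// modifying rbx, and no_flags = true suppresses the EFLAGS update; the
// emit_eevex_prefix_or_demote_* helpers fall back to the shorter legacy
// encoding when the extended form is not needed (e.g. dst == src).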
6588
6589 void Assembler::sarl(Address dst, int imm8) {
6590 assert(isShiftCount(imm8), "illegal shift count");
6591 InstructionMark im(this);
6592 prefix(dst);
6593 if (imm8 == 1) {
6594 emit_int8((unsigned char)0xD1);
6595 emit_operand(as_Register(7), dst, 0);
6596 }
6597 else {
6598 emit_int8((unsigned char)0xC1);
6599 emit_operand(as_Register(7), dst, 1);
6600 emit_int8(imm8);
6601 }
6602 }
6603
6604 void Assembler::esarl(Register dst, Address src, int imm8, bool no_flags) {
6605 assert(isShiftCount(imm8), "illegal shift count");
6606 InstructionMark im(this);
6607 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6608 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
6609 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6610 if (imm8 == 1) {
6611 emit_int8((unsigned char)0xD1);
6612 emit_operand(as_Register(7), src, 0);
6613 }
6614 else {
6615 emit_int8((unsigned char)0xC1);
6616 emit_operand(as_Register(7), src, 1);
6617 emit_int8(imm8);
6618 }
6619 }
6620
6621 void Assembler::sarl(Address dst) {
6622 InstructionMark im(this);
6623 prefix(dst);
6624 emit_int8((unsigned char)0xD3);
6625 emit_operand(as_Register(7), dst, 0);
6626 }
6627
6628 void Assembler::esarl(Register dst, Address src, bool no_flags) {
6629 InstructionMark im(this);
6630 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6631 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
6632 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6633 emit_int8((unsigned char)0xD3);
6634 emit_operand(as_Register(7), src, 0);
6635 }
6636
6637 void Assembler::sarl(Register dst, int imm8) {
6638 int encode = prefix_and_encode(dst->encoding());
6639 assert(isShiftCount(imm8), "illegal shift count");
6640 if (imm8 == 1) {
6641 emit_int16((unsigned char)0xD1, (0xF8 | encode));
6642 } else {
6643 emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
6644 }
6645 }
6646
6647 void Assembler::esarl(Register dst, Register src, int imm8, bool no_flags) {
6648 assert(isShiftCount(imm8), "illegal shift count");
6649 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6650 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6651 if (imm8 == 1) {
6652 emit_int16((unsigned char)0xD1, (0xF8 | encode));
6653 } else {
6654 emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
6655 }
6656 }
6657
6658 void Assembler::sarl(Register dst) {
6659 int encode = prefix_and_encode(dst->encoding());
6660 emit_int16((unsigned char)0xD3, (0xF8 | encode));
6661 }
6662
6663 void Assembler::esarl(Register dst, Register src, bool no_flags) {
6664 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6665 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6666 emit_int16((unsigned char)0xD3, (0xF8 | encode));
6667 }
6668
6669 void Assembler::sbbl(Address dst, int32_t imm32) {
6670 InstructionMark im(this);
6671 prefix(dst);
6672 emit_arith_operand(0x81, rbx, dst, imm32);
6673 }
6674
6675 void Assembler::sbbl(Register dst, int32_t imm32) {
6676 prefix(dst);
6677 emit_arith(0x81, 0xD8, dst, imm32);
6678 }
6679
6680 void Assembler::sbbl(Register dst, Address src) {
6681 InstructionMark im(this);
6682 prefix(src, dst);
6683 emit_int8(0x1B);
6684 emit_operand(dst, src, 0);
6685 }
6686
6687 void Assembler::sbbl(Register dst, Register src) {
6688 (void) prefix_and_encode(dst->encoding(), src->encoding());
6689 emit_arith(0x1B, 0xC0, dst, src);
6690 }
6691
6692 void Assembler::setb(Condition cc, Register dst) {
6693 assert(0 <= cc && cc < 16, "illegal cc");
6694 int encode = prefix_and_encode(dst->encoding(), true, true /* is_map1 */);
6695 emit_opcode_prefix_and_encoding((unsigned char)0x90 | cc, 0xC0, encode);
6696 }
6697
6698 void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
6699 assert(VM_Version::supports_ssse3(), "");
6700 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6701 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6702 emit_int24(0x0F, (0xC0 | encode), imm8);
6703 }
6704
6705 void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
6706 assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
6707 vector_len == AVX_256bit? VM_Version::supports_avx2() :
6708 0, "");
6709 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
6710 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6711 emit_int24(0x0F, (0xC0 | encode), imm8);
6712 }
6713
6714 void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
6715 assert(VM_Version::supports_evex(), "");
6716 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6717 attributes.set_is_evex_instruction();
6718 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6719 emit_int24(0x3, (0xC0 | encode), imm8);
6720 }
6721
6722 void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
6723 assert(VM_Version::supports_sse4_1(), "");
6724 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6725 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6726 emit_int24(0x0E, (0xC0 | encode), imm8);
6727 }
6728
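// For sha1rnds4 below, imm8[1:0] selects which of the four SHA-1 round groups
// (round function and constant K) the four rounds use.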
6729 void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
6730 assert(VM_Version::supports_sha(), "");
6731 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
6732 emit_int24((unsigned char)0xCC, (0xC0 | encode), (unsigned char)imm8);
6733 }
6734
6735 void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
6736 assert(VM_Version::supports_sha(), "");
6737 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
6738 emit_int16((unsigned char)0xC8, (0xC0 | encode));
6739 }
6740
6741 void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
6742 assert(VM_Version::supports_sha(), "");
6743 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
6744 emit_int16((unsigned char)0xC9, (0xC0 | encode));
6745 }
6746
6747 void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
6748 assert(VM_Version::supports_sha(), "");
6749 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
6750 emit_int16((unsigned char)0xCA, (0xC0 | encode));
6751 }
6752
// xmm0 is an implicit additional source for this instruction.
6754 void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
6755 assert(VM_Version::supports_sha(), "");
6756 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
6757 emit_int16((unsigned char)0xCB, (0xC0 | encode));
6758 }
6759
6760 void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
6761 assert(VM_Version::supports_sha(), "");
6762 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
6763 emit_int16((unsigned char)0xCC, (0xC0 | encode));
6764 }
6765
6766 void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
6767 assert(VM_Version::supports_sha(), "");
6768 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
6769 emit_int16((unsigned char)0xCD, (0xC0 | encode));
6770 }
6771
6772 void Assembler::sha512msg1(XMMRegister dst, XMMRegister src) {
6773 assert(VM_Version::supports_sha512() && VM_Version::supports_avx(), "");
6774 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6775 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
6776 emit_int16((unsigned char)0xCC, (0xC0 | encode));
6777 }
6778
6779 void Assembler::sha512msg2(XMMRegister dst, XMMRegister src) {
6780 assert(VM_Version::supports_sha512() && VM_Version::supports_avx(), "");
6781 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6782 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
6783 emit_int16((unsigned char)0xCD, (0xC0 | encode));
6784 }
6785
6786 void Assembler::sha512rnds2(XMMRegister dst, XMMRegister nds, XMMRegister src) {
6787 assert(VM_Version::supports_sha512() && VM_Version::supports_avx(), "");
6788 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6789 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
6790 emit_int16((unsigned char)0xCB, (0xC0 | encode));
6791 }
6792
6793 void Assembler::shll(Register dst, int imm8) {
6794 assert(isShiftCount(imm8), "illegal shift count");
6795 int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
6797 emit_int16((unsigned char)0xD1, (0xE0 | encode));
6798 } else {
6799 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
6800 }
6801 }
6802
6803 void Assembler::eshll(Register dst, Register src, int imm8, bool no_flags) {
6804 assert(isShiftCount(imm8), "illegal shift count");
6805 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6806 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
  if (imm8 == 1) {
6808 emit_int16((unsigned char)0xD1, (0xE0 | encode));
6809 } else {
6810 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
6811 }
6812 }
6813
6814 void Assembler::shll(Register dst) {
6815 int encode = prefix_and_encode(dst->encoding());
6816 emit_int16((unsigned char)0xD3, (0xE0 | encode));
6817 }
6818
6819 void Assembler::eshll(Register dst, Register src, bool no_flags) {
6820 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6821 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6822 emit_int16((unsigned char)0xD3, (0xE0 | encode));
6823 }
6824
6825 void Assembler::shrl(Register dst, int imm8) {
6826 assert(isShiftCount(imm8), "illegal shift count");
6827 int encode = prefix_and_encode(dst->encoding());
6828 if (imm8 == 1) {
6829 emit_int16((unsigned char)0xD1, (0xE8 | encode));
6830 }
6831 else {
6832 emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
6833 }
6834 }
6835
6836 void Assembler::eshrl(Register dst, Register src, int imm8, bool no_flags) {
6837 assert(isShiftCount(imm8), "illegal shift count");
6838 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6839 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6840 if (imm8 == 1) {
6841 emit_int16((unsigned char)0xD1, (0xE8 | encode));
6842 }
6843 else {
6844 emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
6845 }
6846 }
6847
6848 void Assembler::shrl(Register dst) {
6849 int encode = prefix_and_encode(dst->encoding());
6850 emit_int16((unsigned char)0xD3, (0xE8 | encode));
6851 }
6852
6853 void Assembler::eshrl(Register dst, Register src, bool no_flags) {
6854 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6855 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6856 emit_int16((unsigned char)0xD3, (0xE8 | encode));
6857 }
6858
6859 void Assembler::shrl(Address dst) {
6860 InstructionMark im(this);
6861 prefix(dst);
6862 emit_int8((unsigned char)0xD3);
6863 emit_operand(as_Register(5), dst, 0);
6864 }
6865
6866 void Assembler::eshrl(Register dst, Address src, bool no_flags) {
6867 InstructionMark im(this);
6868 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6869 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
6870 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6871 emit_int8((unsigned char)0xD3);
6872 emit_operand(as_Register(5), src, 0);
6873 }
6874
6875 void Assembler::shrl(Address dst, int imm8) {
6876 InstructionMark im(this);
6877 assert(isShiftCount(imm8), "illegal shift count");
6878 prefix(dst);
6879 if (imm8 == 1) {
6880 emit_int8((unsigned char)0xD1);
6881 emit_operand(as_Register(5), dst, 0);
6882 }
6883 else {
6884 emit_int8((unsigned char)0xC1);
6885 emit_operand(as_Register(5), dst, 1);
6886 emit_int8(imm8);
6887 }
6888 }
6889
6890 void Assembler::eshrl(Register dst, Address src, int imm8, bool no_flags) {
6891 InstructionMark im(this);
6892 assert(isShiftCount(imm8), "illegal shift count");
6893 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6894 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
6895 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
6896 if (imm8 == 1) {
6897 emit_int8((unsigned char)0xD1);
6898 emit_operand(as_Register(5), src, 0);
6899 }
6900 else {
6901 emit_int8((unsigned char)0xC1);
6902 emit_operand(as_Register(5), src, 1);
6903 emit_int8(imm8);
6904 }
6905 }
6906
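// Double shifts: shld shifts dst left, filling the vacated low-order bits
// from the high-order bits of src; shrd shifts dst right, filling the
// vacated high-order bits from the low-order bits of src.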
6907 void Assembler::shldl(Register dst, Register src) {
6908 int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
6909 emit_opcode_prefix_and_encoding((unsigned char)0xA5, 0xC0, encode);
6910 }
6911
6912 void Assembler::eshldl(Register dst, Register src1, Register src2, bool no_flags) {
6913 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0xA5, no_flags, true /* is_map1 */);
6914 }
6915
6916 void Assembler::shldl(Register dst, Register src, int8_t imm8) {
6917 int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
6918 emit_opcode_prefix_and_encoding((unsigned char)0xA4, 0xC0, encode, imm8);
6919 }
6920
6921 void Assembler::eshldl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
6922 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), imm8, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x24, no_flags, true /* is_map1 */);
6923 }
6924
6925 void Assembler::shrdl(Register dst, Register src) {
6926 int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
6927 emit_opcode_prefix_and_encoding((unsigned char)0xAD, 0xC0, encode);
6928 }
6929
6930 void Assembler::eshrdl(Register dst, Register src1, Register src2, bool no_flags) {
6931 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0xAD, no_flags, true /* is_map1 */);
6932 }
6933
6934 void Assembler::shrdl(Register dst, Register src, int8_t imm8) {
6935 int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
6936 emit_opcode_prefix_and_encoding((unsigned char)0xAC, 0xC0, encode, imm8);
6937 }
6938
6939 void Assembler::eshrdl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
6940 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), imm8, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x2C, no_flags, true /* is_map1 */);
6941 }
6942
6943 void Assembler::shldq(Register dst, Register src, int8_t imm8) {
6944 int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
6945 emit_opcode_prefix_and_encoding((unsigned char)0xA4, 0xC0, encode, imm8);
6946 }
6947
6948 void Assembler::eshldq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
6949 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), imm8, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x24, no_flags, true /* is_map1 */);
6950 }
6951
6952 void Assembler::shrdq(Register dst, Register src, int8_t imm8) {
6953 int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
6954 emit_opcode_prefix_and_encoding((unsigned char)0xAC, 0xC0, encode, imm8);
6955 }
6956
6957 void Assembler::eshrdq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
6958 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), imm8, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x2C, no_flags, true /* is_map1 */);
6959 }
6960
// copies a single 32-bit word from [rsi] to [rdi]
6962 void Assembler::smovl() {
6963 emit_int8((unsigned char)0xA5);
6964 }
6965
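// For the roundsd forms below, imm8 bits 1:0 select the rounding mode
// (0 = nearest even, 1 = toward -inf, 2 = toward +inf, 3 = truncate),
// bit 2 defers to MXCSR.RC instead, and bit 3 suppresses precision
// exceptions.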
6966 void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
6967 assert(VM_Version::supports_sse4_1(), "");
6968 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6969 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6970 emit_int24(0x0B, (0xC0 | encode), (unsigned char)rmode);
6971 }
6972
6973 void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
6974 assert(VM_Version::supports_sse4_1(), "");
6975 InstructionMark im(this);
6976 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
6977 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6978 emit_int8(0x0B);
6979 emit_operand(dst, src, 1);
6980 emit_int8((unsigned char)rmode);
6981 }
6982
6983 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
6984 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6985 attributes.set_rex_vex_w_reverted();
6986 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
6987 emit_int16(0x51, (0xC0 | encode));
6988 }
6989
6990 void Assembler::sqrtsd(XMMRegister dst, Address src) {
6991 InstructionMark im(this);
6992 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
6993 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
6994 attributes.set_rex_vex_w_reverted();
6995 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
6996 emit_int8(0x51);
6997 emit_operand(dst, src, 0);
6998 }
6999
7000 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
7001 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7002 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7003 emit_int16(0x51, (0xC0 | encode));
7004 }
7005
7006 void Assembler::std() {
7007 emit_int8((unsigned char)0xFD);
7008 }
7009
7010 void Assembler::sqrtss(XMMRegister dst, Address src) {
7011 InstructionMark im(this);
7012 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7013 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7014 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7015 emit_int8(0x51);
7016 emit_operand(dst, src, 0);
7017 }
7018
7019 void Assembler::stmxcsr(Address dst) {
  // This instruction must be SSE encoded (with a REX2 prefix) when an
  // extended GPR is present. To be consistent when UseAPX is enabled, use
  // this encoding even when an extended GPR is not used.
  if (UseAVX > 0 && !UseAPX) {
7024 assert(VM_Version::supports_avx(), "");
7025 InstructionMark im(this);
7026 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
7027 vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7028 emit_int8((unsigned char)0xAE);
7029 emit_operand(as_Register(3), dst, 0);
7030 } else {
7031 InstructionMark im(this);
7032 prefix(dst, true /* is_map1 */);
7033 emit_int8((unsigned char)0xAE);
7034 emit_operand(as_Register(3), dst, 0);
7035 }
7036 }
7037
7038 void Assembler::subl(Address dst, int32_t imm32) {
7039 InstructionMark im(this);
7040 prefix(dst);
7041 emit_arith_operand(0x81, rbp, dst, imm32);
7042 }
7043
7044 void Assembler::esubl(Register dst, Address src, int32_t imm32, bool no_flags) {
7045 InstructionMark im(this);
7046 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7047 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
7048 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7049 emit_arith_operand(0x81, rbp, src, imm32);
7050 }
7051
7052 void Assembler::subl(Address dst, Register src) {
7053 InstructionMark im(this);
7054 prefix(dst, src);
7055 emit_int8(0x29);
7056 emit_operand(src, dst, 0);
7057 }
7058
7059 void Assembler::esubl(Register dst, Address src1, Register src2, bool no_flags) {
7060 InstructionMark im(this);
7061 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7062 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
7063 eevex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7064 emit_int8(0x29);
7065 emit_operand(src2, src1, 0);
7066 }
7067
7068 void Assembler::subl(Register dst, int32_t imm32) {
7069 prefix(dst);
7070 emit_arith(0x81, 0xE8, dst, imm32);
7071 }
7072
7073 void Assembler::esubl(Register dst, Register src, int32_t imm32, bool no_flags) {
7074 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x81, 0xE8, no_flags);
7075 }
7076
// Force generation of a 4-byte immediate value even if it fits into 8 bits
7078 void Assembler::subl_imm32(Register dst, int32_t imm32) {
7079 prefix(dst);
7080 emit_arith_imm32(0x81, 0xE8, dst, imm32);
7081 }
7082
7083 void Assembler::esubl_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
7084 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7085 (void) emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7086 emit_arith_imm32(0x81, 0xE8, src, imm32);
7087 }
7088
7089 void Assembler::subl(Register dst, Address src) {
7090 InstructionMark im(this);
7091 prefix(src, dst);
7092 emit_int8(0x2B);
7093 emit_operand(dst, src, 0);
7094 }
7095
7096 void Assembler::esubl(Register dst, Register src1, Address src2, bool no_flags) {
7097 InstructionMark im(this);
7098 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x2B, no_flags);
7099 }
7100
7101 void Assembler::subl(Register dst, Register src) {
7102 (void) prefix_and_encode(dst->encoding(), src->encoding());
7103 emit_arith(0x2B, 0xC0, dst, src);
7104 }
7105
7106 void Assembler::esubl(Register dst, Register src1, Register src2, bool no_flags) {
7107 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7108 // NDD shares its encoding bits with NDS bits for regular EVEX instruction.
7109 // Therefore, DST is passed as the second argument to minimize changes in the leaf level routine.
7110 (void) emit_eevex_prefix_or_demote_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7111 emit_arith(0x2B, 0xC0, src1, src2);
7112 }
7113
7114 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
7115 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7116 attributes.set_rex_vex_w_reverted();
7117 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7118 emit_int16(0x5C, (0xC0 | encode));
7119 }
7120
7121 void Assembler::subsd(XMMRegister dst, Address src) {
7122 InstructionMark im(this);
7123 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7124 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
7125 attributes.set_rex_vex_w_reverted();
7126 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7127 emit_int8(0x5C);
7128 emit_operand(dst, src, 0);
7129 }
7130
7131 void Assembler::subss(XMMRegister dst, XMMRegister src) {
7132 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false);
7133 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7134 emit_int16(0x5C, (0xC0 | encode));
7135 }
7136
7137 void Assembler::subss(XMMRegister dst, Address src) {
7138 InstructionMark im(this);
7139 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7140 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7141 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7142 emit_int8(0x5C);
7143 emit_operand(dst, src, 0);
7144 }
7145
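// When dst == rax, use_ral selects between the short AL form (A8 ib) and the
// general F6 /0 form with the modrm byte hard-wired to AH (F6 C4 ib), letting
// callers test the high byte of ax.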
7146 void Assembler::testb(Register dst, int imm8, bool use_ral) {
7147 if (dst == rax) {
7148 if (use_ral) {
7149 emit_int8((unsigned char)0xA8);
7150 emit_int8(imm8);
7151 } else {
7152 emit_int8((unsigned char)0xF6);
7153 emit_int8((unsigned char)0xC4);
7154 emit_int8(imm8);
7155 }
7156 } else {
7157 (void) prefix_and_encode(dst->encoding(), true);
7158 emit_arith_b(0xF6, 0xC0, dst, imm8);
7159 }
7160 }
7161
7162 void Assembler::testb(Address dst, int imm8) {
7163 InstructionMark im(this);
7164 prefix(dst);
7165 emit_int8((unsigned char)0xF6);
7166 emit_operand(rax, dst, 1);
7167 emit_int8(imm8);
7168 }
7169
7170 void Assembler::testl(Address dst, int32_t imm32) {
7171 InstructionMark im(this);
7172 prefix(dst);
7173 emit_int8((unsigned char)0xF7);
7174 emit_operand(as_Register(0), dst, 4);
7175 emit_int32(imm32);
7176 }
7177
7178 void Assembler::testl(Register dst, int32_t imm32) {
  // Not using emit_arith because test doesn't support sign-extension of 8-bit operands.
7182 if (dst == rax) {
7183 emit_int8((unsigned char)0xA9);
7184 emit_int32(imm32);
7185 } else {
7186 int encode = dst->encoding();
7187 encode = prefix_and_encode(encode);
7188 emit_int16((unsigned char)0xF7, (0xC0 | encode));
7189 emit_int32(imm32);
7190 }
7191 }
7192
7193 void Assembler::testl(Register dst, Register src) {
7194 (void) prefix_and_encode(dst->encoding(), src->encoding());
7195 emit_arith(0x85, 0xC0, dst, src);
7196 }
7197
7198 void Assembler::testl(Register dst, Address src) {
7199 InstructionMark im(this);
7200 prefix(src, dst);
7201 emit_int8((unsigned char)0x85);
7202 emit_operand(dst, src, 0);
7203 }
7204
7205 void Assembler::tzcntl(Register dst, Register src) {
7206 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7207 emit_int8((unsigned char)0xF3);
7208 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
7209 emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
7210 }
7211
7212 void Assembler::etzcntl(Register dst, Register src, bool no_flags) {
7213 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7214 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7215 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7216 emit_int16((unsigned char)0xF4, (0xC0 | encode));
7217 }
7218
7219 void Assembler::tzcntl(Register dst, Address src) {
7220 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7221 InstructionMark im(this);
7222 emit_int8((unsigned char)0xF3);
7223 prefix(src, dst, false, true /* is_map1 */);
7224 emit_int8((unsigned char)0xBC);
7225 emit_operand(dst, src, 0);
7226 }
7227
7228 void Assembler::etzcntl(Register dst, Address src, bool no_flags) {
7229 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7230 InstructionMark im(this);
7231 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7232 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
7233 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7234 emit_int8((unsigned char)0xF4);
7235 emit_operand(dst, src, 0);
7236 }
7237
7238 void Assembler::tzcntq(Register dst, Register src) {
7239 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7240 emit_int8((unsigned char)0xF3);
7241 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
7242 emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
7243 }
7244
7245 void Assembler::etzcntq(Register dst, Register src, bool no_flags) {
7246 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7247 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7248 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7249 emit_int16((unsigned char)0xF4, (0xC0 | encode));
7250 }
7251
7252 void Assembler::tzcntq(Register dst, Address src) {
7253 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7254 InstructionMark im(this);
7255 emit_int8((unsigned char)0xF3);
7256 prefixq(src, dst, true /* is_map1 */);
7257 emit_int8((unsigned char)0xBC);
7258 emit_operand(dst, src, 0);
7259 }
7260
7261 void Assembler::etzcntq(Register dst, Address src, bool no_flags) {
7262 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
7263 InstructionMark im(this);
7264 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7265 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
7266 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7267 emit_int8((unsigned char)0xF4);
7268 emit_operand(dst, src, 0);
7269 }
7270
7271 void Assembler::ucomisd(XMMRegister dst, Address src) {
7272 InstructionMark im(this);
7273 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7274 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
7275 attributes.set_rex_vex_w_reverted();
7276 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7277 emit_int8(0x2E);
7278 emit_operand(dst, src, 0);
7279 }
7280
7281 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
7282 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7283 attributes.set_rex_vex_w_reverted();
7284 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7285 emit_int16(0x2E, (0xC0 | encode));
7286 }
7287
7288 void Assembler::ucomiss(XMMRegister dst, Address src) {
7289 InstructionMark im(this);
7290 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7291 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7292 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7293 emit_int8(0x2E);
7294 emit_operand(dst, src, 0);
7295 }
7296
7297 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
7298 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7299 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7300 emit_int16(0x2E, (0xC0 | encode));
7301 }
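
// ucomisd/ucomiss perform an unordered scalar compare and set ZF/PF/CF
// (OF/SF/AF are cleared); PF=1 flags an unordered (NaN) operand. A typical
// NaN check, assuming an is_nan label at the call site:
//   ucomisd(xmm0, xmm1);
//   jcc(Assembler::parity, is_nan);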
7302
7303 void Assembler::xabort(int8_t imm8) {
7304 emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (imm8 & 0xFF));
7305 }
7306
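// xadd exchanges its operands and stores the sum in the destination:
//   tmp = dst; dst = tmp + src; src = tmp;
// With a preceding lock() prefix this is the usual atomic fetch-and-add.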
7307 void Assembler::xaddb(Address dst, Register src) {
7308 InstructionMark im(this);
7309 prefix(dst, src, true, true /* is_map1 */);
7310 emit_int8((unsigned char)0xC0);
7311 emit_operand(src, dst, 0);
7312 }
7313
7314 void Assembler::xaddw(Address dst, Register src) {
7315 InstructionMark im(this);
7316 emit_int8(0x66);
7317 prefix(dst, src, false, true /* is_map1 */);
7318 emit_int8((unsigned char)0xC1);
7319 emit_operand(src, dst, 0);
7320 }
7321
7322 void Assembler::xaddl(Address dst, Register src) {
7323 InstructionMark im(this);
7324 prefix(dst, src, false, true /* is_map1 */);
7325 emit_int8((unsigned char)0xC1);
7326 emit_operand(src, dst, 0);
7327 }
7328
7329 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
7330 InstructionMark im(this);
7331 relocate(rtype);
7332 if (abort.is_bound()) {
7333 address entry = target(abort);
7334 assert(entry != nullptr, "abort entry null");
7335 int offset = checked_cast<int>(entry - pc());
7336 emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
    emit_int32(offset - 6); // rel32 is relative to the next instruction: 2 opcode bytes + 4 displacement bytes
7338 } else {
7339 abort.add_patch_at(code(), locator());
7340 emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
7341 emit_int32(0);
7342 }
7343 }
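
// Worked example for the bound case above: if the abort handler sits 0x20
// bytes past the current pc, offset == 0x20 and the emitted rel32 is 0x1A,
// since the CPU resolves rel32 against the end of the 6-byte xbegin.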
7344
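// Note: xchg with a memory operand asserts the bus lock implicitly, so no
// explicit lock() prefix is required for atomicity.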
7345 void Assembler::xchgb(Register dst, Address src) { // xchg
7346 InstructionMark im(this);
7347 prefix(src, dst, true);
7348 emit_int8((unsigned char)0x86);
7349 emit_operand(dst, src, 0);
7350 }
7351
7352 void Assembler::xchgw(Register dst, Address src) { // xchg
7353 InstructionMark im(this);
7354 emit_int8(0x66);
7355 prefix(src, dst);
7356 emit_int8((unsigned char)0x87);
7357 emit_operand(dst, src, 0);
7358 }
7359
7360 void Assembler::xchgl(Register dst, Address src) { // xchg
7361 InstructionMark im(this);
7362 prefix(src, dst);
7363 emit_int8((unsigned char)0x87);
7364 emit_operand(dst, src, 0);
7365 }
7366
7367 void Assembler::xchgl(Register dst, Register src) {
7368 int encode = prefix_and_encode(dst->encoding(), src->encoding());
7369 emit_int16((unsigned char)0x87, (0xC0 | encode));
7370 }
7371
7372 void Assembler::xend() {
7373 emit_int24(0x0F, 0x01, (unsigned char)0xD5);
7374 }
7375
7376 void Assembler::xgetbv() {
7377 emit_int24(0x0F, 0x01, (unsigned char)0xD0);
7378 }
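
// xgetbv reads the extended control register selected by ECX into EDX:EAX.
// A minimal usage sketch for querying OS-enabled state in XCR0:
//   movl(rcx, 0); // hypothetical call site; ECX == 0 selects XCR0
//   xgetbv();     // afterwards EAX[2:1] == 11b means XMM+YMM state enabled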
7379
7380 void Assembler::xorl(Address dst, int32_t imm32) {
7381 InstructionMark im(this);
7382 prefix(dst);
7383 emit_arith_operand(0x81, as_Register(6), dst, imm32);
7384 }
7385
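// The e-prefixed xor variants below use the APX NDD (new data destination)
// EVEX form: the result lands in dst while the named sources are preserved,
// and no_flags again requests the flag-suppressing (NF) encoding. Where dst
// aliases a source, the emit_eevex_*_or_demote helpers fall back to the
// shorter legacy encoding, as their names suggest.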
7386 void Assembler::exorl(Register dst, Address src, int32_t imm32, bool no_flags) {
7387 InstructionMark im(this);
7388 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7389 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
7390 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
7391 emit_arith_operand(0x81, as_Register(6), src, imm32);
7392 }
7393
7394 void Assembler::xorl(Register dst, int32_t imm32) {
7395 prefix(dst);
7396 emit_arith(0x81, 0xF0, dst, imm32);
7397 }
7398
7399 void Assembler::exorl(Register dst, Register src, int32_t imm32, bool no_flags) {
7400 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x81, 0xF0, no_flags);
7401 }
7402
7403 void Assembler::xorl(Register dst, Address src) {
7404 InstructionMark im(this);
7405 prefix(src, dst);
7406 emit_int8(0x33);
7407 emit_operand(dst, src, 0);
7408 }
7409
7410 void Assembler::exorl(Register dst, Register src1, Address src2, bool no_flags) {
7411 InstructionMark im(this);
7412 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x33, no_flags);
7413 }
7414
7415 void Assembler::xorl(Register dst, Register src) {
7416 (void) prefix_and_encode(dst->encoding(), src->encoding());
7417 emit_arith(0x33, 0xC0, dst, src);
7418 }
7419
7420 void Assembler::exorl(Register dst, Register src1, Register src2, bool no_flags) {
7421 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x33, 0xC0, no_flags, true /* is_commutative */);
7422 }
7423
7424 void Assembler::xorl(Address dst, Register src) {
7425 InstructionMark im(this);
7426 prefix(dst, src);
7427 emit_int8(0x31);
7428 emit_operand(src, dst, 0);
7429 }
7430
7431 void Assembler::exorl(Register dst, Address src1, Register src2, bool no_flags) {
7432 InstructionMark im(this);
7433 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_32bit, 0x31, no_flags, false /* is_map1 */, true /* is_commutative */);
7434 }
7435
7436 void Assembler::xorb(Register dst, Address src) {
7437 InstructionMark im(this);
7438 prefix(src, dst);
7439 emit_int8(0x32);
7440 emit_operand(dst, src, 0);
7441 }
7442
7443 void Assembler::exorb(Register dst, Register src1, Address src2, bool no_flags) {
7444 InstructionMark im(this);
7445 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_8bit, 0x32, no_flags);
7446 }
7447
7448 void Assembler::xorb(Address dst, Register src) {
7449 InstructionMark im(this);
7450 prefix(dst, src, true);
7451 emit_int8(0x30);
7452 emit_operand(src, dst, 0);
7453 }
7454
7455 void Assembler::exorb(Register dst, Address src1, Register src2, bool no_flags) {
7456 InstructionMark im(this);
7457 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_8bit, 0x30, no_flags, false /* is_map1 */, true /* is_commutative */);
7458 }
7459
7460 void Assembler::xorw(Register dst, Address src) {
7461 InstructionMark im(this);
7462 emit_int8(0x66);
7463 prefix(src, dst);
7464 emit_int8(0x33);
7465 emit_operand(dst, src, 0);
7466 }
7467
7468 void Assembler::exorw(Register dst, Register src1, Address src2, bool no_flags) {
7469 InstructionMark im(this);
7470 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_16bit, 0x33, no_flags);
7471 }
7472
// AVX 3-operand scalar floating-point arithmetic instructions
7474
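// VEX gives these a non-destructive 3-operand form: nds supplies the first
// source and also donates the untouched upper bits of the destination.
// For example, vaddsd(xmm0, xmm1, xmm2) computes
//   xmm0[63:0] = xmm1[63:0] + xmm2[63:0]; xmm0[127:64] = xmm1[127:64];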
7475 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
7476 assert(VM_Version::supports_avx(), "");
7477 InstructionMark im(this);
7478 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7479 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
7480 attributes.set_rex_vex_w_reverted();
7481 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7482 emit_int8(0x58);
7483 emit_operand(dst, src, 0);
7484 }
7485
7486 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7487 assert(VM_Version::supports_avx(), "");
7488 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7489 attributes.set_rex_vex_w_reverted();
7490 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7491 emit_int16(0x58, (0xC0 | encode));
7492 }
7493
7494 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
7495 assert(VM_Version::supports_avx(), "");
7496 InstructionMark im(this);
7497 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7498 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7499 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7500 emit_int8(0x58);
7501 emit_operand(dst, src, 0);
7502 }
7503
7504 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7505 assert(VM_Version::supports_avx(), "");
7506 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7507 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7508 emit_int16(0x58, (0xC0 | encode));
7509 }
7510
7511 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
7512 assert(VM_Version::supports_avx(), "");
7513 InstructionMark im(this);
7514 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7515 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
7516 attributes.set_rex_vex_w_reverted();
7517 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7518 emit_int8(0x5E);
7519 emit_operand(dst, src, 0);
7520 }
7521
7522 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7523 assert(VM_Version::supports_avx(), "");
7524 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7525 attributes.set_rex_vex_w_reverted();
7526 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7527 emit_int16(0x5E, (0xC0 | encode));
7528 }
7529
7530 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
7531 assert(VM_Version::supports_avx(), "");
7532 InstructionMark im(this);
7533 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7534 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7535 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7536 emit_int8(0x5E);
7537 emit_operand(dst, src, 0);
7538 }
7539
7540 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7541 assert(VM_Version::supports_avx(), "");
7542 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7543 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7544 emit_int16(0x5E, (0xC0 | encode));
7545 }
7546
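// FMA mnemonic digits name the operand roles. For the (dst, src1, src2)
// signatures used here, 231 means dst = src1 * src2 + dst and 213 means
// dst = src1 * dst + src2; the fnmadd forms negate the product, so
// vfnmadd231sd computes dst = -(src1 * src2) + dst on the low double.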
7547 void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
7548 assert(VM_Version::supports_fma(), "");
7549 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7550 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7551 emit_int16((unsigned char)0xB9, (0xC0 | encode));
7552 }
7553
void Assembler::evfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2, EvexRoundPrefix rmode) { // rmode supplies the EVEX embedded rounding control
7555 assert(VM_Version::supports_evex(), "");
7556 InstructionAttr attributes(rmode, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7557 attributes.set_extended_context();
7558 attributes.set_is_evex_instruction();
7559 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7560 emit_int16((unsigned char)0xAD, (0xC0 | encode));
7561 }
7562
7563 void Assembler::vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
7564 assert(VM_Version::supports_fma(), "");
7565 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7566 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7567 emit_int16((unsigned char)0xAD, (0xC0 | encode));
7568 }
7569
7570 void Assembler::vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
7571 assert(VM_Version::supports_fma(), "");
7572 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7573 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7574 emit_int16((unsigned char)0xBD, (0xC0 | encode));
7575 }
7576
7577 void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
7578 assert(VM_Version::supports_fma(), "");
7579 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7580 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7581 emit_int16((unsigned char)0xB9, (0xC0 | encode));
7582 }
7583
7584 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
7585 assert(VM_Version::supports_avx(), "");
7586 InstructionMark im(this);
7587 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7588 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
7589 attributes.set_rex_vex_w_reverted();
7590 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7591 emit_int8(0x59);
7592 emit_operand(dst, src, 0);
7593 }
7594
7595 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7596 assert(VM_Version::supports_avx(), "");
7597 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7598 attributes.set_rex_vex_w_reverted();
7599 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7600 emit_int16(0x59, (0xC0 | encode));
7601 }
7602
7603 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
7604 assert(VM_Version::supports_avx(), "");
7605 InstructionMark im(this);
7606 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7607 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7608 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7609 emit_int8(0x59);
7610 emit_operand(dst, src, 0);
7611 }
7612
7613 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7614 assert(VM_Version::supports_avx(), "");
7615 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7616 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7617 emit_int16(0x59, (0xC0 | encode));
7618 }
7619
7620 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
7621 assert(VM_Version::supports_avx(), "");
7622 InstructionMark im(this);
7623 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7624 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
7625 attributes.set_rex_vex_w_reverted();
7626 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7627 emit_int8(0x5C);
7628 emit_operand(dst, src, 0);
7629 }
7630
7631 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7632 assert(VM_Version::supports_avx(), "");
7633 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7634 attributes.set_rex_vex_w_reverted();
7635 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
7636 emit_int16(0x5C, (0xC0 | encode));
7637 }
7638
7639 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
7640 assert(VM_Version::supports_avx(), "");
7641 InstructionMark im(this);
7642 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7643 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
7644 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7645 emit_int8(0x5C);
7646 emit_operand(dst, src, 0);
7647 }
7648
7649 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
7650 assert(VM_Version::supports_avx(), "");
7651 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7652 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
7653 emit_int16(0x5C, (0xC0 | encode));
7654 }
7655
7656 //====================VECTOR ARITHMETIC=====================================
7657
// Floating-point vector arithmetic
7659
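// Note on addressing: tuple_type EVEX_FV marks these as full-vector memory
// operands, so under EVEX an 8-bit displacement is implicitly scaled by the
// vector width (disp8*N compression); the VEX path is unaffected.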
7660 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
7661 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7662 attributes.set_rex_vex_w_reverted();
7663 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7664 emit_int16(0x58, (0xC0 | encode));
7665 }
7666
7667 void Assembler::addpd(XMMRegister dst, Address src) {
7668 InstructionMark im(this);
7669 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7670 attributes.set_rex_vex_w_reverted();
7671 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7672 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7673 emit_int8(0x58);
7674 emit_operand(dst, src, 0);
7675 }
7676
7678 void Assembler::addps(XMMRegister dst, XMMRegister src) {
7679 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7680 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7681 emit_int16(0x58, (0xC0 | encode));
7682 }
7683
7684 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7685 assert(VM_Version::supports_avx(), "");
7686 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7687 attributes.set_rex_vex_w_reverted();
7688 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7689 emit_int16(0x58, (0xC0 | encode));
7690 }
7691
7692 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7693 assert(VM_Version::supports_avx(), "");
7694 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7695 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7696 emit_int16(0x58, (0xC0 | encode));
7697 }
7698
7699 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7700 assert(VM_Version::supports_avx(), "");
7701 InstructionMark im(this);
7702 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7703 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7704 attributes.set_rex_vex_w_reverted();
7705 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7706 emit_int8(0x58);
7707 emit_operand(dst, src, 0);
7708 }
7709
7710 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7711 assert(VM_Version::supports_avx(), "");
7712 InstructionMark im(this);
7713 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7714 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7715 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7716 emit_int8(0x58);
7717 emit_operand(dst, src, 0);
7718 }
7719
7720 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
7721 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7722 attributes.set_rex_vex_w_reverted();
7723 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7724 emit_int16(0x5C, (0xC0 | encode));
7725 }
7726
7727 void Assembler::subps(XMMRegister dst, XMMRegister src) {
7728 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7729 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7730 emit_int16(0x5C, (0xC0 | encode));
7731 }
7732
7733 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7734 assert(VM_Version::supports_avx(), "");
7735 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7736 attributes.set_rex_vex_w_reverted();
7737 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7738 emit_int16(0x5C, (0xC0 | encode));
7739 }
7740
7741 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7742 assert(VM_Version::supports_avx(), "");
7743 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7744 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7745 emit_int16(0x5C, (0xC0 | encode));
7746 }
7747
7748 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7749 assert(VM_Version::supports_avx(), "");
7750 InstructionMark im(this);
7751 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7752 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7753 attributes.set_rex_vex_w_reverted();
7754 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7755 emit_int8(0x5C);
7756 emit_operand(dst, src, 0);
7757 }
7758
7759 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7760 assert(VM_Version::supports_avx(), "");
7761 InstructionMark im(this);
7762 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7763 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7764 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7765 emit_int8(0x5C);
7766 emit_operand(dst, src, 0);
7767 }
7768
7769 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
7770 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7771 attributes.set_rex_vex_w_reverted();
7772 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7773 emit_int16(0x59, (0xC0 | encode));
7774 }
7775
7776 void Assembler::mulpd(XMMRegister dst, Address src) {
7777 InstructionMark im(this);
7778 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7779 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7780 attributes.set_rex_vex_w_reverted();
7781 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7782 emit_int8(0x59);
7783 emit_operand(dst, src, 0);
7784 }
7785
7786 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
7787 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7788 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7789 emit_int16(0x59, (0xC0 | encode));
7790 }
7791
7792 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7793 assert(VM_Version::supports_avx(), "");
7794 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7795 attributes.set_rex_vex_w_reverted();
7796 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7797 emit_int16(0x59, (0xC0 | encode));
7798 }
7799
7800 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7801 assert(VM_Version::supports_avx(), "");
7802 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7803 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7804 emit_int16(0x59, (0xC0 | encode));
7805 }
7806
7807 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7808 assert(VM_Version::supports_avx(), "");
7809 InstructionMark im(this);
7810 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7811 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7812 attributes.set_rex_vex_w_reverted();
7813 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7814 emit_int8(0x59);
7815 emit_operand(dst, src, 0);
7816 }
7817
7818 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7819 assert(VM_Version::supports_avx(), "");
7820 InstructionMark im(this);
7821 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7822 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7823 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7824 emit_int8(0x59);
7825 emit_operand(dst, src, 0);
7826 }
7827
7828 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
7829 assert(VM_Version::supports_fma(), "");
7830 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7831 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7832 emit_int16((unsigned char)0xB8, (0xC0 | encode));
7833 }
7834
7835 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
7836 assert(VM_Version::supports_fma(), "");
7837 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7838 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7839 emit_int16((unsigned char)0xB8, (0xC0 | encode));
7840 }
7841
7842 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
7843 assert(VM_Version::supports_fma(), "");
7844 InstructionMark im(this);
7845 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7846 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7847 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7848 emit_int8((unsigned char)0xB8);
7849 emit_operand(dst, src2, 0);
7850 }
7851
7852 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
7853 assert(VM_Version::supports_fma(), "");
7854 InstructionMark im(this);
7855 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7856 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7857 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
7858 emit_int8((unsigned char)0xB8);
7859 emit_operand(dst, src2, 0);
7860 }
7861
7862 void Assembler::divpd(XMMRegister dst, XMMRegister src) {
7863 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7864 attributes.set_rex_vex_w_reverted();
7865 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7866 emit_int16(0x5E, (0xC0 | encode));
7867 }
7868
7869 void Assembler::divps(XMMRegister dst, XMMRegister src) {
7870 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7871 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7872 emit_int16(0x5E, (0xC0 | encode));
7873 }
7874
7875 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7876 assert(VM_Version::supports_avx(), "");
7877 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7878 attributes.set_rex_vex_w_reverted();
7879 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7880 emit_int16(0x5E, (0xC0 | encode));
7881 }
7882
7883 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
7884 assert(VM_Version::supports_avx(), "");
7885 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7886 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7887 emit_int16(0x5E, (0xC0 | encode));
7888 }
7889
7890 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7891 assert(VM_Version::supports_avx(), "");
7892 InstructionMark im(this);
7893 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7894 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7895 attributes.set_rex_vex_w_reverted();
7896 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7897 emit_int8(0x5E);
7898 emit_operand(dst, src, 0);
7899 }
7900
7901 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
7902 assert(VM_Version::supports_avx(), "");
7903 InstructionMark im(this);
7904 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7905 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7906 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7907 emit_int8(0x5E);
7908 emit_operand(dst, src, 0);
7909 }
7910
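// The rounding immediate used below: bits 1:0 select the mode (00 nearest,
// 01 down, 10 up, 11 truncate), bit 2 defers to MXCSR.RC instead, and bit 3
// suppresses precision exceptions; vrndscale* additionally uses bits 7:4 as
// the scaling factor M (rounding to 2^-M precision).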
7911 void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
7912 assert(VM_Version::supports_avx(), "");
7913 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
7914 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7915 emit_int24(0x09, (0xC0 | encode), (rmode));
7916 }
7917
7918 void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
7919 assert(VM_Version::supports_avx(), "");
7920 assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs as BASE or INDEX of address operand");
7921 InstructionMark im(this);
7922 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
7923 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7924 emit_int8(0x09);
7925 emit_operand(dst, src, 1);
7926 emit_int8((rmode));
7927 }
7928
7929 void Assembler::vroundsd(XMMRegister dst, XMMRegister src, XMMRegister src2, int32_t rmode) {
7930 assert(VM_Version::supports_avx(), "");
7931 assert(rmode <= 0x0f, "rmode 0x%x", rmode);
7932 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7933 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7934 emit_int24(0x0B, (0xC0 | encode), (rmode));
7935 }
7936
7937 void Assembler::vrndscalesd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int32_t rmode) {
7938 assert(VM_Version::supports_evex(), "requires EVEX support");
7939 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
7940 attributes.set_is_evex_instruction();
7941 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7942 emit_int24(0x0B, (0xC0 | encode), (rmode));
7943 }
7944
7945 void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
7946 assert(VM_Version::supports_evex(), "requires EVEX support");
7947 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7948 attributes.set_is_evex_instruction();
7949 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7950 emit_int24(0x09, (0xC0 | encode), (rmode));
7951 }
7952
7953 void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
7954 assert(VM_Version::supports_evex(), "requires EVEX support");
7955 assert(dst != xnoreg, "sanity");
7956 InstructionMark im(this);
7957 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7958 attributes.set_is_evex_instruction();
7959 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7960 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
7961 emit_int8(0x09);
7962 emit_operand(dst, src, 1);
7963 emit_int8((rmode));
7964 }
7965
7966 void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
7967 assert(VM_Version::supports_avx(), "");
7968 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7969 attributes.set_rex_vex_w_reverted();
7970 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7971 emit_int16(0x51, (0xC0 | encode));
7972 }
7973
7974 void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
7975 assert(VM_Version::supports_avx(), "");
7976 InstructionMark im(this);
7977 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7978 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7979 attributes.set_rex_vex_w_reverted();
7980 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
7981 emit_int8(0x51);
7982 emit_operand(dst, src, 0);
7983 }
7984
7985 void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
7986 assert(VM_Version::supports_avx(), "");
7987 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7988 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7989 emit_int16(0x51, (0xC0 | encode));
7990 }
7991
7992 void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
7993 assert(VM_Version::supports_avx(), "");
7994 InstructionMark im(this);
7995 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
7996 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
7997 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
7998 emit_int8(0x51);
7999 emit_operand(dst, src, 0);
8000 }
8001
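// The packed logical ops below pass _legacy_mode_dq: their EVEX encodings
// arrived only with AVX-512DQ, so without DQ support the assembler falls
// back to VEX (and thus to the lower 16 XMM registers).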
8002 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
8003 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8004 attributes.set_rex_vex_w_reverted();
8005 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8006 emit_int16(0x54, (0xC0 | encode));
8007 }
8008
8009 void Assembler::andnpd(XMMRegister dst, XMMRegister src) {
8010 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8011 attributes.set_rex_vex_w_reverted();
8012 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8013 emit_int16(0x55, (0xC0 | encode));
8014 }
8015
8016 void Assembler::andps(XMMRegister dst, XMMRegister src) {
8017 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8018 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8019 emit_int16(0x54, (0xC0 | encode));
8020 }
8021
8022 void Assembler::andps(XMMRegister dst, Address src) {
8023 InstructionMark im(this);
8024 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8025 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8026 simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8027 emit_int8(0x54);
8028 emit_operand(dst, src, 0);
8029 }
8030
8031 void Assembler::andpd(XMMRegister dst, Address src) {
8032 InstructionMark im(this);
8033 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8034 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8035 attributes.set_rex_vex_w_reverted();
8036 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8037 emit_int8(0x54);
8038 emit_operand(dst, src, 0);
8039 }
8040
8041 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8042 assert(VM_Version::supports_avx(), "");
8043 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8044 attributes.set_rex_vex_w_reverted();
8045 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8046 emit_int16(0x54, (0xC0 | encode));
8047 }
8048
8049 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8050 assert(VM_Version::supports_avx(), "");
8051 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8052 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8053 emit_int16(0x54, (0xC0 | encode));
8054 }
8055
8056 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8057 assert(VM_Version::supports_avx(), "");
8058 InstructionMark im(this);
8059 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8060 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8061 attributes.set_rex_vex_w_reverted();
8062 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8063 emit_int8(0x54);
8064 emit_operand(dst, src, 0);
8065 }
8066
8067 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8068 assert(VM_Version::supports_avx(), "");
8069 InstructionMark im(this);
8070 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8071 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8072 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8073 emit_int8(0x54);
8074 emit_operand(dst, src, 0);
8075 }
8076
8077 void Assembler::orpd(XMMRegister dst, XMMRegister src) {
8078 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
8079 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8080 attributes.set_rex_vex_w_reverted();
8081 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8082 emit_int16(0x56, (0xC0 | encode));
8083 }
8084
8085 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
8086 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8087 attributes.set_rex_vex_w_reverted();
8088 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x15, (0xC0 | encode));
8091 }
8092
8093 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
8094 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8095 attributes.set_rex_vex_w_reverted();
8096 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8097 emit_int16(0x14, (0xC0 | encode));
8098 }
8099
8100 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
8101 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8102 attributes.set_rex_vex_w_reverted();
8103 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8104 emit_int16(0x57, (0xC0 | encode));
8105 }
8106
8107 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
8108 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8109 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8110 emit_int16(0x57, (0xC0 | encode));
8111 }
8112
8113 void Assembler::xorpd(XMMRegister dst, Address src) {
8114 InstructionMark im(this);
8115 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8116 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8117 attributes.set_rex_vex_w_reverted();
8118 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8119 emit_int8(0x57);
8120 emit_operand(dst, src, 0);
8121 }
8122
8123 void Assembler::xorps(XMMRegister dst, Address src) {
8124 InstructionMark im(this);
8125 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8126 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8127 simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8128 emit_int8(0x57);
8129 emit_operand(dst, src, 0);
8130 }
8131
8132 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8133 assert(VM_Version::supports_avx(), "");
8134 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8135 attributes.set_rex_vex_w_reverted();
8136 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8137 emit_int16(0x57, (0xC0 | encode));
8138 }
8139
8140 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8141 assert(VM_Version::supports_avx(), "");
8142 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8143 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8144 emit_int16(0x57, (0xC0 | encode));
8145 }
8146
8147 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8148 assert(VM_Version::supports_avx(), "");
8149 InstructionMark im(this);
8150 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8151 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8152 attributes.set_rex_vex_w_reverted();
8153 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8154 emit_int8(0x57);
8155 emit_operand(dst, src, 0);
8156 }
8157
8158 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8159 assert(VM_Version::supports_avx(), "");
8160 InstructionMark im(this);
8161 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8162 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8163 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8164 emit_int8(0x57);
8165 emit_operand(dst, src, 0);
8166 }
8167
8168 // Integer vector arithmetic
8169 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
8172 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8173 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8174 emit_int16(0x01, (0xC0 | encode));
8175 }
8176
8177 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
8180 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8181 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8182 emit_int16(0x02, (0xC0 | encode));
8183 }
8184
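// Byte and word element adds pass _legacy_mode_bw: their EVEX forms require
// AVX-512BW, so without BW support they demote to VEX/SSE encodings.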
8185 void Assembler::paddb(XMMRegister dst, XMMRegister src) {
8186 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8187 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8188 emit_int16((unsigned char)0xFC, (0xC0 | encode));
8189 }
8190
8191 void Assembler::paddw(XMMRegister dst, XMMRegister src) {
8192 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8193 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8194 emit_int16((unsigned char)0xFD, (0xC0 | encode));
8195 }
8196
8197 void Assembler::paddd(XMMRegister dst, XMMRegister src) {
8198 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8199 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8200 emit_int16((unsigned char)0xFE, (0xC0 | encode));
8201 }
8202
8203 void Assembler::paddd(XMMRegister dst, Address src) {
8204 InstructionMark im(this);
8205 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8206 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8207 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8208 emit_int8((unsigned char)0xFE);
8209 emit_operand(dst, src, 0);
8210 }
8211
8212 void Assembler::paddq(XMMRegister dst, XMMRegister src) {
8213 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8214 attributes.set_rex_vex_w_reverted();
8215 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8216 emit_int16((unsigned char)0xD4, (0xC0 | encode));
8217 }
8218
8219 void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "phaddw is an SSSE3 instruction");
8221 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8222 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8223 emit_int16(0x01, (0xC0 | encode));
8224 }
8225
8226 void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "phaddd is an SSSE3 instruction");
8228 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
8229 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8230 emit_int16(0x02, (0xC0 | encode));
8231 }
8232
8233 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8234 assert(UseAVX > 0, "requires some form of AVX");
8235 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8236 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8237 emit_int16((unsigned char)0xFC, (0xC0 | encode));
8238 }
8239
8240 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8241 assert(UseAVX > 0, "requires some form of AVX");
8242 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8243 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8244 emit_int16((unsigned char)0xFD, (0xC0 | encode));
8245 }
8246
8247 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8248 assert(UseAVX > 0, "requires some form of AVX");
8249 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8250 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8251 emit_int16((unsigned char)0xFE, (0xC0 | encode));
8252 }
8253
8254 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8255 assert(UseAVX > 0, "requires some form of AVX");
8256 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8257 attributes.set_rex_vex_w_reverted();
8258 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8259 emit_int16((unsigned char)0xD4, (0xC0 | encode));
8260 }
8261
8262 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8263 assert(UseAVX > 0, "requires some form of AVX");
8264 InstructionMark im(this);
8265 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8266 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8267 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8268 emit_int8((unsigned char)0xFC);
8269 emit_operand(dst, src, 0);
8270 }
8271
8272 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8273 assert(UseAVX > 0, "requires some form of AVX");
8274 InstructionMark im(this);
8275 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8276 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8277 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8278 emit_int8((unsigned char)0xFD);
8279 emit_operand(dst, src, 0);
8280 }
8281
8282 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8283 assert(UseAVX > 0, "requires some form of AVX");
8284 InstructionMark im(this);
8285 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8286 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8287 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8288 emit_int8((unsigned char)0xFE);
8289 emit_operand(dst, src, 0);
8290 }
8291
8292 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8293 assert(UseAVX > 0, "requires some form of AVX");
8294 InstructionMark im(this);
8295 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8296 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8297 attributes.set_rex_vex_w_reverted();
8298 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8299 emit_int8((unsigned char)0xD4);
8300 emit_operand(dst, src, 0);
8301 }
8302
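// Scalar half-precision (AVX512-FP16) arithmetic. These are EVEX map-5
// encodings with an F3 prefix, and the opcodes mirror the familiar scalar
// float ones: 0x51 = sqrt, 0x58 = add, 0x59 = mul, 0x5C = sub, 0x5D = min,
// 0x5E = div, 0x5F = max. The FP16 FMA forms (e.g. vfmadd132sh below) live in
// map 6 with a 66 prefix instead.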
8303 void Assembler::vaddsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8304 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8305 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8306 attributes.set_is_evex_instruction();
8307 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8308 emit_int16(0x58, (0xC0 | encode));
8309 }
8310
8311 void Assembler::vsubsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8312 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8313 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8314 attributes.set_is_evex_instruction();
8315 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8316 emit_int16(0x5C, (0xC0 | encode));
8317 }
8318
8319 void Assembler::vdivsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8320 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8321 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8322 attributes.set_is_evex_instruction();
8323 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8324 emit_int16(0x5E, (0xC0 | encode));
8325 }
8326
8327 void Assembler::vmulsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8328 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8329 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8330 attributes.set_is_evex_instruction();
8331 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8332 emit_int16(0x59, (0xC0 | encode));
8333 }
8334
8335 void Assembler::vmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8336 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8337 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8338 attributes.set_is_evex_instruction();
8339 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8340 emit_int16(0x5F, (0xC0 | encode));
8341 }
8342
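// AVX10.2 scalar FP16 min/max. VMINMAXSH takes an imm8 that selects the
// particular min/max flavor (including the IEEE 754-2019 minimum/maximum
// semantics); see the AVX10 documentation for the imm8 encoding.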
8343 void Assembler::eminmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
8344 assert(VM_Version::supports_avx10_2(), "");
8345 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8346 attributes.set_is_evex_instruction();
8347 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
8348 emit_int24(0x53, (0xC0 | encode), imm8);
8349 }
8350
8351 void Assembler::vminsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8352 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8353 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8354 attributes.set_is_evex_instruction();
8355 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8356 emit_int16(0x5D, (0xC0 | encode));
8357 }
8358
8359 void Assembler::vsqrtsh(XMMRegister dst, XMMRegister src) {
8360 assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8361 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8362 attributes.set_is_evex_instruction();
8363 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
8364 emit_int16(0x51, (0xC0 | encode));
8365 }
8366
void Assembler::vfmadd132sh(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP6, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
8374
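// Saturating packed adds: 0xEC/0xED are PADDSB/PADDSW (signed saturation) and
// 0xDC/0xDD are PADDUSB/PADDUSW (unsigned saturation). Results clamp to the
// element type's range instead of wrapping, e.g. (int8_t)0x7F + 1 stays 0x7F.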
8375 void Assembler::vpaddsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8376 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8377 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8378 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8379 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8380 emit_int16((unsigned char)0xEC, (0xC0 | encode));
8381 }
8382
8383 void Assembler::vpaddsb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8384 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8385 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8386 InstructionMark im(this);
8387 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8388 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8389 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8390 emit_int8((unsigned char)0xEC);
8391 emit_operand(dst, src, 0);
8392 }
8393
8394 void Assembler::vpaddsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8395 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8396 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8397 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8398 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8399 emit_int16((unsigned char)0xED, (0xC0 | encode));
8400 }
8401
8402 void Assembler::vpaddsw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8403 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8404 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8405 InstructionMark im(this);
8406 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8407 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8408 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8409 emit_int8((unsigned char)0xED);
8410 emit_operand(dst, src, 0);
8411 }
8412
8413 void Assembler::vpaddusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8414 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8415 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8416 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8417 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8418 emit_int16((unsigned char)0xDC, (0xC0 | encode));
8419 }
8420
8421 void Assembler::vpaddusb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8422 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8423 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8424 InstructionMark im(this);
8425 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8426 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8427 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8428 emit_int8((unsigned char)0xDC);
8429 emit_operand(dst, src, 0);
8430 }
8431
8433 void Assembler::vpaddusw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8434 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8435 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8436 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8437 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8438 emit_int16((unsigned char)0xDD, (0xC0 | encode));
8439 }
8440
8441 void Assembler::vpaddusw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8442 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8443 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8444 InstructionMark im(this);
8445 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8446 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8447 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8448 emit_int8((unsigned char)0xDD);
8449 emit_operand(dst, src, 0);
8450 }
8451
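// Saturating packed subtracts: 0xE8/0xE9 are PSUBSB/PSUBSW (signed saturation),
// 0xD8/0xD9 are PSUBUSB/PSUBUSW (unsigned saturation, clamping at zero).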
8453 void Assembler::vpsubsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8454 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8455 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8456 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8457 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8458 emit_int16((unsigned char)0xE8, (0xC0 | encode));
8459 }
8460
8461 void Assembler::vpsubsb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8462 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8463 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8464 InstructionMark im(this);
8465 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8466 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8467 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8468 emit_int8((unsigned char)0xE8);
8469 emit_operand(dst, src, 0);
8470 }
8471
8472 void Assembler::vpsubsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8473 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8474 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8475 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8476 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8477 emit_int16((unsigned char)0xE9, (0xC0 | encode));
8478 }
8479
8480 void Assembler::vpsubsw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8481 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8482 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8483 InstructionMark im(this);
8484 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8485 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8486 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8487 emit_int8((unsigned char)0xE9);
8488 emit_operand(dst, src, 0);
8489 }
8490
8491 void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8492 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8493 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8494 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8495 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8496 emit_int16((unsigned char)0xD8, (0xC0 | encode));
8497 }
8498
8499 void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8500 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8501 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8502 InstructionMark im(this);
8503 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8504 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8505 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8506 emit_int8((unsigned char)0xD8);
8507 emit_operand(dst, src, 0);
8508 }
8509
void Assembler::vpsubusw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
  assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD9, (0xC0 | encode));
}
8517
8518 void Assembler::vpsubusw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8519 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8520 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8521 InstructionMark im(this);
8522 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8523 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8524 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8525 emit_int8((unsigned char)0xD9);
8526 emit_operand(dst, src, 0);
8527 }
8528
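// Two-operand (destructive) SSE subtract forms. 'dst' is passed both as
// destination and as first source so that, when AVX is available and these get
// re-encoded with a VEX prefix, VEX.vvvv still names dst and the destructive
// SSE semantics are preserved.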
8530 void Assembler::psubb(XMMRegister dst, XMMRegister src) {
8531 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8532 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8533 emit_int16((unsigned char)0xF8, (0xC0 | encode));
8534 }
8535
8536 void Assembler::psubw(XMMRegister dst, XMMRegister src) {
8537 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8538 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8539 emit_int16((unsigned char)0xF9, (0xC0 | encode));
8540 }
8541
8542 void Assembler::psubd(XMMRegister dst, XMMRegister src) {
8543 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8544 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8545 emit_int16((unsigned char)0xFA, (0xC0 | encode));
8546 }
8547
void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}
8555
8556 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8557 assert(UseAVX > 0, "requires some form of AVX");
8558 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8559 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8560 emit_int16((unsigned char)0xF8, (0xC0 | encode));
8561 }
8562
8563 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8564 assert(UseAVX > 0, "requires some form of AVX");
8565 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8566 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8567 emit_int16((unsigned char)0xF9, (0xC0 | encode));
8568 }
8569
8570 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8571 assert(UseAVX > 0, "requires some form of AVX");
8572 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8573 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8574 emit_int16((unsigned char)0xFA, (0xC0 | encode));
8575 }
8576
void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}
8584
8585 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8586 assert(UseAVX > 0, "requires some form of AVX");
8587 InstructionMark im(this);
8588 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8589 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8590 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8591 emit_int8((unsigned char)0xF8);
8592 emit_operand(dst, src, 0);
8593 }
8594
8595 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8596 assert(UseAVX > 0, "requires some form of AVX");
8597 InstructionMark im(this);
8598 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8599 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8600 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8601 emit_int8((unsigned char)0xF9);
8602 emit_operand(dst, src, 0);
8603 }
8604
8605 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8606 assert(UseAVX > 0, "requires some form of AVX");
8607 InstructionMark im(this);
8608 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8609 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8610 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8611 emit_int8((unsigned char)0xFA);
8612 emit_operand(dst, src, 0);
8613 }
8614
8615 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8616 assert(UseAVX > 0, "requires some form of AVX");
8617 InstructionMark im(this);
8618 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8619 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8620 attributes.set_rex_vex_w_reverted();
8621 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8622 emit_int8((unsigned char)0xFB);
8623 emit_operand(dst, src, 0);
8624 }
8625
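// Packed multiplies: 0xD5 = PMULLW (low 16x16), 0F 38 0x40 = PMULLD (low
// 32x32, SSE4.1) and, with EVEX.W1, VPMULLQ (low 64x64, AVX512DQ);
// 0xF4 = PMULUDQ and 0F 38 0x28 = PMULDQ are the widening 32x32->64
// unsigned/signed multiplies.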
8626 void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
8627 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8628 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8629 emit_int16((unsigned char)0xD5, (0xC0 | encode));
8630 }
8631
8632 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
8633 assert(VM_Version::supports_sse4_1(), "");
8634 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8635 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8636 emit_int16(0x40, (0xC0 | encode));
8637 }
8638
8639 void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
8640 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8641 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8642 emit_int16((unsigned char)0xF4, (0xC0 | encode));
8643 }
8644
8645 void Assembler::vpmulhuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8646 assert((vector_len == AVX_128bit && VM_Version::supports_avx()) ||
8647 (vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
8648 (vector_len == AVX_512bit && VM_Version::supports_avx512bw()), "");
8649 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8650 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8651 emit_int16((unsigned char)0xE4, (0xC0 | encode));
8652 }
8653
8654 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8655 assert(UseAVX > 0, "requires some form of AVX");
8656 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8657 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8658 emit_int16((unsigned char)0xD5, (0xC0 | encode));
8659 }
8660
8661 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8662 assert(UseAVX > 0, "requires some form of AVX");
8663 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8664 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8665 emit_int16(0x40, (0xC0 | encode));
8666 }
8667
8668 void Assembler::evpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8669 assert(UseAVX > 2, "requires some form of EVEX");
8670 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8671 attributes.set_is_evex_instruction();
8672 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8673 emit_int16(0x40, (0xC0 | encode));
8674 }
8675
8676 void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8677 assert(UseAVX > 0, "requires some form of AVX");
8678 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8679 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8680 emit_int16((unsigned char)0xF4, (0xC0 | encode));
8681 }
8682
8683 void Assembler::vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8684 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8685 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
8686 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8687 attributes.set_rex_vex_w_reverted();
8688 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8689 emit_int16(0x28, (0xC0 | encode));
8690 }
8691
8692 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8693 assert(UseAVX > 0, "requires some form of AVX");
8694 InstructionMark im(this);
8695 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8696 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8697 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8698 emit_int8((unsigned char)0xD5);
8699 emit_operand(dst, src, 0);
8700 }
8701
8702 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8703 assert(UseAVX > 0, "requires some form of AVX");
8704 InstructionMark im(this);
8705 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8706 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8707 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8708 emit_int8(0x40);
8709 emit_operand(dst, src, 0);
8710 }
8711
8712 void Assembler::evpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8713 assert(UseAVX > 2, "requires some form of EVEX");
8714 InstructionMark im(this);
8715 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
8716 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8717 attributes.set_is_evex_instruction();
8718 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8719 emit_int8(0x40);
8720 emit_operand(dst, src, 0);
8721 }
8722
8723 // Min, max
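// Opcode summary for the packed integer min/max families: the byte and dword
// forms live in the 0F 38 map (0x38/0x39 = PMINSB/PMINSD, 0x3A/0x3B =
// PMINUW/PMINUD, 0x3C/0x3D = PMAXSB/PMAXSD, 0x3E/0x3F = PMAXUW/PMAXUD), while
// the word and unsigned-byte forms are legacy 0F-map opcodes (0xEA = PMINSW,
// 0xEE = PMAXSW, 0xDA = PMINUB, 0xDE = PMAXUB). The quadword variants reuse
// the dword opcodes with EVEX.W1.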
8724 void Assembler::pminsb(XMMRegister dst, XMMRegister src) {
8725 assert(VM_Version::supports_sse4_1(), "");
8726 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8727 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8728 emit_int16(0x38, (0xC0 | encode));
8729 }
8730
8731 void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8732 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8733 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
8734 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8735 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8736 emit_int16(0x38, (0xC0 | encode));
8737 }
8738
8739 void Assembler::pminsw(XMMRegister dst, XMMRegister src) {
8740 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8741 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8742 emit_int16((unsigned char)0xEA, (0xC0 | encode));
8743 }
8744
8745 void Assembler::vpminsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8746 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8747 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
8748 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8749 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8750 emit_int16((unsigned char)0xEA, (0xC0 | encode));
8751 }
8752
8753 void Assembler::pminsd(XMMRegister dst, XMMRegister src) {
8754 assert(VM_Version::supports_sse4_1(), "");
8755 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8756 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8757 emit_int16(0x39, (0xC0 | encode));
8758 }
8759
8760 void Assembler::vpminsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8761 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8762 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
8763 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8764 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8765 emit_int16(0x39, (0xC0 | encode));
8766 }
8767
8768 void Assembler::vpminsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8769 assert(UseAVX > 2, "requires AVX512F");
8770 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8771 attributes.set_is_evex_instruction();
8772 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8773 emit_int16(0x39, (0xC0 | encode));
8774 }
8775
8776 void Assembler::minps(XMMRegister dst, XMMRegister src) {
8777 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8778 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8779 emit_int16(0x5D, (0xC0 | encode));
8780 }
8781 void Assembler::vminps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8782 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
8783 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8784 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8785 emit_int16(0x5D, (0xC0 | encode));
8786 }
8787
8788 void Assembler::minpd(XMMRegister dst, XMMRegister src) {
8789 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8790 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8791 emit_int16(0x5D, (0xC0 | encode));
8792 }
8793 void Assembler::vminpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8794 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
8795 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8796 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8797 emit_int16(0x5D, (0xC0 | encode));
8798 }
8799
8800 void Assembler::pmaxsb(XMMRegister dst, XMMRegister src) {
8801 assert(VM_Version::supports_sse4_1(), "");
8802 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8803 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8804 emit_int16(0x3C, (0xC0 | encode));
8805 }
8806
8807 void Assembler::vpmaxsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8808 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8809 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
8810 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8811 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8812 emit_int16(0x3C, (0xC0 | encode));
8813 }
8814
8815 void Assembler::pmaxsw(XMMRegister dst, XMMRegister src) {
8816 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8817 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8818 emit_int16((unsigned char)0xEE, (0xC0 | encode));
8819 }
8820
8821 void Assembler::vpmaxsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8822 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8823 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
8824 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8825 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8826 emit_int16((unsigned char)0xEE, (0xC0 | encode));
8827 }
8828
8829 void Assembler::pmaxsd(XMMRegister dst, XMMRegister src) {
8830 assert(VM_Version::supports_sse4_1(), "");
8831 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8832 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8833 emit_int16(0x3D, (0xC0 | encode));
8834 }
8835
8836 void Assembler::vpmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8837 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
8838 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
8839 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8840 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8841 emit_int16(0x3D, (0xC0 | encode));
8842 }
8843
8844 void Assembler::vpmaxsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8845 assert(UseAVX > 2, "requires AVX512F");
8846 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8847 attributes.set_is_evex_instruction();
8848 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
8849 emit_int16(0x3D, (0xC0 | encode));
8850 }
8851
8852 void Assembler::maxps(XMMRegister dst, XMMRegister src) {
8853 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8854 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8855 emit_int16(0x5F, (0xC0 | encode));
8856 }
8857
8858 void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8859 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
8860 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8861 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
8862 emit_int16(0x5F, (0xC0 | encode));
8863 }
8864
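// AVX10.2 packed float min/max (VMINMAXPS/VMINMAXPD). As with the scalar form
// above, imm8 selects the min/max flavor; the masked variants follow the usual
// EVEX convention where 'merge' picks merge-masking over zero-masking.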
8865 void Assembler::evminmaxps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len) {
8866 assert(VM_Version::supports_avx10_2(), "");
8867 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8868 attributes.set_is_evex_instruction();
8869 attributes.set_embedded_opmask_register_specifier(mask);
8870 if (merge) {
8871 attributes.reset_is_clear_context();
8872 }
8873 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8874 emit_int24(0x52, (0xC0 | encode), imm8);
8875 }
8876
8877 void Assembler::evminmaxps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len) {
8878 assert(VM_Version::supports_avx10_2(), "");
8879 InstructionMark im(this);
8880 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8881 attributes.set_is_evex_instruction();
8882 attributes.set_embedded_opmask_register_specifier(mask);
8883 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8884 if (merge) {
8885 attributes.reset_is_clear_context();
8886 }
8887 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8888 emit_int8(0x52);
8889 emit_operand(dst, src, 0);
8890 emit_int8(imm8);
8891 }
8892
void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}
8898
8899 void Assembler::evminmaxpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len) {
8900 assert(VM_Version::supports_avx10_2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8902 attributes.set_is_evex_instruction();
8903 attributes.set_embedded_opmask_register_specifier(mask);
8904 if (merge) {
8905 attributes.reset_is_clear_context();
8906 }
8907 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8908 emit_int24(0x52, (0xC0 | encode), imm8);
8909 }
8910
8911 void Assembler::evminmaxpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len) {
8912 assert(VM_Version::supports_avx10_2(), "");
8913 InstructionMark im(this);
8914 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8915 attributes.set_is_evex_instruction();
8916 attributes.set_embedded_opmask_register_specifier(mask);
8917 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8918 if (merge) {
8919 attributes.reset_is_clear_context();
8920 }
8921 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8922 emit_int8(0x52);
8923 emit_operand(dst, src, 0);
8924 emit_int8(imm8);
8925 }
8926
8927 void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8928 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
8930 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8931 emit_int16(0x5F, (0xC0 | encode));
8932 }
8933
8934 void Assembler::vpminub(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8935 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
8936 assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
8937 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8938 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8939 emit_int16((unsigned char)0xDA, (0xC0 | encode));
8940 }
8941
8942 void Assembler::vpminub(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8943 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8944 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8945 InstructionMark im(this);
8946 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8947 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8948 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8949 emit_int8((unsigned char)0xDA);
8950 emit_operand(dst, src, 0);
8951 }
8952
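// Masked EVEX min/max variants: 'mask' is the opmask register and 'merge'
// selects merge-masking (masked-off lanes keep their previous dst contents,
// via reset_is_clear_context()) over the default zero-masking.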
8953 void Assembler::evpminub(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
8954 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
8955 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8956 attributes.set_is_evex_instruction();
8957 attributes.set_embedded_opmask_register_specifier(mask);
8958 if (merge) {
8959 attributes.reset_is_clear_context();
8960 }
8961 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8962 emit_int16((unsigned char)0xDA, (0xC0 | encode));
8963 }
8964
8965 void Assembler::evpminub(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
8966 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
8967 InstructionMark im(this);
8968 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8969 attributes.set_is_evex_instruction();
8970 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8971 attributes.set_embedded_opmask_register_specifier(mask);
8972 if (merge) {
8973 attributes.reset_is_clear_context();
8974 }
8975 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8976 emit_int8((unsigned char)0xDA);
8977 emit_operand(dst, src, 0);
8978 }
8979
void Assembler::vpminuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
  assert(!needs_evex(dst, nds, src) || VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3A, (0xC0 | encode));
}
8987
8988 void Assembler::vpminuw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
8989 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
8990 assert(!needs_evex(dst, nds) || VM_Version::supports_avx512bw(), "");
8991 InstructionMark im(this);
8992 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8993 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
8994 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3A);
8996 emit_operand(dst, src, 0);
8997 }
8998
8999 void Assembler::evpminuw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9000 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
9001 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9002 attributes.set_is_evex_instruction();
9003 attributes.set_embedded_opmask_register_specifier(mask);
9004 if (merge) {
9005 attributes.reset_is_clear_context();
9006 }
9007 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9008 emit_int16(0x3A, (0xC0 | encode));
9009 }
9010
9011 void Assembler::evpminuw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9012 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
9013 InstructionMark im(this);
9014 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9015 attributes.set_is_evex_instruction();
9016 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
9017 attributes.set_embedded_opmask_register_specifier(mask);
9018 if (merge) {
9019 attributes.reset_is_clear_context();
9020 }
9021 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9022 emit_int8(0x3A);
9023 emit_operand(dst, src, 0);
9024 }
9025
9026 void Assembler::vpminud(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9027 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
9028 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9029 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9030 emit_int16(0x3B, (0xC0 | encode));
9031 }
9032
9033 void Assembler::vpminud(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9034 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
9035 InstructionMark im(this);
9036 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9037 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9038 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3B);
9040 emit_operand(dst, src, 0);
9041 }
9042
9043 void Assembler::evpminud(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9044 assert(VM_Version::supports_evex(), "");
9045 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9046 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9047 attributes.set_is_evex_instruction();
9048 attributes.set_embedded_opmask_register_specifier(mask);
9049 if (merge) {
9050 attributes.reset_is_clear_context();
9051 }
9052 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9053 emit_int16(0x3B, (0xC0 | encode));
9054 }
9055
9056 void Assembler::evpminud(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9057 assert(VM_Version::supports_evex(), "");
9058 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9059 InstructionMark im(this);
9060 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9061 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9062 attributes.set_is_evex_instruction();
9063 attributes.set_embedded_opmask_register_specifier(mask);
9064 if (merge) {
9065 attributes.reset_is_clear_context();
9066 }
9067 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9068 emit_int8(0x3B);
9069 emit_operand(dst, src, 0);
9070 }
9071
9072 void Assembler::evpminuq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9073 assert(VM_Version::supports_evex(), "");
9074 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9075 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9076 attributes.set_is_evex_instruction();
9077 attributes.set_embedded_opmask_register_specifier(mask);
9078 if (merge) {
9079 attributes.reset_is_clear_context();
9080 }
9081 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9082 emit_int16(0x3B, (0xC0 | encode));
9083 }
9084
9085 void Assembler::evpminuq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9086 assert(VM_Version::supports_evex(), "");
9087 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9088 InstructionMark im(this);
9089 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9090 attributes.set_is_evex_instruction();
9091 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9092 attributes.set_embedded_opmask_register_specifier(mask);
9093 if (merge) {
9094 attributes.reset_is_clear_context();
9095 }
9096 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9097 emit_int8(0x3B);
9098 emit_operand(dst, src, 0);
9099 }
9100
9101 void Assembler::vpmaxub(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9102 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
9103 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
9104 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
9105 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9106 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9107 emit_int16((unsigned char)0xDE, (0xC0 | encode));
9108 }
9109
9110 void Assembler::vpmaxub(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9111 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
9112 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
9113 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
9114 InstructionMark im(this);
9115 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9116 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
9117 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9118 emit_int8((unsigned char)0xDE);
9119 emit_operand(dst, src, 0);
9120 }
9121
9122 void Assembler::evpmaxub(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9123 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
9124 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9125 attributes.set_is_evex_instruction();
9126 attributes.set_embedded_opmask_register_specifier(mask);
9127 if (merge) {
9128 attributes.reset_is_clear_context();
9129 }
9130 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9131 emit_int16((unsigned char)0xDE, (0xC0 | encode));
9132 }
9133
9134 void Assembler::evpmaxub(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9135 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
9136 InstructionMark im(this);
9137 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9138 attributes.set_is_evex_instruction();
9139 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
9140 attributes.set_embedded_opmask_register_specifier(mask);
9141 if (merge) {
9142 attributes.reset_is_clear_context();
9143 }
9144 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9145 emit_int8((unsigned char)0xDE);
9146 emit_operand(dst, src, 0);
9147 }
9148
9149 void Assembler::vpmaxuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9150 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
9151 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
9152 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
9153 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9154 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9155 emit_int16(0x3E, (0xC0 | encode));
9156 }
9157
9158 void Assembler::vpmaxuw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9159 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
9160 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
9161 assert(UseAVX > 0 && (vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
9162 InstructionMark im(this);
9163 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9164 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
9165 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3E);
9167 emit_operand(dst, src, 0);
9168 }
9169
9170 void Assembler::evpmaxuw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9171 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
9172 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9173 attributes.set_is_evex_instruction();
9174 attributes.set_embedded_opmask_register_specifier(mask);
9175 if (merge) {
9176 attributes.reset_is_clear_context();
9177 }
9178 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9179 emit_int16(0x3E, (0xC0 | encode));
9180 }
9181
9182 void Assembler::evpmaxuw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9183 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
9184 InstructionMark im(this);
9185 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9186 attributes.set_is_evex_instruction();
9187 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
9188 attributes.set_embedded_opmask_register_specifier(mask);
9189 if (merge) {
9190 attributes.reset_is_clear_context();
9191 }
9192 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9193 emit_int8(0x3E);
9194 emit_operand(dst, src, 0);
9195 }
9196
9197 void Assembler::vpmaxud(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9198 assert(UseAVX > 0, "");
9199 assert((vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds, src) || VM_Version::supports_avx512vl())), "");
9200 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9201 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9202 emit_int16(0x3F, (0xC0 | encode));
9203 }
9204
9205 void Assembler::vpmaxud(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9206 assert(UseAVX > 0, "");
9207 assert((vector_len == Assembler::AVX_512bit || (!needs_evex(dst, nds) || VM_Version::supports_avx512vl())), "");
9208 InstructionMark im(this);
9209 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9210 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9211 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3F);
9213 emit_operand(dst, src, 0);
9214 }
9215
9216 void Assembler::evpmaxud(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9217 assert(VM_Version::supports_evex(), "");
9218 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9219 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9220 attributes.set_is_evex_instruction();
9221 attributes.set_embedded_opmask_register_specifier(mask);
9222 if (merge) {
9223 attributes.reset_is_clear_context();
9224 }
9225 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9226 emit_int16(0x3F, (0xC0 | encode));
9227 }
9228
9229 void Assembler::evpmaxud(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9230 assert(VM_Version::supports_evex(), "");
9231 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9232 InstructionMark im(this);
9233 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9234 attributes.set_is_evex_instruction();
9235 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9236 attributes.set_embedded_opmask_register_specifier(mask);
9237 if (merge) {
9238 attributes.reset_is_clear_context();
9239 }
9240 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9241 emit_int8(0x3F);
9242 emit_operand(dst, src, 0);
9243 }
9244
9245 void Assembler::evpmaxuq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9246 assert(VM_Version::supports_evex(), "");
9247 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9248 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9249 attributes.set_is_evex_instruction();
9250 attributes.set_embedded_opmask_register_specifier(mask);
9251 if (merge) {
9252 attributes.reset_is_clear_context();
9253 }
9254 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9255 emit_int16(0x3F, (0xC0 | encode));
9256 }
9257
9258 void Assembler::evpmaxuq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9259 assert(VM_Version::supports_evex(), "");
9260 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9261 InstructionMark im(this);
9262 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9263 attributes.set_is_evex_instruction();
9264 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9265 attributes.set_embedded_opmask_register_specifier(mask);
9266 if (merge) {
9267 attributes.reset_is_clear_context();
9268 }
9269 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9270 emit_int8(0x3F);
9271 emit_operand(dst, src, 0);
9272 }
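// Note: vpmaxud and evpmaxuq share opcode 0x3F in the 0F 38 map; EVEX.W picks
// the element size (W0 = dwords, W1 = qwords). The quadword form has no VEX
// encoding, which is why evpmaxuq exists only in EVEX variants here.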
9273
// Shift packed integers left by the specified number of bits.
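// The immediate-count forms encode the operation in the ModRM reg field (a
// "group" opcode, /digit in Intel syntax), so a dummy xmm register with the
// matching encoding (xmm6 for /6, xmm2 for /2, xmm4 for /4) is passed in
// place of a real register operand.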
9275 void Assembler::psllw(XMMRegister dst, int shift) {
9276 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9277 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
9278 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9279 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
9280 }
9281
9282 void Assembler::pslld(XMMRegister dst, int shift) {
9283 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9284 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
9285 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9286 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9287 }
9288
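// The EVEX encoding of psllq requires EVEX.W1, so vex_w is requested only when
// EVEX is available; set_rex_vex_w_reverted() lets the prefix emitter drop the
// W bit again if the instruction falls back to a legacy/VEX encoding, where
// the quadword operation is implied by the opcode alone.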
9289 void Assembler::psllq(XMMRegister dst, int shift) {
9290 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9291 attributes.set_rex_vex_w_reverted();
9292 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
9293 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9294 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
9295 }
9296
9297 void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
9298 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9299 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9300 emit_int16((unsigned char)0xF1, (0xC0 | encode));
9301 }
9302
9303 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
9304 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9305 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9306 emit_int16((unsigned char)0xF2, (0xC0 | encode));
9307 }
9308
9309 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
9310 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9311 attributes.set_rex_vex_w_reverted();
9312 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9313 emit_int16((unsigned char)0xF3, (0xC0 | encode));
9314 }
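// In the xmm-count forms above, every lane is shifted by the same count, taken
// from the low 64 bits of the count register; logical shifts by counts of the
// element width or more zero the affected lanes.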
9315
9316 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9317 assert(UseAVX > 0, "requires some form of AVX");
9318 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9319 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
9320 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9321 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
9322 }
9323
9324 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9325 assert(UseAVX > 0, "requires some form of AVX");
9326 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9327 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
9328 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9329 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9330 }
9331
9332 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9333 assert(UseAVX > 0, "requires some form of AVX");
9334 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9335 attributes.set_rex_vex_w_reverted();
9336 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
9337 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9338 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
9339 }
9340
9341 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9342 assert(UseAVX > 0, "requires some form of AVX");
9343 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9344 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9345 emit_int16((unsigned char)0xF1, (0xC0 | encode));
9346 }
9347
9348 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9349 assert(UseAVX > 0, "requires some form of AVX");
9350 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9351 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9352 emit_int16((unsigned char)0xF2, (0xC0 | encode));
9353 }
9354
9355 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9356 assert(UseAVX > 0, "requires some form of AVX");
9357 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9358 attributes.set_rex_vex_w_reverted();
9359 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9360 emit_int16((unsigned char)0xF3, (0xC0 | encode));
9361 }
9362
// Shift packed integers logically right by the specified number of bits.
9364 void Assembler::psrlw(XMMRegister dst, int shift) {
9365 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9366 // XMM2 is for /2 encoding: 66 0F 71 /2 ib
9367 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9368 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
9369 }
9370
9371 void Assembler::psrld(XMMRegister dst, int shift) {
9372 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9373 // XMM2 is for /2 encoding: 66 0F 72 /2 ib
9374 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9375 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9376 }
9377
9378 void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse with the SSE2 instruction psrldq, which
  // shifts the 128-bit value in an xmm register right by a number of bytes.
9381 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9382 attributes.set_rex_vex_w_reverted();
9383 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
9384 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9385 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
9386 }
9387
9388 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
9389 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9390 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9391 emit_int16((unsigned char)0xD1, (0xC0 | encode));
9392 }
9393
9394 void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
9395 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9396 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9397 emit_int16((unsigned char)0xD2, (0xC0 | encode));
9398 }
9399
9400 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
9401 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9402 attributes.set_rex_vex_w_reverted();
9403 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9404 emit_int16((unsigned char)0xD3, (0xC0 | encode));
9405 }
9406
9407 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9408 assert(UseAVX > 0, "requires some form of AVX");
9409 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9410 // XMM2 is for /2 encoding: 66 0F 71 /2 ib
9411 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9412 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
9413 }
9414
9415 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9416 assert(UseAVX > 0, "requires some form of AVX");
9417 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9418 // XMM2 is for /2 encoding: 66 0F 72 /2 ib
9419 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9420 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9421 }
9422
9423 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9424 assert(UseAVX > 0, "requires some form of AVX");
9425 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9426 attributes.set_rex_vex_w_reverted();
9427 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
9428 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9429 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
9430 }
9431
9432 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9433 assert(UseAVX > 0, "requires some form of AVX");
9434 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9435 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9436 emit_int16((unsigned char)0xD1, (0xC0 | encode));
9437 }
9438
9439 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9440 assert(UseAVX > 0, "requires some form of AVX");
9441 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9442 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9443 emit_int16((unsigned char)0xD2, (0xC0 | encode));
9444 }
9445
9446 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9447 assert(UseAVX > 0, "requires some form of AVX");
9448 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9449 attributes.set_rex_vex_w_reverted();
9450 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9451 emit_int16((unsigned char)0xD3, (0xC0 | encode));
9452 }
9453
9454 void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9455 assert(VM_Version::supports_avx512bw(), "");
9456 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9457 attributes.set_is_evex_instruction();
9458 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9459 emit_int16(0x10, (0xC0 | encode));
9460 }
9461
9462 void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9463 assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9465 attributes.set_is_evex_instruction();
9466 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9467 emit_int16(0x12, (0xC0 | encode));
9468 }
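// The word-granularity variable shifts above exist only as AVX-512BW EVEX
// encodings (0F 38, opcodes 0x10/0x12); the dword/qword variable shifts
// further below (0x45/0x47) are available with plain AVX2.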
9469
// Shift packed integers arithmetically right by the specified number of bits.
9471 void Assembler::psraw(XMMRegister dst, int shift) {
9472 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9473 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
9474 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9475 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
9476 }
9477
9478 void Assembler::psrad(XMMRegister dst, int shift) {
9479 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9480 // XMM4 is for /4 encoding: 66 0F 72 /4 ib
9481 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9485 }
9486
9487 void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
9488 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9489 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9490 emit_int16((unsigned char)0xE1, (0xC0 | encode));
9491 }
9492
9493 void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
9494 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9495 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9496 emit_int16((unsigned char)0xE2, (0xC0 | encode));
9497 }
9498
9499 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9500 assert(UseAVX > 0, "requires some form of AVX");
9501 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9502 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
9503 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9504 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
9505 }
9506
9507 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9508 assert(UseAVX > 0, "requires some form of AVX");
9509 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
9511 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9512 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9513 }
9514
9515 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9516 assert(UseAVX > 0, "requires some form of AVX");
9517 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
9518 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9519 emit_int16((unsigned char)0xE1, (0xC0 | encode));
9520 }
9521
9522 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9523 assert(UseAVX > 0, "requires some form of AVX");
9524 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9525 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9526 emit_int16((unsigned char)0xE2, (0xC0 | encode));
9527 }
9528
9529 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9530 assert(UseAVX > 2, "requires AVX512");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9532 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9533 attributes.set_is_evex_instruction();
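  // XMM4 is for /4 encoding: 66 0F 72 /4 ib (EVEX.W1 selects the quadword form)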
9534 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9536 }
9537
9538 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9539 assert(UseAVX > 2, "requires AVX512");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9541 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9542 attributes.set_is_evex_instruction();
9543 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9544 emit_int16((unsigned char)0xE2, (0xC0 | encode));
9545 }
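// Note: a packed 64-bit arithmetic right shift has no SSE/AVX2 encoding; it
// was introduced with AVX-512, hence the EVEX-only evpsraq forms above.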
9546
// Logical operations on packed integers.
9548 void Assembler::pand(XMMRegister dst, XMMRegister src) {
9549 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9550 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9551 emit_int16((unsigned char)0xDB, (0xC0 | encode));
9552 }
9553
9554 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9555 assert(UseAVX > 0, "requires some form of AVX");
9556 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9557 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9558 emit_int16((unsigned char)0xDB, (0xC0 | encode));
9559 }
9560
9561 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9562 assert(UseAVX > 0, "requires some form of AVX");
9563 InstructionMark im(this);
9564 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9565 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9566 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9567 emit_int8((unsigned char)0xDB);
9568 emit_operand(dst, src, 0);
9569 }
9570
9571 void Assembler::evpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9572 evpandq(dst, k0, nds, src, false, vector_len);
9573 }
9574
9575 void Assembler::evpandq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9576 evpandq(dst, k0, nds, src, false, vector_len);
9577 }
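// The unmasked evpandq entry points above (and the evporq ones below) simply
// delegate to the masked forms with k0, i.e. no lanes masked, which makes the
// merge flag irrelevant there.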
9578
// Variable shift packed integers logically left.
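// Unlike the vpsll*/vpsrl* forms above, which shift all lanes by one shared
// count, these AVX2 forms shift each lane by the count held in the
// corresponding lane of `shift`. A minimal sketch with hypothetical registers:
//   vpsllvd(xmm0, xmm1, xmm2, Assembler::AVX_256bit); // xmm0[i] = xmm1[i] << xmm2[i] per dword lane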
9580 void Assembler::vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9581 assert(UseAVX > 1, "requires AVX2");
9582 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9583 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9584 emit_int16(0x47, (0xC0 | encode));
9585 }
9586
9587 void Assembler::vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9588 assert(UseAVX > 1, "requires AVX2");
9589 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9590 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9591 emit_int16(0x47, (0xC0 | encode));
9592 }
9593
// Variable shift packed integers logically right.
9595 void Assembler::vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9596 assert(UseAVX > 1, "requires AVX2");
9597 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9598 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9599 emit_int16(0x45, (0xC0 | encode));
9600 }
9601
9602 void Assembler::vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9603 assert(UseAVX > 1, "requires AVX2");
9604 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9605 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9606 emit_int16(0x45, (0xC0 | encode));
9607 }
9608
// Variable shift packed integers arithmetically right.
9610 void Assembler::vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9611 assert(UseAVX > 1, "requires AVX2");
9612 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9613 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9614 emit_int16(0x46, (0xC0 | encode));
9615 }
9616
9617 void Assembler::evpsravw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9618 assert(VM_Version::supports_avx512bw(), "");
9619 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9620 attributes.set_is_evex_instruction();
9621 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9622 emit_int16(0x11, (0xC0 | encode));
9623 }
9624
9625 void Assembler::evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9626 assert(UseAVX > 2, "requires AVX512");
9627 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9628 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9629 attributes.set_is_evex_instruction();
9630 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9631 emit_int16(0x46, (0xC0 | encode));
9632 }
9633
9634 void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9635 assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
9636 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9637 attributes.set_is_evex_instruction();
9638 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9639 emit_int16(0x71, (0xC0 | encode));
9640 }
9641
9642 void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9643 assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
9644 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9645 attributes.set_is_evex_instruction();
9646 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9647 emit_int16(0x73, (0xC0 | encode));
9648 }
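// vpshldvd/vpshrdvd are VBMI2 "concatenate and variable shift" operations:
// each dst lane is shifted by the count in the matching lane of `shift`, with
// the vacated bit positions filled from the corresponding src lane.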
9649
9650 void Assembler::pandn(XMMRegister dst, XMMRegister src) {
9651 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9652 attributes.set_rex_vex_w_reverted();
9653 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9654 emit_int16((unsigned char)0xDF, (0xC0 | encode));
9655 }
9656
9657 void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9658 assert(UseAVX > 0, "requires some form of AVX");
9659 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9660 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9661 emit_int16((unsigned char)0xDF, (0xC0 | encode));
9662 }
9663
9664 void Assembler::por(XMMRegister dst, XMMRegister src) {
9665 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9666 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9667 emit_int16((unsigned char)0xEB, (0xC0 | encode));
9668 }
9669
9670 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9671 assert(UseAVX > 0, "requires some form of AVX");
9672 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9673 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9674 emit_int16((unsigned char)0xEB, (0xC0 | encode));
9675 }
9676
9677 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9678 assert(UseAVX > 0, "requires some form of AVX");
9679 InstructionMark im(this);
9680 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9681 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9682 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9683 emit_int8((unsigned char)0xEB);
9684 emit_operand(dst, src, 0);
9685 }
9686
9687 void Assembler::evporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9688 evporq(dst, k0, nds, src, false, vector_len);
9689 }
9690
9691 void Assembler::evporq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9692 evporq(dst, k0, nds, src, false, vector_len);
9693 }
9694
9695 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9696 assert(VM_Version::supports_evex(), "");
9697 // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
9698 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9699 attributes.set_is_evex_instruction();
9700 attributes.set_embedded_opmask_register_specifier(mask);
9701 if (merge) {
9702 attributes.reset_is_clear_context();
9703 }
9704 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9705 emit_int16((unsigned char)0xEB, (0xC0 | encode));
9706 }
9707
9708 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9709 assert(VM_Version::supports_evex(), "");
9710 // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
9711 InstructionMark im(this);
9712 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9713 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9714 attributes.set_is_evex_instruction();
9715 attributes.set_embedded_opmask_register_specifier(mask);
9716 if (merge) {
9717 attributes.reset_is_clear_context();
9718 }
9719 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9720 emit_int8((unsigned char)0xEB);
9721 emit_operand(dst, src, 0);
9722 }
9723
9724 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
9725 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9726 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9727 emit_int16((unsigned char)0xEF, (0xC0 | encode));
9728 }
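// Note: pxor(dst, dst) is the canonical way to zero an xmm register; hardware
// recognizes it as a dependency-breaking zeroing idiom.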
9729
9730 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9731 assert(UseAVX > 0, "requires some form of AVX");
9732 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
9733 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
9734 vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, "");
9735 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9736 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9737 emit_int16((unsigned char)0xEF, (0xC0 | encode));
9738 }
9739
9740 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9741 assert(UseAVX > 0, "requires some form of AVX");
9742 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
9743 vector_len == AVX_256bit ? VM_Version::supports_avx2() :
9744 vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, "");
9745 InstructionMark im(this);
9746 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9747 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9748 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9749 emit_int8((unsigned char)0xEF);
9750 emit_operand(dst, src, 0);
9751 }
9752
9753 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9754 assert(UseAVX > 2, "requires some form of EVEX");
9755 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9756 attributes.set_rex_vex_w_reverted();
9757 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9758 emit_int16((unsigned char)0xEF, (0xC0 | encode));
9759 }
9760
9761 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9762 // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
9763 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9764 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9765 attributes.set_is_evex_instruction();
9766 attributes.set_embedded_opmask_register_specifier(mask);
9767 if (merge) {
9768 attributes.reset_is_clear_context();
9769 }
9770 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9771 emit_int16((unsigned char)0xEF, (0xC0 | encode));
9772 }
9773
9774 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9775 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9776 InstructionMark im(this);
9777 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9778 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
9779 attributes.set_is_evex_instruction();
9780 attributes.set_embedded_opmask_register_specifier(mask);
9781 if (merge) {
9782 attributes.reset_is_clear_context();
9783 }
9784 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9785 emit_int8((unsigned char)0xEF);
9786 emit_operand(dst, src, 0);
9787 }
9788
9789 void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9790 // Encoding: EVEX.NDS.XXX.66.0F.W1 EF /r
9791 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9792 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9793 attributes.set_is_evex_instruction();
9794 attributes.set_embedded_opmask_register_specifier(mask);
9795 if (merge) {
9796 attributes.reset_is_clear_context();
9797 }
9798 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9799 emit_int16((unsigned char)0xEF, (0xC0 | encode));
9800 }
9801
9802 void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9803 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9804 InstructionMark im(this);
9805 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9806 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
9807 attributes.set_is_evex_instruction();
9808 attributes.set_embedded_opmask_register_specifier(mask);
9809 if (merge) {
9810 attributes.reset_is_clear_context();
9811 }
9812 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9813 emit_int8((unsigned char)0xEF);
9814 emit_operand(dst, src, 0);
9815 }
9816
9817 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9818 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9819 InstructionMark im(this);
9820 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9821 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
9822 attributes.set_is_evex_instruction();
9823 attributes.set_embedded_opmask_register_specifier(mask);
9824 if (merge) {
9825 attributes.reset_is_clear_context();
9826 }
9827 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9828 emit_int8((unsigned char)0xDB);
9829 emit_operand(dst, src, 0);
9830 }
9831
9832 void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9833 assert(VM_Version::supports_evex(), "requires AVX512F");
9834 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9835 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9836 attributes.set_is_evex_instruction();
9837 attributes.set_embedded_opmask_register_specifier(mask);
9838 if (merge) {
9839 attributes.reset_is_clear_context();
9840 }
9841 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9842 emit_int16((unsigned char)0xDB, (0xC0 | encode));
9843 }
9844
9845 void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9846 assert(VM_Version::supports_evex(), "requires AVX512F");
9847 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9848 InstructionMark im(this);
9849 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9850 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
9851 attributes.set_is_evex_instruction();
9852 attributes.set_embedded_opmask_register_specifier(mask);
9853 if (merge) {
9854 attributes.reset_is_clear_context();
9855 }
9856 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9857 emit_int8((unsigned char)0xDB);
9858 emit_operand(dst, src, 0);
9859 }
9860
9861 void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
9862 assert(VM_Version::supports_evex(), "requires AVX512F");
9863 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9864 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
9865 attributes.set_is_evex_instruction();
9866 attributes.set_embedded_opmask_register_specifier(mask);
9867 if (merge) {
9868 attributes.reset_is_clear_context();
9869 }
9870 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9871 emit_int16((unsigned char)0xEB, (0xC0 | encode));
9872 }
9873
9874 void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
9875 assert(VM_Version::supports_evex(), "requires AVX512F");
9876 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
9877 InstructionMark im(this);
9878 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9879 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
9880 attributes.set_is_evex_instruction();
9881 attributes.set_embedded_opmask_register_specifier(mask);
9882 if (merge) {
9883 attributes.reset_is_clear_context();
9884 }
9885 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9886 emit_int8((unsigned char)0xEB);
9887 emit_operand(dst, src, 0);
9888 }
9889
9890 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
9891 assert(VM_Version::supports_evex(), "requires EVEX support");
9892 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9893 attributes.set_is_evex_instruction();
9894 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9895 emit_int16((unsigned char)0xEF, (0xC0 | encode));
9896 }
9897
9898 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
9899 assert(VM_Version::supports_evex(), "requires EVEX support");
9900 assert(dst != xnoreg, "sanity");
9901 InstructionMark im(this);
9902 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9903 attributes.set_is_evex_instruction();
9904 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
9905 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9906 emit_int8((unsigned char)0xEF);
9907 emit_operand(dst, src, 0);
9908 }
9909
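// Rotate packed integers. The immediate forms below are group opcodes
// (EVEX 66 0F 72): dummy xmm1 selects /1 (rotate left) and dummy xmm0 selects
// /0 (rotate right); EVEX.W picks dword vs. qword lanes.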
9910 void Assembler::evprold(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9911 assert(VM_Version::supports_evex(), "requires EVEX support");
9912 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9913 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9914 attributes.set_is_evex_instruction();
9915 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9916 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9917 }
9918
9919 void Assembler::evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9920 assert(VM_Version::supports_evex(), "requires EVEX support");
9921 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9922 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9923 attributes.set_is_evex_instruction();
9924 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9925 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9926 }
9927
9928 void Assembler::evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9929 assert(VM_Version::supports_evex(), "requires EVEX support");
9930 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9931 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9932 attributes.set_is_evex_instruction();
9933 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9934 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9935 }
9936
9937 void Assembler::evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
9938 assert(VM_Version::supports_evex(), "requires EVEX support");
9939 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9940 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9941 attributes.set_is_evex_instruction();
9942 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
9943 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
9944 }
9945
9946 void Assembler::evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9947 assert(VM_Version::supports_evex(), "requires EVEX support");
9948 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9949 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9950 attributes.set_is_evex_instruction();
9951 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (0xC0 | encode));
9953 }
9954
9955 void Assembler::evprolvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9956 assert(VM_Version::supports_evex(), "requires EVEX support");
9957 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9958 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9959 attributes.set_is_evex_instruction();
9960 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (0xC0 | encode));
9962 }
9963
9964 void Assembler::evprorvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9965 assert(VM_Version::supports_evex(), "requires EVEX support");
9966 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9967 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9968 attributes.set_is_evex_instruction();
9969 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (0xC0 | encode));
9971 }
9972
9973 void Assembler::evprorvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
9974 assert(VM_Version::supports_evex(), "requires EVEX support");
9975 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
9976 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
9977 attributes.set_is_evex_instruction();
9978 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (0xC0 | encode));
9980 }
9981
9982 void Assembler::evplzcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
9983 assert(VM_Version::supports_avx512cd(), "");
9984 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9985 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9986 attributes.set_is_evex_instruction();
9987 attributes.set_embedded_opmask_register_specifier(mask);
9988 if (merge) {
9989 attributes.reset_is_clear_context();
9990 }
9991 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
9992 emit_int16(0x44, (0xC0 | encode));
9993 }
9994
9995 void Assembler::evplzcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
9996 assert(VM_Version::supports_avx512cd(), "");
9997 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
9998 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
9999 attributes.set_is_evex_instruction();
10000 attributes.set_embedded_opmask_register_specifier(mask);
10001 if (merge) {
10002 attributes.reset_is_clear_context();
10003 }
10004 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10005 emit_int16(0x44, (0xC0 | encode));
10006 }
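// evplzcnt{d,q} (AVX512CD) count the leading zero bits of each element; a lane
// whose value is 0 yields the full element width (32 or 64).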
10007
10008 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
10009 assert(VM_Version::supports_evex(), "requires EVEX support");
10010 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
10011 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10012 attributes.set_is_evex_instruction();
10013 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (0xC0 | encode), imm8);
10017 }
10018
10019 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) {
10020 assert(VM_Version::supports_evex(), "requires EVEX support");
10021 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
10022 assert(dst != xnoreg, "sanity");
10023 InstructionMark im(this);
10024 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10025 attributes.set_is_evex_instruction();
10026 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
10027 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10028 emit_int8(0x25);
10029 emit_operand(dst, src3, 1);
10030 emit_int8(imm8);
10031 }
10032
10033 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
10034 assert(VM_Version::supports_evex(), "requires AVX512F");
10035 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
10036 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10037 attributes.set_is_evex_instruction();
10038 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (0xC0 | encode), imm8);
10042 }
10043
10044 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) {
10045 assert(VM_Version::supports_evex(), "requires EVEX support");
10046 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
10047 assert(dst != xnoreg, "sanity");
10048 InstructionMark im(this);
10049 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10050 attributes.set_is_evex_instruction();
10051 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
10052 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10053 emit_int8(0x25);
10054 emit_operand(dst, src3, 1);
10055 emit_int8(imm8);
10056 }
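// The imm8 of vpternlog{d,q} is a 3-input truth table: for each bit position,
// result = imm8[(dst_bit << 2) | (src2_bit << 1) | src3_bit]. Two common
// constants, shown with hypothetical registers:
//   vpternlogq(xmm0, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // 0x96: A ^ B ^ C (three-way XOR)
//   vpternlogq(xmm0, 0xE8, xmm1, xmm2, Assembler::AVX_512bit); // 0xE8: majority(A, B, C)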
10057
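// Expand operations: take the contiguous low elements of src and scatter them
// into the dst lanes selected by the opmask, honoring merge/zeroing for the
// unselected lanes.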
10058 void Assembler::evexpandps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
10059 assert(VM_Version::supports_evex(), "");
10060 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10061 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10062 attributes.set_is_evex_instruction();
10063 attributes.set_embedded_opmask_register_specifier(mask);
10064 if (merge) {
10065 attributes.reset_is_clear_context();
10066 }
10067 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10068 emit_int16((unsigned char)0x88, (0xC0 | encode));
10069 }
10070
10071 void Assembler::evexpandpd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
10072 assert(VM_Version::supports_evex(), "");
10073 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10074 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10075 attributes.set_is_evex_instruction();
10076 attributes.set_embedded_opmask_register_specifier(mask);
10077 if (merge) {
10078 attributes.reset_is_clear_context();
10079 }
10080 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10081 emit_int16((unsigned char)0x88, (0xC0 | encode));
10082 }
10083
10084 void Assembler::evpexpandb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
10085 assert(VM_Version::supports_avx512_vbmi2(), "");
10086 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10087 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10088 attributes.set_is_evex_instruction();
10089 attributes.set_embedded_opmask_register_specifier(mask);
10090 if (merge) {
10091 attributes.reset_is_clear_context();
10092 }
10093 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10094 emit_int16(0x62, (0xC0 | encode));
10095 }
10096
10097 void Assembler::evpexpandw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
10098 assert(VM_Version::supports_avx512_vbmi2(), "");
10099 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10100 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10101 attributes.set_is_evex_instruction();
10102 attributes.set_embedded_opmask_register_specifier(mask);
10103 if (merge) {
10104 attributes.reset_is_clear_context();
10105 }
10106 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10107 emit_int16(0x62, (0xC0 | encode));
10108 }
10109
10110 void Assembler::evpexpandd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
10111 assert(VM_Version::supports_evex(), "");
10112 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10113 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10114 attributes.set_is_evex_instruction();
10115 attributes.set_embedded_opmask_register_specifier(mask);
10116 if (merge) {
10117 attributes.reset_is_clear_context();
10118 }
10119 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10120 emit_int16((unsigned char)0x89, (0xC0 | encode));
10121 }
10122
10123 void Assembler::evpexpandq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
10124 assert(VM_Version::supports_evex(), "");
10125 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10126 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10127 attributes.set_is_evex_instruction();
10128 attributes.set_embedded_opmask_register_specifier(mask);
10129 if (merge) {
10130 attributes.reset_is_clear_context();
10131 }
10132 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10133 emit_int16((unsigned char)0x89, (0xC0 | encode));
10134 }
10135
10136 // vinserti forms
10137
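// In every insert form, dst receives a copy of nds with the 128-bit (or 256-bit)
// lane selected by imm8 replaced by src; all other lanes are taken from nds.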
10138 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
10139 assert(VM_Version::supports_avx2(), "");
10140 assert(imm8 <= 0x01, "imm8: %u", imm8);
10141 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10142 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
10144 // 0x00 - insert into lower 128 bits
10145 // 0x01 - insert into upper 128 bits
10146 emit_int24(0x38, (0xC0 | encode), imm8 & 0x01);
10147 }
10148
10149 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
10150 assert(VM_Version::supports_avx2(), "");
10151 assert(dst != xnoreg, "sanity");
10152 assert(imm8 <= 0x01, "imm8: %u", imm8);
10153 InstructionMark im(this);
10154 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10155 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10156 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10157 emit_int8(0x38);
10158 emit_operand(dst, src, 1);
10159 // 0x00 - insert into lower 128 bits
10160 // 0x01 - insert into upper 128 bits
10161 emit_int8(imm8 & 0x01);
10162 }
10163
10164 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
10165 assert(VM_Version::supports_evex(), "");
10166 assert(imm8 <= 0x03, "imm8: %u", imm8);
10167 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10168 attributes.set_is_evex_instruction();
10169 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10170 // imm8:
10171 // 0x00 - insert into q0 128 bits (0..127)
10172 // 0x01 - insert into q1 128 bits (128..255)
10173 // 0x02 - insert into q2 128 bits (256..383)
10174 // 0x03 - insert into q3 128 bits (384..511)
10175 emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
10176 }
10177
10178 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
10179 assert(VM_Version::supports_evex(), "");
10180 assert(dst != xnoreg, "sanity");
10181 assert(imm8 <= 0x03, "imm8: %u", imm8);
10182 InstructionMark im(this);
10183 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10184 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10185 attributes.set_is_evex_instruction();
10186 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
10188 emit_operand(dst, src, 1);
10189 // 0x00 - insert into q0 128 bits (0..127)
10190 // 0x01 - insert into q1 128 bits (128..255)
10191 // 0x02 - insert into q2 128 bits (256..383)
10192 // 0x03 - insert into q3 128 bits (384..511)
10193 emit_int8(imm8 & 0x03);
10194 }
10195
10196 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
10197 assert(VM_Version::supports_evex(), "");
10198 assert(imm8 <= 0x01, "imm8: %u", imm8);
10199 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10200 attributes.set_is_evex_instruction();
10201 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
10203 // 0x00 - insert into lower 256 bits
10204 // 0x01 - insert into upper 256 bits
10205 emit_int24(0x3A, (0xC0 | encode), imm8 & 0x01);
10206 }
10207
10208 void Assembler::evinserti64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8, int vector_len) {
10209 assert(VM_Version::supports_avx512dq(), "");
10210 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10211 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10212 attributes.set_is_evex_instruction();
10213 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
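  // imm8 selects the destination 128-bit lane: 0..1 for 256-bit vectors and
  // 0..3 for 512-bit vectors, hence the & 0x03 below.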
10214 emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
10215 }
10216
10217
10218 // vinsertf forms
10219
10220 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
10221 assert(VM_Version::supports_avx(), "");
10222 assert(imm8 <= 0x01, "imm8: %u", imm8);
10223 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10224 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10225 // imm8:
10226 // 0x00 - insert into lower 128 bits
10227 // 0x01 - insert into upper 128 bits
10228 emit_int24(0x18, (0xC0 | encode), imm8 & 0x01);
10229 }
10230
10231 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
10232 assert(VM_Version::supports_avx(), "");
10233 assert(dst != xnoreg, "sanity");
10234 assert(imm8 <= 0x01, "imm8: %u", imm8);
10235 InstructionMark im(this);
10236 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10237 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10238 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10239 emit_int8(0x18);
10240 emit_operand(dst, src, 1);
10241 // 0x00 - insert into lower 128 bits
10242 // 0x01 - insert into upper 128 bits
10243 emit_int8(imm8 & 0x01);
10244 }
10245
10246 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
10247 assert(VM_Version::supports_evex(), "");
10248 assert(imm8 <= 0x03, "imm8: %u", imm8);
10249 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10250 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10251 // imm8:
10252 // 0x00 - insert into q0 128 bits (0..127)
10253 // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
10256 emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
10257 }
10258
10259 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
10260 assert(VM_Version::supports_evex(), "");
10261 assert(dst != xnoreg, "sanity");
10262 assert(imm8 <= 0x03, "imm8: %u", imm8);
10263 InstructionMark im(this);
10264 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10265 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10266 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10267 emit_int8(0x18);
10268 emit_operand(dst, src, 1);
10269 // 0x00 - insert into q0 128 bits (0..127)
10270 // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
10273 emit_int8(imm8 & 0x03);
10274 }
10275
10276 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
10277 assert(VM_Version::supports_evex(), "");
10278 assert(imm8 <= 0x01, "imm8: %u", imm8);
10279 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10280 attributes.set_is_evex_instruction();
10281 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10282 // imm8:
10283 // 0x00 - insert into lower 256 bits
10284 // 0x01 - insert into upper 256 bits
10285 emit_int24(0x1A, (0xC0 | encode), imm8 & 0x01);
10286 }
10287
10288 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
10289 assert(VM_Version::supports_evex(), "");
10290 assert(dst != xnoreg, "sanity");
10291 assert(imm8 <= 0x01, "imm8: %u", imm8);
10292 InstructionMark im(this);
10293 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10294 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
10295 attributes.set_is_evex_instruction();
10296 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10297 emit_int8(0x1A);
10298 emit_operand(dst, src, 1);
10299 // 0x00 - insert into lower 256 bits
10300 // 0x01 - insert into upper 256 bits
10301 emit_int8(imm8 & 0x01);
10302 }
10303
10304
10305 // vextracti forms
10306
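// The extract forms copy the 128-bit (or 256-bit) lane of src selected by imm8
// into dst; a register destination is zeroed above the copied lane, per the
// usual VEX/EVEX convention.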
10307 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10308 assert(VM_Version::supports_avx2(), "");
10309 assert(imm8 <= 0x01, "imm8: %u", imm8);
10310 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10311 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10312 // imm8:
10313 // 0x00 - extract from lower 128 bits
10314 // 0x01 - extract from upper 128 bits
10315 emit_int24(0x39, (0xC0 | encode), imm8 & 0x01);
10316 }
10317
10318 void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
10319 assert(VM_Version::supports_avx2(), "");
10320 assert(src != xnoreg, "sanity");
10321 assert(imm8 <= 0x01, "imm8: %u", imm8);
10322 InstructionMark im(this);
10323 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10324 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10325 attributes.reset_is_clear_context();
10326 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10327 emit_int8(0x39);
10328 emit_operand(src, dst, 1);
10329 // 0x00 - extract from lower 128 bits
10330 // 0x01 - extract from upper 128 bits
10331 emit_int8(imm8 & 0x01);
10332 }
10333
10334 void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10335 assert(VM_Version::supports_evex(), "");
10336 assert(imm8 <= 0x03, "imm8: %u", imm8);
10337 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10338 attributes.set_is_evex_instruction();
10339 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10340 // imm8:
10341 // 0x00 - extract from bits 127:0
10342 // 0x01 - extract from bits 255:128
10343 // 0x02 - extract from bits 383:256
10344 // 0x03 - extract from bits 511:384
10345 emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
10346 }
10347
10348 void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
10349 assert(VM_Version::supports_evex(), "");
10350 assert(src != xnoreg, "sanity");
10351 assert(imm8 <= 0x03, "imm8: %u", imm8);
10352 InstructionMark im(this);
10353 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10354 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10355 attributes.reset_is_clear_context();
10356 attributes.set_is_evex_instruction();
10357 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10358 emit_int8(0x39);
10359 emit_operand(src, dst, 1);
10360 // 0x00 - extract from bits 127:0
10361 // 0x01 - extract from bits 255:128
10362 // 0x02 - extract from bits 383:256
10363 // 0x03 - extract from bits 511:384
10364 emit_int8(imm8 & 0x03);
10365 }
10366
10367 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10368 assert(VM_Version::supports_avx512dq(), "");
10369 assert(imm8 <= 0x03, "imm8: %u", imm8);
10370 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10371 attributes.set_is_evex_instruction();
10372 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10373 // imm8:
10374 // 0x00 - extract from bits 127:0
10375 // 0x01 - extract from bits 255:128
10376 // 0x02 - extract from bits 383:256
10377 // 0x03 - extract from bits 511:384
10378 emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
10379 }
10380
10381 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10382 assert(VM_Version::supports_evex(), "");
10383 assert(imm8 <= 0x01, "imm8: %u", imm8);
10384 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10385 attributes.set_is_evex_instruction();
10386 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10387 // imm8:
10388 // 0x00 - extract from lower 256 bits
10389 // 0x01 - extract from upper 256 bits
10390 emit_int24(0x3B, (0xC0 | encode), imm8 & 0x01);
10391 }
10392
10393 void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) {
10394 assert(VM_Version::supports_evex(), "");
10395 assert(src != xnoreg, "sanity");
10396 assert(imm8 <= 0x01, "imm8: %u", imm8);
10397 InstructionMark im(this);
10398 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10399 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
10400 attributes.reset_is_clear_context();
10401 attributes.set_is_evex_instruction();
10402 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
10404 emit_operand(src, dst, 1);
10405 // 0x00 - extract from lower 256 bits
10406 // 0x01 - extract from upper 256 bits
10407 emit_int8(imm8 & 0x01);
10408 }

// vextractf forms
10410
10411 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10412 assert(VM_Version::supports_avx(), "");
10413 assert(imm8 <= 0x01, "imm8: %u", imm8);
10414 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10415 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10416 // imm8:
10417 // 0x00 - extract from lower 128 bits
10418 // 0x01 - extract from upper 128 bits
10419 emit_int24(0x19, (0xC0 | encode), imm8 & 0x01);
10420 }
10421
10422 void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
10423 assert(VM_Version::supports_avx(), "");
10424 assert(src != xnoreg, "sanity");
10425 assert(imm8 <= 0x01, "imm8: %u", imm8);
10426 InstructionMark im(this);
10427 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10428 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10429 attributes.reset_is_clear_context();
10430 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10431 emit_int8(0x19);
10432 emit_operand(src, dst, 1);
10433 // 0x00 - extract from lower 128 bits
10434 // 0x01 - extract from upper 128 bits
10435 emit_int8(imm8 & 0x01);
10436 }
10437
10438 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10439 assert(VM_Version::supports_evex(), "");
10440 assert(imm8 <= 0x03, "imm8: %u", imm8);
10441 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10442 attributes.set_is_evex_instruction();
10443 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10444 // imm8:
10445 // 0x00 - extract from bits 127:0
10446 // 0x01 - extract from bits 255:128
10447 // 0x02 - extract from bits 383:256
10448 // 0x03 - extract from bits 511:384
10449 emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
10450 }
10451
10452 void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
10453 assert(VM_Version::supports_evex(), "");
10454 assert(src != xnoreg, "sanity");
10455 assert(imm8 <= 0x03, "imm8: %u", imm8);
10456 InstructionMark im(this);
10457 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10458 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
10459 attributes.reset_is_clear_context();
10460 attributes.set_is_evex_instruction();
10461 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10462 emit_int8(0x19);
10463 emit_operand(src, dst, 1);
10464 // 0x00 - extract from bits 127:0
10465 // 0x01 - extract from bits 255:128
10466 // 0x02 - extract from bits 383:256
10467 // 0x03 - extract from bits 511:384
10468 emit_int8(imm8 & 0x03);
10469 }
10470
10471 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10472 assert(VM_Version::supports_avx512dq(), "");
10473 assert(imm8 <= 0x03, "imm8: %u", imm8);
10474 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10475 attributes.set_is_evex_instruction();
10476 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10477 // imm8:
10478 // 0x00 - extract from bits 127:0
10479 // 0x01 - extract from bits 255:128
10480 // 0x02 - extract from bits 383:256
10481 // 0x03 - extract from bits 511:384
10482 emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
10483 }
10484
10485 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
10486 assert(VM_Version::supports_evex(), "");
10487 assert(imm8 <= 0x01, "imm8: %u", imm8);
10488 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10489 attributes.set_is_evex_instruction();
10490 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10491 // imm8:
10492 // 0x00 - extract from lower 256 bits
10493 // 0x01 - extract from upper 256 bits
10494 emit_int24(0x1B, (0xC0 | encode), imm8 & 0x01);
10495 }
10496
10497 void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
10498 assert(VM_Version::supports_evex(), "");
10499 assert(src != xnoreg, "sanity");
10500 assert(imm8 <= 0x01, "imm8: %u", imm8);
10501 InstructionMark im(this);
10502 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10503 attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit);
10504 attributes.reset_is_clear_context();
10505 attributes.set_is_evex_instruction();
10506 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
10507 emit_int8(0x1B);
10508 emit_operand(src, dst, 1);
10509 // 0x00 - extract from lower 256 bits
10510 // 0x01 - extract from upper 256 bits
10511 emit_int8(imm8 & 0x01);
10512 }
10513
10514 void Assembler::extractps(Register dst, XMMRegister src, uint8_t imm8) {
10515 assert(VM_Version::supports_sse4_1(), "");
10516 assert(imm8 <= 0x03, "imm8: %u", imm8);
10517 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
10518 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
10519 // imm8:
10520 // 0x00 - extract from bits 31:0
10521 // 0x01 - extract from bits 63:32
10522 // 0x02 - extract from bits 95:64
10523 // 0x03 - extract from bits 127:96
10524 emit_int24(0x17, (0xC0 | encode), imm8 & 0x03);
10525 }
10526
// Duplicate the low byte of src into every byte lane of dst. The 128/256-bit VEX forms need AVX2; EVEX-encoded forms require AVX512BW (plus AVX512VL below 512 bits).
10528 void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
10529 assert(VM_Version::supports_avx2(), "");
10530 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10531 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10532 emit_int16(0x78, (0xC0 | encode));
10533 }
10534
10535 void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
10536 assert(VM_Version::supports_avx2(), "");
10537 assert(dst != xnoreg, "sanity");
10538 InstructionMark im(this);
10539 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10540 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
10541 // swap src<->dst for encoding
10542 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10543 emit_int8(0x78);
10544 emit_operand(dst, src, 0);
10545 }
10546
// Duplicate the low word of src into every word lane of dst. The 128/256-bit VEX forms need AVX2; EVEX-encoded forms require AVX512BW (plus AVX512VL below 512 bits).
10548 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
10549 assert(VM_Version::supports_avx2(), "");
10550 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10551 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10552 emit_int16(0x79, (0xC0 | encode));
10553 }
10554
10555 void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
10556 assert(VM_Version::supports_avx2(), "");
10557 assert(dst != xnoreg, "sanity");
10558 InstructionMark im(this);
10559 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10560 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
10561 // swap src<->dst for encoding
10562 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10563 emit_int8(0x79);
10564 emit_operand(dst, src, 0);
10565 }
10566
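// vpsadbw: within each 64-bit lane, sums the absolute differences of the eight
// unsigned byte pairs from nds and src into the lane's low 16 bits; the upper
// bits of the lane are zeroed.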
10567 void Assembler::vpsadbw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10568 assert(UseAVX > 0, "requires some form of AVX");
10569 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10570 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10571 emit_int16((unsigned char)0xF6, (0xC0 | encode));
10572 }
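// The vpunpck* forms interleave elements of nds and src within each 128-bit lane:
// the 'l' variants draw from the low half of every lane, the 'h' variants from
// the high half.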
10573
10574 void Assembler::vpunpckhwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10575 assert(UseAVX > 0, "requires some form of AVX");
10576 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10577 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10578 emit_int16(0x69, (0xC0 | encode));
10579 }
10580
10581 void Assembler::vpunpcklwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10582 assert(UseAVX > 0, "requires some form of AVX");
10583 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10584 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10585 emit_int16(0x61, (0xC0 | encode));
10586 }
10587
10588 void Assembler::vpunpckhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10589 assert(UseAVX > 0, "requires some form of AVX");
10590 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10591 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10592 emit_int16(0x6A, (0xC0 | encode));
10593 }
10594
10595 void Assembler::vpunpckhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10596 assert(UseAVX > 0, "requires some form of AVX");
10597 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10598 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10599 emit_int16(0x6D, (0xC0 | encode));
10600 }
10601
10602 void Assembler::vpunpckldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10603 assert(UseAVX > 0, "requires some form of AVX");
10604 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10605 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10606 emit_int16(0x62, (0xC0 | encode));
10607 }
10608
10609 void Assembler::vpunpcklqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10610 assert(UseAVX > 0, "requires some form of AVX");
10611 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10612 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10613 emit_int16(0x6C, (0xC0 | encode));
10614 }
10615
// Masked (opmask-predicated) arithmetic on byte/word/dword/qword elements, xmm/mem sourced
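// Common pattern for the masked ev* forms below: set_embedded_opmask_register_specifier(mask)
// places the k-register in the EVEX.aaa field, so only lanes whose mask bit is set are
// written. merge == true calls reset_is_clear_context(), clearing EVEX.z so masked-off
// lanes keep their previous dst contents; merge == false leaves EVEX.z set, zeroing them.
// A minimal usage sketch (operand choices are illustrative only):
//   evpaddb(xmm0, k1, xmm1, xmm2, /* merge */ true, Assembler::AVX_512bit);
// adds the byte lanes of xmm1 and xmm2 wherever k1 is set and preserves the rest of xmm0.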
10617 void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10618 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10619 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10620 attributes.set_is_evex_instruction();
10621 attributes.set_embedded_opmask_register_specifier(mask);
10622 if (merge) {
10623 attributes.reset_is_clear_context();
10624 }
10625 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10626 emit_int16((unsigned char)0xFC, (0xC0 | encode));
10627 }
10628
10629 void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10630 InstructionMark im(this);
10631 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10632 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10633 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
10634 attributes.set_is_evex_instruction();
10635 attributes.set_embedded_opmask_register_specifier(mask);
10636 if (merge) {
10637 attributes.reset_is_clear_context();
10638 }
10639 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10640 emit_int8((unsigned char)0xFC);
10641 emit_operand(dst, src, 0);
10642 }
10643
10644 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10645 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10646 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10647 attributes.set_is_evex_instruction();
10648 attributes.set_embedded_opmask_register_specifier(mask);
10649 if (merge) {
10650 attributes.reset_is_clear_context();
10651 }
10652 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10653 emit_int16((unsigned char)0xFD, (0xC0 | encode));
10654 }
10655
10656 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10657 InstructionMark im(this);
10658 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10659 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10660 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
10661 attributes.set_is_evex_instruction();
10662 attributes.set_embedded_opmask_register_specifier(mask);
10663 if (merge) {
10664 attributes.reset_is_clear_context();
10665 }
10666 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10667 emit_int8((unsigned char)0xFD);
10668 emit_operand(dst, src, 0);
10669 }
10670
10671 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10672 assert(VM_Version::supports_evex(), "");
10673 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10674 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10675 attributes.set_is_evex_instruction();
10676 attributes.set_embedded_opmask_register_specifier(mask);
10677 if (merge) {
10678 attributes.reset_is_clear_context();
10679 }
10680 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10681 emit_int16((unsigned char)0xFE, (0xC0 | encode));
10682 }
10683
10684 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10685 InstructionMark im(this);
10686 assert(VM_Version::supports_evex(), "");
10687 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10688 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10689 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10690 attributes.set_is_evex_instruction();
10691 attributes.set_embedded_opmask_register_specifier(mask);
10692 if (merge) {
10693 attributes.reset_is_clear_context();
10694 }
10695 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10696 emit_int8((unsigned char)0xFE);
10697 emit_operand(dst, src, 0);
10698 }
10699
10700 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10701 assert(VM_Version::supports_evex(), "");
10702 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10703 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10704 attributes.set_is_evex_instruction();
10705 attributes.set_embedded_opmask_register_specifier(mask);
10706 if (merge) {
10707 attributes.reset_is_clear_context();
10708 }
10709 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10710 emit_int16((unsigned char)0xD4, (0xC0 | encode));
10711 }
10712
10713 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10714 InstructionMark im(this);
10715 assert(VM_Version::supports_evex(), "");
10716 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10717 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10718 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10719 attributes.set_is_evex_instruction();
10720 attributes.set_embedded_opmask_register_specifier(mask);
10721 if (merge) {
10722 attributes.reset_is_clear_context();
10723 }
10724 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10725 emit_int8((unsigned char)0xD4);
10726 emit_operand(dst, src, 0);
10727 }
10728
10729 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10730 assert(VM_Version::supports_evex(), "");
10731 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10732 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10733 attributes.set_is_evex_instruction();
10734 attributes.set_embedded_opmask_register_specifier(mask);
10735 if (merge) {
10736 attributes.reset_is_clear_context();
10737 }
10738 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10739 emit_int16(0x58, (0xC0 | encode));
10740 }
10741
10742 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10743 InstructionMark im(this);
10744 assert(VM_Version::supports_evex(), "");
10745 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10746 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10747 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10748 attributes.set_is_evex_instruction();
10749 attributes.set_embedded_opmask_register_specifier(mask);
10750 if (merge) {
10751 attributes.reset_is_clear_context();
10752 }
10753 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10754 emit_int8(0x58);
10755 emit_operand(dst, src, 0);
10756 }
10757
10758 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10759 assert(VM_Version::supports_evex(), "");
10760 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10761 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10762 attributes.set_is_evex_instruction();
10763 attributes.set_embedded_opmask_register_specifier(mask);
10764 if (merge) {
10765 attributes.reset_is_clear_context();
10766 }
10767 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10768 emit_int16(0x58, (0xC0 | encode));
10769 }
10770
10771 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10772 InstructionMark im(this);
10773 assert(VM_Version::supports_evex(), "");
10774 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10775 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10776 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10777 attributes.set_is_evex_instruction();
10778 attributes.set_embedded_opmask_register_specifier(mask);
10779 if (merge) {
10780 attributes.reset_is_clear_context();
10781 }
10782 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10783 emit_int8(0x58);
10784 emit_operand(dst, src, 0);
10785 }
10786
10787 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10788 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10789 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10790 attributes.set_is_evex_instruction();
10791 attributes.set_embedded_opmask_register_specifier(mask);
10792 if (merge) {
10793 attributes.reset_is_clear_context();
10794 }
10795 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10796 emit_int16((unsigned char)0xF8, (0xC0 | encode));
10797 }
10798
10799 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10800 InstructionMark im(this);
10801 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10802 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10803 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
10804 attributes.set_is_evex_instruction();
10805 attributes.set_embedded_opmask_register_specifier(mask);
10806 if (merge) {
10807 attributes.reset_is_clear_context();
10808 }
10809 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10810 emit_int8((unsigned char)0xF8);
10811 emit_operand(dst, src, 0);
10812 }
10813
10814 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10815 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10816 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10817 attributes.set_is_evex_instruction();
10818 attributes.set_embedded_opmask_register_specifier(mask);
10819 if (merge) {
10820 attributes.reset_is_clear_context();
10821 }
10822 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10823 emit_int16((unsigned char)0xF9, (0xC0 | encode));
10824 }
10825
10826 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10827 InstructionMark im(this);
10828 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10829 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10830 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
10831 attributes.set_is_evex_instruction();
10832 attributes.set_embedded_opmask_register_specifier(mask);
10833 if (merge) {
10834 attributes.reset_is_clear_context();
10835 }
10836 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10837 emit_int8((unsigned char)0xF9);
10838 emit_operand(dst, src, 0);
10839 }
10840
10841 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10842 assert(VM_Version::supports_evex(), "");
10843 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10844 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10845 attributes.set_is_evex_instruction();
10846 attributes.set_embedded_opmask_register_specifier(mask);
10847 if (merge) {
10848 attributes.reset_is_clear_context();
10849 }
10850 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10851 emit_int16((unsigned char)0xFA, (0xC0 | encode));
10852 }
10853
10854 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10855 InstructionMark im(this);
10856 assert(VM_Version::supports_evex(), "");
10857 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10858 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10859 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10860 attributes.set_is_evex_instruction();
10861 attributes.set_embedded_opmask_register_specifier(mask);
10862 if (merge) {
10863 attributes.reset_is_clear_context();
10864 }
10865 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10866 emit_int8((unsigned char)0xFA);
10867 emit_operand(dst, src, 0);
10868 }
10869
10870 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10871 assert(VM_Version::supports_evex(), "");
10872 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10873 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10874 attributes.set_is_evex_instruction();
10875 attributes.set_embedded_opmask_register_specifier(mask);
10876 if (merge) {
10877 attributes.reset_is_clear_context();
10878 }
10879 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10880 emit_int16((unsigned char)0xFB, (0xC0 | encode));
10881 }
10882
10883 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10884 InstructionMark im(this);
10885 assert(VM_Version::supports_evex(), "");
10886 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10887 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10888 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10889 attributes.set_is_evex_instruction();
10890 attributes.set_embedded_opmask_register_specifier(mask);
10891 if (merge) {
10892 attributes.reset_is_clear_context();
10893 }
10894 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10895 emit_int8((unsigned char)0xFB);
10896 emit_operand(dst, src, 0);
10897 }
10898
10899 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10900 assert(VM_Version::supports_evex(), "");
10901 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10902 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10903 attributes.set_is_evex_instruction();
10904 attributes.set_embedded_opmask_register_specifier(mask);
10905 if (merge) {
10906 attributes.reset_is_clear_context();
10907 }
10908 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10909 emit_int16(0x5C, (0xC0 | encode));
10910 }
10911
10912 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10913 InstructionMark im(this);
10914 assert(VM_Version::supports_evex(), "");
10915 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10916 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10917 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10918 attributes.set_is_evex_instruction();
10919 attributes.set_embedded_opmask_register_specifier(mask);
10920 if (merge) {
10921 attributes.reset_is_clear_context();
10922 }
10923 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10924 emit_int8(0x5C);
10925 emit_operand(dst, src, 0);
10926 }
10927
10928 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10929 assert(VM_Version::supports_evex(), "");
10930 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10931 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10932 attributes.set_is_evex_instruction();
10933 attributes.set_embedded_opmask_register_specifier(mask);
10934 if (merge) {
10935 attributes.reset_is_clear_context();
10936 }
10937 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10938 emit_int16(0x5C, (0xC0 | encode));
10939 }
10940
10941 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10942 InstructionMark im(this);
10943 assert(VM_Version::supports_evex(), "");
10944 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10945 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10946 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10947 attributes.set_is_evex_instruction();
10948 attributes.set_embedded_opmask_register_specifier(mask);
10949 if (merge) {
10950 attributes.reset_is_clear_context();
10951 }
10952 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10953 emit_int8(0x5C);
10954 emit_operand(dst, src, 0);
10955 }
10956
10957 void Assembler::evpaddsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10958 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10959 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10960 attributes.set_is_evex_instruction();
10961 attributes.set_embedded_opmask_register_specifier(mask);
10962 if (merge) {
10963 attributes.reset_is_clear_context();
10964 }
10965 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10966 emit_int16((unsigned char)0xEC, (0xC0 | encode));
10967 }
10968
10969 void Assembler::evpaddsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10970 InstructionMark im(this);
10971 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10972 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10973 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
10974 attributes.set_is_evex_instruction();
10975 attributes.set_embedded_opmask_register_specifier(mask);
10976 if (merge) {
10977 attributes.reset_is_clear_context();
10978 }
10979 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10980 emit_int8((unsigned char)0xEC);
10981 emit_operand(dst, src, 0);
10982 }
10983
10984 void Assembler::evpaddsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10985 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10986 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10987 attributes.set_is_evex_instruction();
10988 attributes.set_embedded_opmask_register_specifier(mask);
10989 if (merge) {
10990 attributes.reset_is_clear_context();
10991 }
10992 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10993 emit_int16((unsigned char)0xED, (0xC0 | encode));
10994 }
10995
10996 void Assembler::evpaddsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10997 InstructionMark im(this);
10998 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10999 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11000 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11001 attributes.set_is_evex_instruction();
11002 attributes.set_embedded_opmask_register_specifier(mask);
11003 if (merge) {
11004 attributes.reset_is_clear_context();
11005 }
11006 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11007 emit_int8((unsigned char)0xED);
11008 emit_operand(dst, src, 0);
11009 }
11010
11011 void Assembler::evpaddusb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11012 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11013 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11014 attributes.set_is_evex_instruction();
11015 attributes.set_embedded_opmask_register_specifier(mask);
11016 if (merge) {
11017 attributes.reset_is_clear_context();
11018 }
11019 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11020 emit_int16((unsigned char)0xDC, (0xC0 | encode));
11021 }
11022
11023 void Assembler::evpaddusb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11024 InstructionMark im(this);
11025 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11026 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11027 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11028 attributes.set_is_evex_instruction();
11029 attributes.set_embedded_opmask_register_specifier(mask);
11030 if (merge) {
11031 attributes.reset_is_clear_context();
11032 }
11033 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11034 emit_int8((unsigned char)0xDC);
11035 emit_operand(dst, src, 0);
11036 }
11037
11038 void Assembler::evpaddusw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11039 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11040 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11041 attributes.set_is_evex_instruction();
11042 attributes.set_embedded_opmask_register_specifier(mask);
11043 if (merge) {
11044 attributes.reset_is_clear_context();
11045 }
11046 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11047 emit_int16((unsigned char)0xDD, (0xC0 | encode));
11048 }
11049
11050 void Assembler::evpaddusw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11051 InstructionMark im(this);
11052 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11053 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11054 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11055 attributes.set_is_evex_instruction();
11056 attributes.set_embedded_opmask_register_specifier(mask);
11057 if (merge) {
11058 attributes.reset_is_clear_context();
11059 }
11060 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11061 emit_int8((unsigned char)0xDD);
11062 emit_operand(dst, src, 0);
11063 }
11064
11065 void Assembler::evpsubsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11066 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11067 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11068 attributes.set_is_evex_instruction();
11069 attributes.set_embedded_opmask_register_specifier(mask);
11070 if (merge) {
11071 attributes.reset_is_clear_context();
11072 }
11073 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11074 emit_int16((unsigned char)0xE8, (0xC0 | encode));
11075 }
11076
11077 void Assembler::evpsubsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11078 InstructionMark im(this);
11079 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11080 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11081 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11082 attributes.set_is_evex_instruction();
11083 attributes.set_embedded_opmask_register_specifier(mask);
11084 if (merge) {
11085 attributes.reset_is_clear_context();
11086 }
11087 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11088 emit_int8((unsigned char)0xE8);
11089 emit_operand(dst, src, 0);
11090 }
11091
11092 void Assembler::evpsubsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11093 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11094 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11095 attributes.set_is_evex_instruction();
11096 attributes.set_embedded_opmask_register_specifier(mask);
11097 if (merge) {
11098 attributes.reset_is_clear_context();
11099 }
11100 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11101 emit_int16((unsigned char)0xE9, (0xC0 | encode));
11102 }
11103
11104 void Assembler::evpsubsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11105 InstructionMark im(this);
11106 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11107 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11108 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11109 attributes.set_is_evex_instruction();
11110 attributes.set_embedded_opmask_register_specifier(mask);
11111 if (merge) {
11112 attributes.reset_is_clear_context();
11113 }
11114 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11115 emit_int8((unsigned char)0xE9);
11116 emit_operand(dst, src, 0);
11117 }
11118
11119 void Assembler::evpsubusb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11120 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11121 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11122 attributes.set_is_evex_instruction();
11123 attributes.set_embedded_opmask_register_specifier(mask);
11124 if (merge) {
11125 attributes.reset_is_clear_context();
11126 }
11127 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11128 emit_int16((unsigned char)0xD8, (0xC0 | encode));
11129 }
11130
11131 void Assembler::evpsubusb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11132 InstructionMark im(this);
11133 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11134 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11135 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11136 attributes.set_is_evex_instruction();
11137 attributes.set_embedded_opmask_register_specifier(mask);
11138 if (merge) {
11139 attributes.reset_is_clear_context();
11140 }
11141 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11142 emit_int8((unsigned char)0xD8);
11143 emit_operand(dst, src, 0);
11144 }
11145
11146 void Assembler::evpsubusw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11147 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11148 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11149 attributes.set_is_evex_instruction();
11150 attributes.set_embedded_opmask_register_specifier(mask);
11151 if (merge) {
11152 attributes.reset_is_clear_context();
11153 }
11154 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11155 emit_int16((unsigned char)0xD9, (0xC0 | encode));
11156 }
11157
11159 void Assembler::evpsubusw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11160 InstructionMark im(this);
11161 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11162 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11163 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11164 attributes.set_is_evex_instruction();
11165 attributes.set_embedded_opmask_register_specifier(mask);
11166 if (merge) {
11167 attributes.reset_is_clear_context();
11168 }
11169 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11170 emit_int8((unsigned char)0xD9);
11171 emit_operand(dst, src, 0);
11172 }
11173
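// Masked integer multiplies: vpmullw (words), vpmulld (dwords), and vpmullq,
// which produces the low 64 bits of a 64x64 product and requires AVX512DQ.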
11174 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11175 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11176 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11177 attributes.set_is_evex_instruction();
11178 attributes.set_embedded_opmask_register_specifier(mask);
11179 if (merge) {
11180 attributes.reset_is_clear_context();
11181 }
11182 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11183 emit_int16((unsigned char)0xD5, (0xC0 | encode));
11184 }
11185
11186 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11187 InstructionMark im(this);
11188 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11189 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11190 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11191 attributes.set_is_evex_instruction();
11192 attributes.set_embedded_opmask_register_specifier(mask);
11193 if (merge) {
11194 attributes.reset_is_clear_context();
11195 }
11196 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11197 emit_int8((unsigned char)0xD5);
11198 emit_operand(dst, src, 0);
11199 }
11200
11201 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11202 assert(VM_Version::supports_evex(), "");
11203 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11204 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11205 attributes.set_is_evex_instruction();
11206 attributes.set_embedded_opmask_register_specifier(mask);
11207 if (merge) {
11208 attributes.reset_is_clear_context();
11209 }
11210 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11211 emit_int16(0x40, (0xC0 | encode));
11212 }
11213
11214 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11215 InstructionMark im(this);
11216 assert(VM_Version::supports_evex(), "");
11217 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11218 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11219 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11220 attributes.set_is_evex_instruction();
11221 attributes.set_embedded_opmask_register_specifier(mask);
11222 if (merge) {
11223 attributes.reset_is_clear_context();
11224 }
11225 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11226 emit_int8(0x40);
11227 emit_operand(dst, src, 0);
11228 }
11229
11230 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11231 assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11232 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11233 attributes.set_is_evex_instruction();
11234 attributes.set_embedded_opmask_register_specifier(mask);
11235 if (merge) {
11236 attributes.reset_is_clear_context();
11237 }
11238 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11239 emit_int16(0x40, (0xC0 | encode));
11240 }
11241
11242 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11243 InstructionMark im(this);
11244 assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11245 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11246 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11247 attributes.set_is_evex_instruction();
11248 attributes.set_embedded_opmask_register_specifier(mask);
11249 if (merge) {
11250 attributes.reset_is_clear_context();
11251 }
11252 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11253 emit_int8(0x40);
11254 emit_operand(dst, src, 0);
11255 }
11256
11257 void Assembler::evpmulhw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11258 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11259 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11260 attributes.set_is_evex_instruction();
11261 attributes.set_embedded_opmask_register_specifier(mask);
11262 if (merge) {
11263 attributes.reset_is_clear_context();
11264 }
11265 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11266 emit_int16((unsigned char)0xE5, (0xC0 | encode));
11267 }
11268
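// Masked packed floating-point multiply. vmulps takes no SIMD prefix (0F 59);
// vmulpd is the 66-prefixed form with VEX.W set.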
11269 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11270 assert(VM_Version::supports_evex(), "");
11271 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11272 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11273 attributes.set_is_evex_instruction();
11274 attributes.set_embedded_opmask_register_specifier(mask);
11275 if (merge) {
11276 attributes.reset_is_clear_context();
11277 }
11278 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
11279 emit_int16(0x59, (0xC0 | encode));
11280 }
11281
11282 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11283 InstructionMark im(this);
11284 assert(VM_Version::supports_evex(), "");
11285 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11286 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11287 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11288 attributes.set_is_evex_instruction();
11289 attributes.set_embedded_opmask_register_specifier(mask);
11290 if (merge) {
11291 attributes.reset_is_clear_context();
11292 }
11293 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
11294 emit_int8(0x59);
11295 emit_operand(dst, src, 0);
11296 }
11297
11298 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11299 assert(VM_Version::supports_evex(), "");
11300 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11301 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11302 attributes.set_is_evex_instruction();
11303 attributes.set_embedded_opmask_register_specifier(mask);
11304 if (merge) {
11305 attributes.reset_is_clear_context();
11306 }
11307 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11308 emit_int16(0x59, (0xC0 | encode));
11309 }
11310
11311 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11312 InstructionMark im(this);
11313 assert(VM_Version::supports_evex(), "");
11314 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11315 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11316 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11317 attributes.set_is_evex_instruction();
11318 attributes.set_embedded_opmask_register_specifier(mask);
11319 if (merge) {
11320 attributes.reset_is_clear_context();
11321 }
11322 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11323 emit_int8(0x59);
11324 emit_operand(dst, src, 0);
11325 }
11326
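// Masked square roots. These are unary, so `nds` is unused and 0 is passed
// for the vvvv field of the prefix.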
11327 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11328 assert(VM_Version::supports_evex(), "");
11329 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11330 InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11331 attributes.set_is_evex_instruction();
11332 attributes.set_embedded_opmask_register_specifier(mask);
11333 if (merge) {
11334 attributes.reset_is_clear_context();
11335 }
11336 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
11337 emit_int16(0x51, (0xC0 | encode));
11338 }
11339
11340 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11341 InstructionMark im(this);
11342 assert(VM_Version::supports_evex(), "");
11343 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11344 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11345 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11346 attributes.set_is_evex_instruction();
11347 attributes.set_embedded_opmask_register_specifier(mask);
11348 if (merge) {
11349 attributes.reset_is_clear_context();
11350 }
11351 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
11352 emit_int8(0x51);
11353 emit_operand(dst, src, 0);
11354 }
11355
11356 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11357 assert(VM_Version::supports_evex(), "");
11358 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11359 InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11360 attributes.set_is_evex_instruction();
11361 attributes.set_embedded_opmask_register_specifier(mask);
11362 if (merge) {
11363 attributes.reset_is_clear_context();
11364 }
11365 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11366 emit_int16(0x51, (0xC0 | encode));
11367 }
11368
11369 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11370 InstructionMark im(this);
11371 assert(VM_Version::supports_evex(), "");
11372 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11373 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11374 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11375 attributes.set_is_evex_instruction();
11376 attributes.set_embedded_opmask_register_specifier(mask);
11377 if (merge) {
11378 attributes.reset_is_clear_context();
11379 }
11380 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11381 emit_int8(0x51);
11382 emit_operand(dst, src, 0);
11383 }
11384
11386 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11387 assert(VM_Version::supports_evex(), "");
11388 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11389 InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11390 attributes.set_is_evex_instruction();
11391 attributes.set_embedded_opmask_register_specifier(mask);
11392 if (merge) {
11393 attributes.reset_is_clear_context();
11394 }
11395 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
11396 emit_int16(0x5E, (0xC0 | encode));
11397 }
11398
11399 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11400 InstructionMark im(this);
11401 assert(VM_Version::supports_evex(), "");
11402 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11403 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11404 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11405 attributes.set_is_evex_instruction();
11406 attributes.set_embedded_opmask_register_specifier(mask);
11407 if (merge) {
11408 attributes.reset_is_clear_context();
11409 }
11410 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
11411 emit_int8(0x5E);
11412 emit_operand(dst, src, 0);
11413 }
11414
11415 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11416 assert(VM_Version::supports_evex(), "");
11417 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11418 InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11419 attributes.set_is_evex_instruction();
11420 attributes.set_embedded_opmask_register_specifier(mask);
11421 if (merge) {
11422 attributes.reset_is_clear_context();
11423 }
11424 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11425 emit_int16(0x5E, (0xC0 | encode));
11426 }
11427
11428 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11429 InstructionMark im(this);
11430 assert(VM_Version::supports_evex(), "");
11431 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11432 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11433 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11434 attributes.set_is_evex_instruction();
11435 attributes.set_embedded_opmask_register_specifier(mask);
11436 if (merge) {
11437 attributes.reset_is_clear_context();
11438 }
11439 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11440 emit_int8(0x5E);
11441 emit_operand(dst, src, 0);
11442 }
11443
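// Scalar divide with static (embedded) rounding: `rmode` is passed in the
// vector-length slot because, in the embedded-rounding form selected via
// set_extended_context(), the EVEX.LL bits carry the rounding control rather
// than a vector length.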
11444 void Assembler::evdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src, EvexRoundPrefix rmode) {
11445 assert(VM_Version::supports_evex(), "");
11446 InstructionAttr attributes(rmode, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
11447 attributes.set_extended_context();
11448 attributes.set_is_evex_instruction();
11449 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
11450 emit_int16(0x5E, (0xC0 | encode));
11451 }
11452
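// Masked absolute value (vpabsb/vpabsw/vpabsd/vpabsq); also unary, with 0 in
// the vvvv field.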
11453 void Assembler::evpabsb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11454 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11455 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11456 attributes.set_is_evex_instruction();
11457 attributes.set_embedded_opmask_register_specifier(mask);
11458 if (merge) {
11459 attributes.reset_is_clear_context();
11460 }
11461 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11462 emit_int16(0x1C, (0xC0 | encode));
11463 }
11464
11466 void Assembler::evpabsb(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
11467 InstructionMark im(this);
11468 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11469 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11470 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11471 attributes.set_is_evex_instruction();
11472 attributes.set_embedded_opmask_register_specifier(mask);
11473 if (merge) {
11474 attributes.reset_is_clear_context();
11475 }
11476 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11477 emit_int8(0x1C);
11478 emit_operand(dst, src, 0);
11479 }
11480
11481 void Assembler::evpabsw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11482 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11483 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11484 attributes.set_is_evex_instruction();
11485 attributes.set_embedded_opmask_register_specifier(mask);
11486 if (merge) {
11487 attributes.reset_is_clear_context();
11488 }
11489 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11490 emit_int16(0x1D, (0xC0 | encode));
11491 }
11492
11494 void Assembler::evpabsw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
11495 InstructionMark im(this);
11496 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11497 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11498 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM,/* input_size_in_bits */ EVEX_NObit);
11499 attributes.set_is_evex_instruction();
11500 attributes.set_embedded_opmask_register_specifier(mask);
11501 if (merge) {
11502 attributes.reset_is_clear_context();
11503 }
11504 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11505 emit_int8(0x1D);
11506 emit_operand(dst, src, 0);
11507 }
11508
11509 void Assembler::evpabsd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11510 assert(VM_Version::supports_evex(), "");
11511 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11512 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11513 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11514 attributes.set_is_evex_instruction();
11515 attributes.set_embedded_opmask_register_specifier(mask);
11516 if (merge) {
11517 attributes.reset_is_clear_context();
11518 }
11519 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11520 emit_int16(0x1E, (0xC0 | encode));
11521 }
11522
11524 void Assembler::evpabsd(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
11525 InstructionMark im(this);
11526 assert(VM_Version::supports_evex(), "");
11527 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11528 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11529 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11530 attributes.set_is_evex_instruction();
11531 attributes.set_embedded_opmask_register_specifier(mask);
11532 if (merge) {
11533 attributes.reset_is_clear_context();
11534 }
11535 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11536 emit_int8(0x1E);
11537 emit_operand(dst, src, 0);
11538 }
11539
11540 void Assembler::evpabsq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11541 assert(VM_Version::supports_evex(), "");
11542 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11543 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11544 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11545 attributes.set_is_evex_instruction();
11546 attributes.set_embedded_opmask_register_specifier(mask);
11547 if (merge) {
11548 attributes.reset_is_clear_context();
11549 }
11550 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11551 emit_int16(0x1F, (0xC0 | encode));
11552 }
11553
11555 void Assembler::evpabsq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
11556 InstructionMark im(this);
11557 assert(VM_Version::supports_evex(), "");
11558 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11559 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11560 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11561 attributes.set_is_evex_instruction();
11562 attributes.set_embedded_opmask_register_specifier(mask);
11563 if (merge) {
11564 attributes.reset_is_clear_context();
11565 }
11566 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11567 emit_int8(0x1F);
11568 emit_operand(dst, src, 0);
11569 }
11570
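// Masked fused multiply-add, 213 form: dst = nds * dst + src.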
11571 void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11572 assert(VM_Version::supports_evex(), "");
11573 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11574 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11575 attributes.set_is_evex_instruction();
11576 attributes.set_embedded_opmask_register_specifier(mask);
11577 if (merge) {
11578 attributes.reset_is_clear_context();
11579 }
11580 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11581 emit_int16((unsigned char)0xA8, (0xC0 | encode));
11582 }
11583
11584 void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11585 InstructionMark im(this);
11586 assert(VM_Version::supports_evex(), "");
11587 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11588 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11589 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11590 attributes.set_is_evex_instruction();
11591 attributes.set_embedded_opmask_register_specifier(mask);
11592 if (merge) {
11593 attributes.reset_is_clear_context();
11594 }
11595 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11596 emit_int8((unsigned char)0xA8);
11597 emit_operand(dst, src, 0);
11598 }
11599
11600 void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11601 assert(VM_Version::supports_evex(), "");
11602 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11603 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11604 attributes.set_is_evex_instruction();
11605 attributes.set_embedded_opmask_register_specifier(mask);
11606 if (merge) {
11607 attributes.reset_is_clear_context();
11608 }
11609 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11610 emit_int16((unsigned char)0xA8, (0xC0 | encode));
11611 }
11612
11613 void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11614 InstructionMark im(this);
11615 assert(VM_Version::supports_evex(), "");
11616 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11617 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
11618 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
11619 attributes.set_is_evex_instruction();
11620 attributes.set_embedded_opmask_register_specifier(mask);
11621 if (merge) {
11622 attributes.reset_is_clear_context();
11623 }
11624 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11625 emit_int8((unsigned char)0xA8);
11626 emit_operand(dst, src, 0);
11627 }
11628
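// Masked cross-lane permutes. vpermb requires AVX512_VBMI and vpermw requires
// AVX512BW; vpermd/vpermq have no 128-bit form, hence the
// vector_len > AVX_128bit asserts.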
11629 void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11630 assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11632 attributes.set_is_evex_instruction();
11633 attributes.set_embedded_opmask_register_specifier(mask);
11634 if (merge) {
11635 attributes.reset_is_clear_context();
11636 }
11637 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11638 emit_int16((unsigned char)0x8D, (0xC0 | encode));
11639 }
11640
11641 void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11642 assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11643 InstructionMark im(this);
11644 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11645 attributes.set_is_evex_instruction();
11646 attributes.set_embedded_opmask_register_specifier(mask);
11647 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
11648 if (merge) {
11649 attributes.reset_is_clear_context();
11650 }
11651 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11652 emit_int8((unsigned char)0x8D);
11653 emit_operand(dst, src, 0);
11654 }
11655
11656 void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11657 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11658 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11659 attributes.set_is_evex_instruction();
11660 attributes.set_embedded_opmask_register_specifier(mask);
11661 if (merge) {
11662 attributes.reset_is_clear_context();
11663 }
11664 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11665 emit_int16((unsigned char)0x8D, (0xC0 | encode));
11666 }
11667
11668 void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11669 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11670 InstructionMark im(this);
11671 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11672 attributes.set_is_evex_instruction();
11673 attributes.set_embedded_opmask_register_specifier(mask);
11674 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
11675 if (merge) {
11676 attributes.reset_is_clear_context();
11677 }
11678 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11679 emit_int8((unsigned char)0x8D);
11680 emit_operand(dst, src, 0);
11681 }
11682
11683 void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11684 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
11685 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11686 attributes.set_is_evex_instruction();
11687 attributes.set_embedded_opmask_register_specifier(mask);
11688 if (merge) {
11689 attributes.reset_is_clear_context();
11690 }
11691 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11692 emit_int16(0x36, (0xC0 | encode));
11693 }
11694
11695 void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11696 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
11697 InstructionMark im(this);
11698 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11699 attributes.set_is_evex_instruction();
11700 attributes.set_embedded_opmask_register_specifier(mask);
11701 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
11702 if (merge) {
11703 attributes.reset_is_clear_context();
11704 }
11705 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11706 emit_int8(0x36);
11707 emit_operand(dst, src, 0);
11708 }
11709
11710 void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11711 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
11712 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11713 attributes.set_is_evex_instruction();
11714 attributes.set_embedded_opmask_register_specifier(mask);
11715 if (merge) {
11716 attributes.reset_is_clear_context();
11717 }
11718 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11719 emit_int16(0x36, (0xC0 | encode));
11720 }
11721
11722 void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
11723 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
11724 InstructionMark im(this);
11725 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11726 attributes.set_is_evex_instruction();
11727 attributes.set_embedded_opmask_register_specifier(mask);
11728 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
11729 if (merge) {
11730 attributes.reset_is_clear_context();
11731 }
11732 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11733 emit_int8(0x36);
11734 emit_operand(dst, src, 0);
11735 }
11736
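// Masked shifts by immediate. These use the 0F 71/72/73 group encodings, where
// the ModRM reg field holds an opcode extension supplied here by a fixed
// register: /6 (xmm6) for left shifts, /2 (xmm2) for logical right shifts,
// /4 (xmm4) for arithmetic right shifts. The destination is carried in the
// vvvv (nds) slot.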
11737 void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11738 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11739 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11740 attributes.set_is_evex_instruction();
11741 attributes.set_embedded_opmask_register_specifier(mask);
11742 if (merge) {
11743 attributes.reset_is_clear_context();
11744 }
11745 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11746 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
11747 }
11748
11749 void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11750 assert(VM_Version::supports_evex(), "");
11751 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11752 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11753 attributes.set_is_evex_instruction();
11754 attributes.set_embedded_opmask_register_specifier(mask);
11755 if (merge) {
11756 attributes.reset_is_clear_context();
11757 }
11758 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11759 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11760 }
11761
11762 void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11763 assert(VM_Version::supports_evex(), "");
11764 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11765 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11766 attributes.set_is_evex_instruction();
11767 attributes.set_embedded_opmask_register_specifier(mask);
11768 if (merge) {
11769 attributes.reset_is_clear_context();
11770 }
11771 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11772 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
11773 }
11774
11775 void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11776 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11777 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11778 attributes.set_is_evex_instruction();
11779 attributes.set_embedded_opmask_register_specifier(mask);
11780 if (merge) {
11781 attributes.reset_is_clear_context();
11782 }
11783 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11784 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
11785 }
11786
11787 void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11788 assert(VM_Version::supports_evex(), "");
11789 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11790 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11791 attributes.set_is_evex_instruction();
11792 attributes.set_embedded_opmask_register_specifier(mask);
11793 if (merge) {
11794 attributes.reset_is_clear_context();
11795 }
11796 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11797 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11798 }
11799
11800 void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11801 assert(VM_Version::supports_evex(), "");
11802 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11803 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11804 attributes.set_is_evex_instruction();
11805 attributes.set_embedded_opmask_register_specifier(mask);
11806 if (merge) {
11807 attributes.reset_is_clear_context();
11808 }
11809 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11810 emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
11811 }
11812
11813 void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11814 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11815 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11816 attributes.set_is_evex_instruction();
11817 attributes.set_embedded_opmask_register_specifier(mask);
11818 if (merge) {
11819 attributes.reset_is_clear_context();
11820 }
11821 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11822 emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
11823 }
11824
11825 void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11826 assert(VM_Version::supports_evex(), "");
11827 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11828 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11829 attributes.set_is_evex_instruction();
11830 attributes.set_embedded_opmask_register_specifier(mask);
11831 if (merge) {
11832 attributes.reset_is_clear_context();
11833 }
11834 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11835 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11836 }
11837
11838 void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11839 assert(VM_Version::supports_evex(), "");
11840 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11841 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11842 attributes.set_is_evex_instruction();
11843 attributes.set_embedded_opmask_register_specifier(mask);
11844 if (merge) {
11845 attributes.reset_is_clear_context();
11846 }
11847 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11848 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11849 }
11850
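// Masked shifts by the scalar count held in the low 64 bits of `src`.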
11851 void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11852 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11853 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11854 attributes.set_is_evex_instruction();
11855 attributes.set_embedded_opmask_register_specifier(mask);
11856 if (merge) {
11857 attributes.reset_is_clear_context();
11858 }
11859 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11860 emit_int16((unsigned char)0xF1, (0xC0 | encode));
11861 }
11862
11863 void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11864 assert(VM_Version::supports_evex(), "");
11865 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11866 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11867 attributes.set_is_evex_instruction();
11868 attributes.set_embedded_opmask_register_specifier(mask);
11869 if (merge) {
11870 attributes.reset_is_clear_context();
11871 }
11872 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11873 emit_int16((unsigned char)0xF2, (0xC0 | encode));
11874 }
11875
11876 void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11877 assert(VM_Version::supports_evex(), "");
11878 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11879 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11880 attributes.set_is_evex_instruction();
11881 attributes.set_embedded_opmask_register_specifier(mask);
11882 if (merge) {
11883 attributes.reset_is_clear_context();
11884 }
11885 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11886 emit_int16((unsigned char)0xF3, (0xC0 | encode));
11887 }
11888
11889 void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11890 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11891 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11892 attributes.set_is_evex_instruction();
11893 attributes.set_embedded_opmask_register_specifier(mask);
11894 if (merge) {
11895 attributes.reset_is_clear_context();
11896 }
11897 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11898 emit_int16((unsigned char)0xD1, (0xC0 | encode));
11899 }
11900
11901 void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11902 assert(VM_Version::supports_evex(), "");
11903 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11904 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11905 attributes.set_is_evex_instruction();
11906 attributes.set_embedded_opmask_register_specifier(mask);
11907 if (merge) {
11908 attributes.reset_is_clear_context();
11909 }
11910 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11911 emit_int16((unsigned char)0xD2, (0xC0 | encode));
11912 }
11913
11914 void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11915 assert(VM_Version::supports_evex(), "");
11916 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11917 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11918 attributes.set_is_evex_instruction();
11919 attributes.set_embedded_opmask_register_specifier(mask);
11920 if (merge) {
11921 attributes.reset_is_clear_context();
11922 }
11923 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11924 emit_int16((unsigned char)0xD3, (0xC0 | encode));
11925 }
11926
11927 void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11928 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11929 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11930 attributes.set_is_evex_instruction();
11931 attributes.set_embedded_opmask_register_specifier(mask);
11932 if (merge) {
11933 attributes.reset_is_clear_context();
11934 }
11935 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11936 emit_int16((unsigned char)0xE1, (0xC0 | encode));
11937 }
11938
11939 void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11940 assert(VM_Version::supports_evex(), "");
11941 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11942 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11943 attributes.set_is_evex_instruction();
11944 attributes.set_embedded_opmask_register_specifier(mask);
11945 if (merge) {
11946 attributes.reset_is_clear_context();
11947 }
11948 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11949 emit_int16((unsigned char)0xE2, (0xC0 | encode));
11950 }
11951
11952 void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11953 assert(VM_Version::supports_evex(), "");
11954 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11955 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11956 attributes.set_is_evex_instruction();
11957 attributes.set_embedded_opmask_register_specifier(mask);
11958 if (merge) {
11959 attributes.reset_is_clear_context();
11960 }
11961 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11962 emit_int16((unsigned char)0xE2, (0xC0 | encode));
11963 }
11964
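// Masked variable (per-element) shifts; the word-granularity forms
// (vpsllvw/vpsrlvw/vpsravw) require AVX512BW.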
11965 void Assembler::evpsllvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11966 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11967 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11968 attributes.set_is_evex_instruction();
11969 attributes.set_embedded_opmask_register_specifier(mask);
11970 if (merge) {
11971 attributes.reset_is_clear_context();
11972 }
11973 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11974 emit_int16(0x12, (0xC0 | encode));
11975 }
11976
11977 void Assembler::evpsllvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11978 assert(VM_Version::supports_evex(), "");
11979 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11980 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11981 attributes.set_is_evex_instruction();
11982 attributes.set_embedded_opmask_register_specifier(mask);
11983 if (merge) {
11984 attributes.reset_is_clear_context();
11985 }
11986 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11987 emit_int16(0x47, (0xC0 | encode));
11988 }
11989
11990 void Assembler::evpsllvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11991 assert(VM_Version::supports_evex(), "");
11992 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11993 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11994 attributes.set_is_evex_instruction();
11995 attributes.set_embedded_opmask_register_specifier(mask);
11996 if (merge) {
11997 attributes.reset_is_clear_context();
11998 }
11999 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12000 emit_int16(0x47, (0xC0 | encode));
12001 }
12002
12003 void Assembler::evpsrlvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12004 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12005 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12006 attributes.set_is_evex_instruction();
12007 attributes.set_embedded_opmask_register_specifier(mask);
12008 if (merge) {
12009 attributes.reset_is_clear_context();
12010 }
12011 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12012 emit_int16(0x10, (0xC0 | encode));
12013 }
12014
12015 void Assembler::evpsrlvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12016 assert(VM_Version::supports_evex(), "");
12017 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12018 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12019 attributes.set_is_evex_instruction();
12020 attributes.set_embedded_opmask_register_specifier(mask);
12021 if (merge) {
12022 attributes.reset_is_clear_context();
12023 }
12024 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12025 emit_int16(0x45, (0xC0 | encode));
12026 }
12027
12028 void Assembler::evpsrlvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12029 assert(VM_Version::supports_evex(), "");
12030 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12031 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12032 attributes.set_is_evex_instruction();
12033 attributes.set_embedded_opmask_register_specifier(mask);
12034 if (merge) {
12035 attributes.reset_is_clear_context();
12036 }
12037 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12038 emit_int16(0x45, (0xC0 | encode));
12039 }
12040
12041 void Assembler::evpsravw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12042 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12043 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12044 attributes.set_is_evex_instruction();
12045 attributes.set_embedded_opmask_register_specifier(mask);
12046 if (merge) {
12047 attributes.reset_is_clear_context();
12048 }
12049 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12050 emit_int16(0x11, (0xC0 | encode));
12051 }
12052
12053 void Assembler::evpsravd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12054 assert(VM_Version::supports_evex(), "");
12055 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12056 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12057 attributes.set_is_evex_instruction();
12058 attributes.set_embedded_opmask_register_specifier(mask);
12059 if (merge) {
12060 attributes.reset_is_clear_context();
12061 }
12062 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12063 emit_int16(0x46, (0xC0 | encode));
12064 }
12065
12066 void Assembler::evpsravq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12067 assert(VM_Version::supports_evex(), "");
12068 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12069 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12070 attributes.set_is_evex_instruction();
12071 attributes.set_embedded_opmask_register_specifier(mask);
12072 if (merge) {
12073 attributes.reset_is_clear_context();
12074 }
12075 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12076 emit_int16(0x46, (0xC0 | encode));
12077 }
12078
12079 void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12080 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12081 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12082 attributes.set_is_evex_instruction();
12083 attributes.set_embedded_opmask_register_specifier(mask);
12084 if (merge) {
12085 attributes.reset_is_clear_context();
12086 }
12087 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12088 emit_int16(0x38, (0xC0 | encode));
12089 }
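
// Illustrative masked use: evpminsb(xmm5, k3, xmm5, xmm6, /* merge */ true, Assembler::AVX_512bit)
// takes the signed byte-wise minimum only in lanes enabled by k3 and, with merge
// masking, leaves the disabled bytes of zmm5 unchanged.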
12090
12091 void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12092 assert(VM_Version::supports_avx512bw(), "");
12093 InstructionMark im(this);
12094 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12095 attributes.set_is_evex_instruction();
12096 attributes.set_embedded_opmask_register_specifier(mask);
12097 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
12098 if (merge) {
12099 attributes.reset_is_clear_context();
12100 }
12101 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12102 emit_int8(0x38);
12103 emit_operand(dst, src, 0);
12104 }
12105
12106 void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12107 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12108 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12109 attributes.set_is_evex_instruction();
12110 attributes.set_embedded_opmask_register_specifier(mask);
12111 if (merge) {
12112 attributes.reset_is_clear_context();
12113 }
12114 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12115 emit_int16((unsigned char)0xEA, (0xC0 | encode));
12116 }
12117
12118 void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12119 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12120 InstructionMark im(this);
12121 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12122 attributes.set_is_evex_instruction();
12123 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
12124 attributes.set_embedded_opmask_register_specifier(mask);
12125 if (merge) {
12126 attributes.reset_is_clear_context();
12127 }
12128 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12129 emit_int8((unsigned char)0xEA);
12130 emit_operand(dst, src, 0);
12131 }
12132
12133 void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12134 assert(VM_Version::supports_evex(), "");
12135 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12136 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12137 attributes.set_is_evex_instruction();
12138 attributes.set_embedded_opmask_register_specifier(mask);
12139 if (merge) {
12140 attributes.reset_is_clear_context();
12141 }
12142 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12143 emit_int16(0x39, (0xC0 | encode));
12144 }
12145
12146 void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12147 assert(VM_Version::supports_evex(), "");
12148 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12149 InstructionMark im(this);
12150 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12151 attributes.set_is_evex_instruction();
12152 attributes.set_embedded_opmask_register_specifier(mask);
12153 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12154 if (merge) {
12155 attributes.reset_is_clear_context();
12156 }
12157 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12158 emit_int8(0x39);
12159 emit_operand(dst, src, 0);
12160 }
12161
12162 void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12163 assert(VM_Version::supports_evex(), "");
12164 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12165 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12166 attributes.set_is_evex_instruction();
12167 attributes.set_embedded_opmask_register_specifier(mask);
12168 if (merge) {
12169 attributes.reset_is_clear_context();
12170 }
12171 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12172 emit_int16(0x39, (0xC0 | encode));
12173 }
12174
12175 void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12176 assert(VM_Version::supports_evex(), "");
12177 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12178 InstructionMark im(this);
12179 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12180 attributes.set_is_evex_instruction();
12181 attributes.set_embedded_opmask_register_specifier(mask);
12182 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12183 if (merge) {
12184 attributes.reset_is_clear_context();
12185 }
12186 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12187 emit_int8(0x39);
12188 emit_operand(dst, src, 0);
12189 }
12190
12192 void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12193 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12194 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12195 attributes.set_is_evex_instruction();
12196 attributes.set_embedded_opmask_register_specifier(mask);
12197 if (merge) {
12198 attributes.reset_is_clear_context();
12199 }
12200 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12201 emit_int16(0x3C, (0xC0 | encode));
12202 }
12203
12204 void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12205 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12206 InstructionMark im(this);
12207 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12208 attributes.set_is_evex_instruction();
12209 attributes.set_embedded_opmask_register_specifier(mask);
12210 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
12211 if (merge) {
12212 attributes.reset_is_clear_context();
12213 }
12214 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12215 emit_int8(0x3C);
12216 emit_operand(dst, src, 0);
12217 }
12218
12219 void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12220 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12221 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12222 attributes.set_is_evex_instruction();
12223 attributes.set_embedded_opmask_register_specifier(mask);
12224 if (merge) {
12225 attributes.reset_is_clear_context();
12226 }
12227 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12228 emit_int16((unsigned char)0xEE, (0xC0 | encode));
12229 }
12230
12231 void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12232 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
12233 InstructionMark im(this);
12234 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12235 attributes.set_is_evex_instruction();
12236 attributes.set_embedded_opmask_register_specifier(mask);
12237 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
12238 if (merge) {
12239 attributes.reset_is_clear_context();
12240 }
12241 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12242 emit_int8((unsigned char)0xEE);
12243 emit_operand(dst, src, 0);
12244 }
12245
12246 void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12247 assert(VM_Version::supports_evex(), "");
12248 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12249 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12250 attributes.set_is_evex_instruction();
12251 attributes.set_embedded_opmask_register_specifier(mask);
12252 if (merge) {
12253 attributes.reset_is_clear_context();
12254 }
12255 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12256 emit_int16(0x3D, (0xC0 | encode));
12257 }
12258
12259 void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12260 assert(VM_Version::supports_evex(), "");
12261 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12262 InstructionMark im(this);
12263 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12264 attributes.set_is_evex_instruction();
12265 attributes.set_embedded_opmask_register_specifier(mask);
12266 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12267 if (merge) {
12268 attributes.reset_is_clear_context();
12269 }
12270 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12271 emit_int8(0x3D);
12272 emit_operand(dst, src, 0);
12273 }
12274
12275 void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
12276 assert(VM_Version::supports_evex(), "");
12277 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12278 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12279 attributes.set_is_evex_instruction();
12280 attributes.set_embedded_opmask_register_specifier(mask);
12281 if (merge) {
12282 attributes.reset_is_clear_context();
12283 }
12284 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12285 emit_int16(0x3D, (0xC0 | encode));
12286 }
12287
12288 void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
12289 assert(VM_Version::supports_evex(), "");
12290 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12291 InstructionMark im(this);
12292 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12293 attributes.set_is_evex_instruction();
12294 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12295 attributes.set_embedded_opmask_register_specifier(mask);
12296 if (merge) {
12297 attributes.reset_is_clear_context();
12298 }
12299 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12300 emit_int8(0x3D);
12301 emit_operand(dst, src, 0);
12302 }
12303
12304 void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
12305 assert(VM_Version::supports_evex(), "requires EVEX support");
12306 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
12307 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12308 attributes.set_is_evex_instruction();
12309 attributes.set_embedded_opmask_register_specifier(mask);
12310 if (merge) {
12311 attributes.reset_is_clear_context();
12312 }
12313 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12314 emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
12315 }
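
// The imm8 of vpternlog{d,q} is an 8-entry truth table indexed by
// (dst_bit << 2) | (src2_bit << 1) | src3_bit. For example imm8 = 0x96
// (binary 10010110) yields dst = dst ^ src2 ^ src3, so the illustrative call
//   evpternlogd(xmm0, 0x96, k0, xmm1, xmm2, /* merge */ true, Assembler::AVX_512bit);
// computes an unmasked three-way XOR across all dword lanes.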
12316
12317 void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
12318 assert(VM_Version::supports_evex(), "requires EVEX support");
12319 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
12320 assert(dst != xnoreg, "sanity");
12321 InstructionMark im(this);
12322 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12323 attributes.set_is_evex_instruction();
12324 attributes.set_embedded_opmask_register_specifier(mask);
12325 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12326 if (merge) {
12327 attributes.reset_is_clear_context();
12328 }
12329 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12330 emit_int8(0x25);
12331 emit_operand(dst, src3, 1);
12332 emit_int8(imm8);
12333 }
12334
12335 void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
12336 assert(VM_Version::supports_evex(), "requires EVEX support");
12337 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
12338 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12339 attributes.set_is_evex_instruction();
12340 attributes.set_embedded_opmask_register_specifier(mask);
12341 if (merge) {
12342 attributes.reset_is_clear_context();
12343 }
12344 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12345 emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
12346 }
12347
12348 void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
12349 assert(VM_Version::supports_evex(), "requires EVEX support");
12350 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
12351 assert(dst != xnoreg, "sanity");
12352 InstructionMark im(this);
12353 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12354 attributes.set_is_evex_instruction();
12355 attributes.set_embedded_opmask_register_specifier(mask);
12356 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12357 if (merge) {
12358 attributes.reset_is_clear_context();
12359 }
12360 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12361 emit_int8(0x25);
12362 emit_operand(dst, src3, 1);
12363 emit_int8(imm8);
12364 }
12365
12366 void Assembler::gf2p8affineqb(XMMRegister dst, XMMRegister src, int imm8) {
12367 assert(VM_Version::supports_gfni(), "");
12368 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
12369 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12370 emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
12371 }
12372
12373 void Assembler::vgf2p8affineqb(XMMRegister dst, XMMRegister src2, XMMRegister src3, int imm8, int vector_len) {
12374 assert(VM_Version::supports_gfni(), "requires GFNI support");
12375 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12376 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12377 emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
12378 }
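
// For reference, per the GFNI definition: the second source supplies an 8x8 bit
// matrix per qword, which is applied over GF(2) to each byte of the first source
// and then XORed with imm8. The identity matrix with imm8 = 0 passes bytes through
// unchanged, a handy sanity check for the encoding.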
12379
12380 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
12381 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
12382 assert(UseAVX >= 2, "");
12383 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12384 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12385 emit_int16(0x58, (0xC0 | encode));
12386 }
12387
12388 void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
12389 assert(VM_Version::supports_avx2(), "");
12390 assert(dst != xnoreg, "sanity");
12391 InstructionMark im(this);
12392 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12393 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
12394 // swap src<->dst for encoding
12395 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12396 emit_int8(0x58);
12397 emit_operand(dst, src, 0);
12398 }
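
// Illustrative use: with a dword constant at [rbx],
//   vpbroadcastd(xmm0, Address(rbx, 0), Assembler::AVX_256bit);
// replicates that dword into all eight 32-bit lanes of ymm0.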
12399
12400 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
12401 void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
12402 assert(VM_Version::supports_avx2(), "");
12403 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12404 attributes.set_rex_vex_w_reverted();
12405 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12406 emit_int16(0x59, (0xC0 | encode));
12407 }
12408
12409 void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
12410 assert(VM_Version::supports_avx2(), "");
12411 assert(dst != xnoreg, "sanity");
12412 InstructionMark im(this);
12413 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12414 attributes.set_rex_vex_w_reverted();
12415 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
12416 // swap src<->dst for encoding
12417 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12418 emit_int8(0x59);
12419 emit_operand(dst, src, 0);
12420 }
12421
12422 void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) {
12423 assert(vector_len != Assembler::AVX_128bit, "");
12424 assert(VM_Version::supports_evex(), "");
12425 assert(dst != xnoreg, "sanity");
12426 InstructionMark im(this);
12427 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12428 attributes.set_rex_vex_w_reverted();
12429 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
12430 // swap src<->dst for encoding
12431 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12432 emit_int8(0x5A);
12433 emit_operand(dst, src, 0);
12434 }
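
// Note on EVEX_T4 above: under EVEX disp8*N compression, a one-byte displacement
// for this tuple4-of-32-bit (16-byte) load is scaled by 16, so disp8 = 2 addresses
// base + 32; a displacement that is not a multiple of 16 falls back to disp32.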
12435
12436 void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
12437 assert(vector_len != Assembler::AVX_128bit, "");
12438 assert(VM_Version::supports_avx512dq(), "");
12439 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12440 attributes.set_rex_vex_w_reverted();
12441 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12442 emit_int16(0x5A, (0xC0 | encode));
12443 }
12444
12445 void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
12446 assert(vector_len != Assembler::AVX_128bit, "");
12447 assert(VM_Version::supports_avx512dq(), "");
12448 assert(dst != xnoreg, "sanity");
12449 InstructionMark im(this);
12450 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12451 attributes.set_rex_vex_w_reverted();
12452 attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
12453 // swap src<->dst for encoding
12454 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12455 emit_int8(0x5A);
12456 emit_operand(dst, src, 0);
12457 }
12458
12459 void Assembler::vbroadcasti128(XMMRegister dst, Address src, int vector_len) {
12460 assert(VM_Version::supports_avx2(), "");
12461 assert(vector_len == AVX_256bit, "");
12462 assert(dst != xnoreg, "sanity");
12463 InstructionMark im(this);
12464 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12465 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
12466 // swap src<->dst for encoding
12467 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12468 emit_int8(0x5A);
12469 emit_operand(dst, src, 0);
12470 }
12471
12472 // scalar single/double precision replicate
12473
12474 // duplicate single precision data from src into programmed locations in dest : requires AVX512VL
12475 void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
12476 assert(VM_Version::supports_avx2(), "");
12477 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12478 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12479 emit_int16(0x18, (0xC0 | encode));
12480 }
12481
12482 void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) {
12483 assert(VM_Version::supports_avx(), "");
12484 assert(dst != xnoreg, "sanity");
12485 InstructionMark im(this);
12486 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12487 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
12488 // swap src<->dst for encoding
12489 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12490 emit_int8(0x18);
12491 emit_operand(dst, src, 0);
12492 }
12493
12494 // duplicate double precision data from src into programmed locations in dest : requires AVX512VL
12495 void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
12496 assert(VM_Version::supports_avx2(), "");
12497 assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
12498 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12499 attributes.set_rex_vex_w_reverted();
12500 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12501 emit_int16(0x19, (0xC0 | encode));
12502 }
12503
12504 void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) {
12505 assert(VM_Version::supports_avx(), "");
12506 assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
12507 assert(dst != xnoreg, "sanity");
12508 InstructionMark im(this);
12509 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12510 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
12511 attributes.set_rex_vex_w_reverted();
12512 // swap src<->dst for encoding
12513 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12514 emit_int8(0x19);
12515 emit_operand(dst, src, 0);
12516 }
12517
12518 void Assembler::vbroadcastf128(XMMRegister dst, Address src, int vector_len) {
12519 assert(VM_Version::supports_avx(), "");
12520 assert(vector_len == AVX_256bit, "");
12521 assert(dst != xnoreg, "sanity");
12522 InstructionMark im(this);
12523 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12524 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
12525 // swap src<->dst for encoding
12526 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12527 emit_int8(0x1A);
12528 emit_operand(dst, src, 0);
12529 }
12530
12531 void Assembler::evbroadcastf64x2(XMMRegister dst, Address src, int vector_len) {
12532 assert(VM_Version::supports_avx512dq(), "");
12533 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
12534 assert(dst != xnoreg, "sanity");
12535 InstructionMark im(this);
12536 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12537 attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
12538 attributes.set_is_evex_instruction();
12539 // swap src<->dst for encoding
12540 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12541 emit_int8(0x1A);
12542 emit_operand(dst, src, 0);
12543 }
12544
12546 // gpr source broadcast forms
12547
12548 // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
12549 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
12550 assert(VM_Version::supports_avx512bw(), "");
12551 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
12552 attributes.set_is_evex_instruction();
12553 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
12554 emit_int16(0x7A, (0xC0 | encode));
12555 }
12556
12557 // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
12558 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
12559 assert(VM_Version::supports_avx512bw(), "");
12560 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
12561 attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
12563 emit_int16(0x7B, (0xC0 | encode));
12564 }
12565
12566 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
12567 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
12568 assert(VM_Version::supports_evex(), "");
12569 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12570 attributes.set_is_evex_instruction();
12571 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
12572 emit_int16(0x7C, (0xC0 | encode));
12573 }
12574
12575 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
12576 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
12577 assert(VM_Version::supports_evex(), "");
12578 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12579 attributes.set_is_evex_instruction();
12580 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
12581 emit_int16(0x7C, (0xC0 | encode));
12582 }
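
// Illustrative use of the GPR-source forms:
//   evpbroadcastq(xmm0, rax, Assembler::AVX_512bit);
// replicates the 64-bit value in rax into all eight qword lanes of zmm0,
// encoded as EVEX.512.66.0F38.W1 7C /r with a GPR rm operand.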
12583
12584 void Assembler::vpgatherdd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
12585 assert(VM_Version::supports_avx2(), "");
12586 assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
12587 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
12588 assert(dst != xnoreg, "sanity");
12589 assert(src.isxmmindex(),"expected to be xmm index");
12590 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12591 InstructionMark im(this);
12592 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12593 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12594 emit_int8((unsigned char)0x90);
12595 emit_operand(dst, src, 0);
12596 }
12597
12598 void Assembler::vpgatherdq(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
12599 assert(VM_Version::supports_avx2(), "");
12600 assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
12601 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
12602 assert(dst != xnoreg, "sanity");
12603 assert(src.isxmmindex(),"expected to be xmm index");
12604 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12605 InstructionMark im(this);
12606 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12607 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12608 emit_int8((unsigned char)0x90);
12609 emit_operand(dst, src, 0);
12610 }
12611
12612 void Assembler::vgatherdpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
12613 assert(VM_Version::supports_avx2(), "");
12614 assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
12615 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
12616 assert(dst != xnoreg, "sanity");
12617 assert(src.isxmmindex(),"expected to be xmm index");
12618 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12619 InstructionMark im(this);
12620 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12621 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12622 emit_int8((unsigned char)0x92);
12623 emit_operand(dst, src, 0);
12624 }
12625
12626 void Assembler::vgatherdps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
12627 assert(VM_Version::supports_avx2(), "");
12628 assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
12629 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
12630 assert(dst != xnoreg, "sanity");
12631 assert(src.isxmmindex(),"expected to be xmm index");
12632 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12633 InstructionMark im(this);
12634 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
12635 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12636 emit_int8((unsigned char)0x92);
12637 emit_operand(dst, src, 0);
12638 }
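
// Note on the vpgather*/vgather* forms above: the XMM mask operand is both input
// and output. Only lanes whose mask sign bit is set are loaded, and the mask is
// cleared as lanes complete, so callers must rematerialize it before each gather.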

void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
12640 assert(VM_Version::supports_evex(), "");
12641 assert(dst != xnoreg, "sanity");
12642 assert(src.isxmmindex(),"expected to be xmm index");
12643 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12644 assert(mask != k0, "instruction will #UD if mask is in k0");
12645 InstructionMark im(this);
12646 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12647 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
12648 attributes.reset_is_clear_context();
12649 attributes.set_embedded_opmask_register_specifier(mask);
12650 attributes.set_is_evex_instruction();
12651 // swap src<->dst for encoding
12652 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12653 emit_int8((unsigned char)0x90);
12654 emit_operand(dst, src, 0);
12655 }
12656
12657 void Assembler::evpgatherdq(XMMRegister dst, KRegister mask, Address src, int vector_len) {
12658 assert(VM_Version::supports_evex(), "");
12659 assert(dst != xnoreg, "sanity");
12660 assert(src.isxmmindex(),"expected to be xmm index");
12661 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12662 assert(mask != k0, "instruction will #UD if mask is in k0");
12663 InstructionMark im(this);
12664 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12665 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
12666 attributes.reset_is_clear_context();
12667 attributes.set_embedded_opmask_register_specifier(mask);
12668 attributes.set_is_evex_instruction();
12669 // swap src<->dst for encoding
12670 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12671 emit_int8((unsigned char)0x90);
12672 emit_operand(dst, src, 0);
12673 }
12674
12675 void Assembler::evgatherdpd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
12676 assert(VM_Version::supports_evex(), "");
12677 assert(dst != xnoreg, "sanity");
12678 assert(src.isxmmindex(),"expected to be xmm index");
12679 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12680 assert(mask != k0, "instruction will #UD if mask is in k0");
12681 InstructionMark im(this);
12682 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12683 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
12684 attributes.reset_is_clear_context();
12685 attributes.set_embedded_opmask_register_specifier(mask);
12686 attributes.set_is_evex_instruction();
12687 // swap src<->dst for encoding
12688 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12689 emit_int8((unsigned char)0x92);
12690 emit_operand(dst, src, 0);
12691 }
12692
12693 void Assembler::evgatherdps(XMMRegister dst, KRegister mask, Address src, int vector_len) {
12694 assert(VM_Version::supports_evex(), "");
12695 assert(dst != xnoreg, "sanity");
12696 assert(src.isxmmindex(),"expected to be xmm index");
12697 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
12698 assert(mask != k0, "instruction will #UD if mask is in k0");
12699 InstructionMark im(this);
12700 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12701 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
12702 attributes.reset_is_clear_context();
12703 attributes.set_embedded_opmask_register_specifier(mask);
12704 attributes.set_is_evex_instruction();
12705 // swap src<->dst for encoding
12706 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12707 emit_int8((unsigned char)0x92);
12708 emit_operand(dst, src, 0);
12709 }
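
// The EVEX gathers follow the same contract with a k-register: bits of the mask
// are cleared as their lanes complete, and k0 cannot be used as the gather mask,
// which is what the asserts above enforce.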
12710
12711 void Assembler::evpscatterdd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
12712 assert(VM_Version::supports_evex(), "");
12713 assert(mask != k0, "instruction will #UD if mask is in k0");
12714 InstructionMark im(this);
12715 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12716 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
12717 attributes.reset_is_clear_context();
12718 attributes.set_embedded_opmask_register_specifier(mask);
12719 attributes.set_is_evex_instruction();
12720 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12721 emit_int8((unsigned char)0xA0);
12722 emit_operand(src, dst, 0);
12723 }
12724
12725 void Assembler::evpscatterdq(Address dst, KRegister mask, XMMRegister src, int vector_len) {
12726 assert(VM_Version::supports_evex(), "");
12727 assert(mask != k0, "instruction will #UD if mask is in k0");
12728 InstructionMark im(this);
12729 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12730 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
12731 attributes.reset_is_clear_context();
12732 attributes.set_embedded_opmask_register_specifier(mask);
12733 attributes.set_is_evex_instruction();
12734 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12735 emit_int8((unsigned char)0xA0);
12736 emit_operand(src, dst, 0);
12737 }
12738
12739 void Assembler::evscatterdps(Address dst, KRegister mask, XMMRegister src, int vector_len) {
12740 assert(VM_Version::supports_evex(), "");
12741 assert(mask != k0, "instruction will #UD if mask is in k0");
12742 InstructionMark im(this);
12743 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12744 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
12745 attributes.reset_is_clear_context();
12746 attributes.set_embedded_opmask_register_specifier(mask);
12747 attributes.set_is_evex_instruction();
12748 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12749 emit_int8((unsigned char)0xA2);
12750 emit_operand(src, dst, 0);
12751 }
12752
12753 void Assembler::evscatterdpd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
12754 assert(VM_Version::supports_evex(), "");
12755 assert(mask != k0, "instruction will #UD if mask is in k0");
12756 InstructionMark im(this);
12757 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12758 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
12759 attributes.reset_is_clear_context();
12760 attributes.set_embedded_opmask_register_specifier(mask);
12761 attributes.set_is_evex_instruction();
12762 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12763 emit_int8((unsigned char)0xA2);
12764 emit_operand(src, dst, 0);
12765 }

// Carry-Less Multiplication Quadword
12767 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
12768 assert(VM_Version::supports_clmul(), "");
12769 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12770 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12771 emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
12772 }
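
// The imm8 ('mask') selects which 64-bit halves are multiplied: bit 0 picks the
// qword of the first source, bit 4 that of the second. Illustratively,
//   pclmulqdq(xmm0, xmm1, 0x11);
// carry-less-multiplies the two high qwords into a 128-bit polynomial product.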
12773
12774 // Carry-Less Multiplication Quadword
12775 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
12776 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
12777 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12778 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12779 emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
12780 }
12781
12782 void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
12783 assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
12784 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12785 attributes.set_is_evex_instruction();
12786 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12787 emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
12788 }
12789
12790 void Assembler::vzeroupper_uncached() {
12791 if (VM_Version::supports_vzeroupper()) {
12792 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12793 (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
12794 emit_int8(0x77);
12795 }
12796 }
12797
12798 void Assembler::vfpclassss(KRegister kdst, XMMRegister src, uint8_t imm8) {
12799 // Encoding: EVEX.LIG.66.0F3A.W0 67 /r ib
12800 assert(VM_Version::supports_evex(), "");
12801 assert(VM_Version::supports_avx512dq(), "");
12802 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
12803 attributes.set_is_evex_instruction();
12804 int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12805 emit_int24((unsigned char)0x67, (unsigned char)(0xC0 | encode), imm8);
12806 }
12807
12808 void Assembler::vfpclasssd(KRegister kdst, XMMRegister src, uint8_t imm8) {
12809 // Encoding: EVEX.LIG.66.0F3A.W1 67 /r ib
12810 assert(VM_Version::supports_evex(), "");
12811 assert(VM_Version::supports_avx512dq(), "");
12812 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
12813 attributes.set_is_evex_instruction();
12814 int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12815 emit_int24((unsigned char)0x67, (unsigned char)(0xC0 | encode), imm8);
12816 }
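
// The imm8 of vfpclass selects which classes set the destination k-bit; per the
// SDM, each bit enables one category (QNaN, +0, -0, +Inf, -Inf, denormal,
// finite-negative, SNaN from bit 0 upward). So, illustratively,
//   vfpclassss(k2, xmm1, 0x18);
// tests a scalar float for +/-infinity.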
12817
// fld_x and fstp_x use the 80-bit extended-precision x87 forms DB /5 and DB /7;
// the GPR passed to emit_operand32 merely supplies the ModRM reg digit.
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr, 0);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr, 0);
}
12829
12830 void Assembler::emit_operand32(Register reg, Address adr, int post_addr_length) {
12831 assert(reg->encoding() < 8, "no extended registers");
12832 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
12833 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
12834 }
12835
12836 void Assembler::fld_d(Address adr) {
12837 InstructionMark im(this);
12838 emit_int8((unsigned char)0xDD);
12839 emit_operand32(rax, adr, 0);
12840 }
12841
12842 void Assembler::fprem() {
12843 emit_int16((unsigned char)0xD9, (unsigned char)0xF8);
12844 }
12845
12846 void Assembler::fnstsw_ax() {
12847 emit_int16((unsigned char)0xDF, (unsigned char)0xE0);
12848 }
12849
12850 void Assembler::fstp_d(Address adr) {
12851 InstructionMark im(this);
12852 emit_int8((unsigned char)0xDD);
12853 emit_operand32(rbx, adr, 0);
12854 }
12855
12856 void Assembler::fstp_d(int index) {
12857 emit_farith(0xDD, 0xD8, index);
12858 }
12859
12860 void Assembler::emit_farith(int b1, int b2, int i) {
12861 assert(isByte(b1) && isByte(b2), "wrong opcode");
12862 assert(0 <= i && i < 8, "illegal stack offset");
12863 emit_int16(b1, b2 + i);
12864 }
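
// Worked example: fstp_d(3) goes through emit_farith(0xDD, 0xD8, 3) and emits
// DD DB, i.e. "fstp st(3)"; the stack index is simply added to the second
// opcode byte.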
12865
12866 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
12867 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
12868 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
12869 static int simd_opc[4] = { 0, 0, 0x38, 0x3A };
12870
// Emit a legacy SSE encoding (SIMD prefix byte, REX prefix and opcode-map
// escape bytes) equivalent to the given VEX encoding components.
12872 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
12873 if (pre > 0) {
12874 emit_int8(simd_pre[pre]);
12875 }
12876 if (rex_w) {
12877 prefixq(adr, xreg);
12878 } else {
12879 prefix(adr, xreg);
12880 }
12881 if (opc > 0) {
12882 emit_int8(0x0F);
12883 int opc2 = simd_opc[opc];
12884 if (opc2 > 0) {
12885 emit_int8(opc2);
12886 }
12887 }
12888 }
12889
12890 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
12891 if (pre > 0) {
12892 emit_int8(simd_pre[pre]);
12893 }
12894 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
12895 if (opc > 0) {
12896 emit_int8(0x0F);
12897 int opc2 = simd_opc[opc];
12898 if (opc2 > 0) {
12899 emit_int8(opc2);
12900 }
12901 }
12902 return encode;
12903 }
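
// Worked example: encoding paddd xmm0, xmm1 through the helper above with
// pre = VEX_SIMD_66 and opc = VEX_OPCODE_0F emits 66, any needed REX, then 0F;
// the caller appends FE /r, giving the legacy SSE sequence 66 0F FE C1.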
12904
12906 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
12907 int vector_len = _attributes->get_vector_len();
12908 bool vex_w = _attributes->is_rex_vex_w();
12909 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
12910 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
12911 byte1 = (~byte1) & 0xE0;
12912 byte1 |= opc;
12913
12914 int byte2 = ((~nds_enc) & 0xf) << 3;
12915 byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;
12916
12917 emit_int24((unsigned char)VEX_3bytes, byte1, byte2);
12918 } else {
12919 int byte1 = vex_r ? VEX_R : 0;
12920 byte1 = (~byte1) & 0x80;
12921 byte1 |= ((~nds_enc) & 0xf) << 3;
12922 byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre;
12923 emit_int16((unsigned char)VEX_2bytes, byte1);
12924 }
12925 }
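
// Worked example: vxorps xmm0, xmm0, xmm0 (pre = NONE, opc = 0F, nothing extended)
// takes the two-byte branch above: byte1 = 0x80 (~R) | 0x78 (~vvvv) | 0 (L = 0,
// pp = 00) = 0xF8, so the instruction assembles to C5 F8 57 C0.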
12926
// This emits the 4-byte EVEX prefix encoding
12928 void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool eevex_b, bool evex_v,
12929 bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool no_flags) {
  // EVEX instructions begin with the 4-byte escape 0x62 (EVEX_4bytes),
  // emitted as the first byte of the emit_int32 at the end of this method.
12932
12933 bool vex_w = _attributes->is_rex_vex_w();
12934 int evex_encoding = (vex_w ? VEX_W : 0);
12935 // EVEX.b is not currently used for broadcast of single element or data rounding modes
12936 _attributes->set_evex_encoding(evex_encoding);
12937
  // P0: byte 2, laid out as RXBR'0mmm; R, X, B and R' are stored
  // inverted (one's complement) rather than as-is, hence the (~byte2) below
12940 int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
12941 byte2 = (~byte2) & 0xF0;
12942 byte2 |= eevex_b ? EEVEX_B : 0;
  // confine the opc opcode-map selector to the low mm bits,
  // choosing among {0F, 0F_38, 0F_3A, 0F_3C}
12945 byte2 |= opc;
12946
12947 // P1: byte 3 as Wvvvv1pp
12948 int byte3 = ((~nds_enc) & 0xf) << 3;
12949 byte3 |= (eevex_x ? 0 : EEVEX_X);
12950 byte3 |= (vex_w & 1) << 7;
  // confine the pre SIMD-prefix selector to the low two pp bits,
  // choosing among {66, F3, F2}
12953 byte3 |= pre;
12954
  // P2: byte 4 as zL'Lb V'aaa or, for APX, 00LXVF00 where V = V4, X (extended context) = ND and F = NF (no flags)
12956 int byte4 = 0;
12957 if (no_flags) {
12958 assert(_attributes->is_no_reg_mask(), "mask register not supported with no_flags");
12959 byte4 |= 0x4;
12960 } else {
12961 // kregs are implemented in the low 3 bits as aaa
12962 byte4 = (_attributes->is_no_reg_mask()) ?
12963 0 :
12964 _attributes->get_embedded_opmask_register_specifier();
12965 }
  // EVEX.V' for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0 : EVEX_V);
  // third is EVEX.b for broadcast actions
  byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth is EVEX.L'L for vector length: 0 is 128-bit, 1 is 256-bit, 2 is 512-bit; 1024-bit is not currently supported
  byte4 |= (_attributes->get_vector_len() & 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  if (!_attributes->is_no_reg_mask() &&
      _attributes->get_embedded_opmask_register_specifier() != 0) {
    byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
  }
12977 emit_int32(EVEX_4bytes, byte2, byte3, byte4);
12978 }
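
// Worked example (unextended registers, no masking): vpxord zmm0, zmm0, zmm0
// reaches here with pre = 66 and opc = 0F, giving byte2 = 0xF1 (inverted RXBR'
// plus mm = 01), byte3 = 0x7D (W = 0, ~vvvv = 1111, fixed 1, pp = 01) and
// byte4 = 0x48 (L'L = 10 for 512-bit, inverted V'), i.e. the familiar prefix
// 62 F1 7D 48 ahead of the EF /r opcode bytes.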
12979
12980 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool nds_is_ndd, bool no_flags) {
12981 if (adr.base_needs_rex2() || adr.index_needs_rex2() || nds_is_ndd || no_flags) {
12982 assert(UseAPX, "APX features not enabled");
12983 }
12984 if (nds_is_ndd) attributes->set_extended_context();
12985 bool is_extended = adr.base_needs_rex2() || adr.index_needs_rex2() || nds_enc >= 16 || xreg_enc >= 16 || nds_is_ndd;
12986 bool vex_r = (xreg_enc & 8) == 8;
12987 bool vex_b = adr.base_needs_rex();
12988 bool vex_x;
12989 if (adr.isxmmindex()) {
12990 vex_x = adr.xmmindex_needs_rex();
12991 } else {
12992 vex_x = adr.index_needs_rex();
12993 }
12994 set_attributes(attributes);
  // For an EVEX-capable instruction that is not marked as pure EVEX, check whether it
  // may instead be emitted in legacy (VEX) mode, i.e. whether its vector length and
  // register resources fit there.
  // Pure EVEX instructions have is_evex_instruction set in their definition.
12998 if (!attributes->is_legacy_mode()) {
12999 if (UseAVX > 2 && !attributes->is_evex_instruction()) {
13000 if ((attributes->get_vector_len() != AVX_512bit) && !is_extended) {
13001 attributes->set_is_legacy_mode();
13002 }
13003 }
13004 }
13005
13006 if (UseAVX > 2) {
13007 assert(((!attributes->uses_vl()) ||
13008 (attributes->get_vector_len() == AVX_512bit) ||
13009 (!_legacy_mode_vl) ||
13010 (attributes->is_legacy_mode())),"XMM register should be 0-15");
13011 assert((!is_extended || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
13012 }
13013
13014 if (UseAVX > 2 && !attributes->is_legacy_mode())
13015 {
13016 bool evex_r = (xreg_enc >= 16);
13017 bool evex_v;
13018 // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
13019 if (adr.isxmmindex()) {
      evex_v = (adr._xmmindex->encoding() > 15);
13021 } else {
13022 evex_v = (nds_enc >= 16);
13023 }
13024 bool eevex_x = adr.index_needs_rex2();
13025 bool eevex_b = adr.base_needs_rex2();
13026 attributes->set_is_evex_instruction();
13027 evex_prefix(vex_r, vex_b, vex_x, evex_r, eevex_b, evex_v, eevex_x, nds_enc, pre, opc, no_flags);
13028 } else {
13029 if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
13030 attributes->set_rex_vex_w(false);
13031 }
13032 vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
13033 }
13034 }
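
// Demotion example for the path above: broadly, a 256-bit instruction whose
// registers all live in xmm0-15 and which is not pure EVEX has is_legacy_mode
// set and is emitted with the shorter VEX prefix even on an AVX-512 machine;
// 512-bit width, xmm16-31 operands, or set_is_evex_instruction() force EVEX.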
13035
13036 void Assembler::eevex_prefix_ndd(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) {
13037 attributes->set_is_evex_instruction();
13038 vex_prefix(adr, ndd_enc, xreg_enc, pre, opc, attributes, /* nds_is_ndd */ true, no_flags);
13039 }
13040
13041 void Assembler::emit_eevex_or_demote(Register dst, Address src1, Register src2, VexSimdPrefix pre, VexOpcode opc,
13042 int size, int opcode_byte, bool no_flags, bool is_map1, bool is_commutative) {
13043 if (is_commutative && is_demotable(no_flags, dst->encoding(), src2->encoding())) {
13044 // Opcode byte adjustment due to mismatch between NDD and equivalent demotable variant
13045 opcode_byte += 2;
13046 if (size == EVEX_64bit) {
13047 emit_prefix_and_int8(get_prefixq(src1, dst, is_map1), opcode_byte);
13048 } else {
13049 // For 32-bit, 16-bit and 8-bit
13050 if (size == EVEX_16bit) {
13051 emit_int8(0x66);
13052 }
13053 prefix(src1, dst, false, is_map1);
13054 emit_int8(opcode_byte);
13055 }
13056 } else {
    bool vex_w = (size == EVEX_64bit);
13058 InstructionAttr attributes(AVX_128bit, vex_w, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13059 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, size);
13060 eevex_prefix_ndd(src1, dst->encoding(), src2->encoding(), pre, opc, &attributes, no_flags);
13061 emit_int8(opcode_byte);
13062 }
13063 emit_operand(src2, src1, 0);
13064 }
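
// Demotion sketch for the helper above (assuming APX NDD semantics): when the NDD
// destination aliases a source and is_demotable agrees, the 4-byte EVEX prefix is
// dropped and the classic two-operand legacy form is emitted instead, with the
// opcode byte adjusted where the legacy map differs from the NDD one.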
13065
13066 void Assembler::emit_eevex_or_demote(Register dst, Register src1, Address src2, VexSimdPrefix pre, VexOpcode opc,
13067 int size, int opcode_byte, bool no_flags, bool is_map1) {
13068 if (is_demotable(no_flags, dst->encoding(), src1->encoding())) {
13069 if (size == EVEX_64bit) {
13070 emit_prefix_and_int8(get_prefixq(src2, dst, is_map1), opcode_byte);
13071 } else {
13072 // For 32-bit, 16-bit and 8-bit
13073 if (size == EVEX_16bit) {
13074 emit_int8(0x66);
13075 }
13076 prefix(src2, dst, false, is_map1);
13077 emit_int8(opcode_byte);
13078 }
13079 } else {
    bool vex_w = (size == EVEX_64bit);
13081 InstructionAttr attributes(AVX_128bit, vex_w, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13082 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, size);
13083 eevex_prefix_ndd(src2, dst->encoding(), src1->encoding(), pre, opc, &attributes, no_flags);
13084 emit_int8(opcode_byte);
13085 }
13086 emit_operand(src1, src2, 0);
13087 }
13088
13089 void Assembler::eevex_prefix_nf(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) {
13090 attributes->set_is_evex_instruction();
13091 vex_prefix(adr, ndd_enc, xreg_enc, pre, opc, attributes, /* nds_is_ndd */ false, no_flags);
13092 }
13093
13094 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr, bool nds_is_ndd, bool no_flags) {
13095 if (nds_is_ndd || no_flags || (src_is_gpr && src_enc >= 16)) {
13096 assert(UseAPX, "APX features not enabled");
13097 }
13098 if (nds_is_ndd) attributes->set_extended_context();
  bool is_extended = dst_enc >= 16 || nds_enc >= 16 || src_enc >= 16;
13100 bool vex_r = (dst_enc & 8) == 8;
13101 bool vex_b = (src_enc & 8) == 8;
13102 bool vex_x = false;
13103 set_attributes(attributes);
13104
  // For an EVEX-capable instruction that is not marked as pure EVEX, check whether it
  // may instead be emitted in legacy (VEX) mode, i.e. whether its vector length and
  // register resources fit there.
  // Pure EVEX instructions have is_evex_instruction set in their definition.
13108 if (!attributes->is_legacy_mode()) {
13109 if (UseAVX > 2 && !attributes->is_evex_instruction()) {
13110 if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
13111 !is_extended) {
13112 attributes->set_is_legacy_mode();
13113 }
13114 }
13115 }
13116
  if (UseAVX > 2) {
    // All scalar fp instructions (uses_vl false) can have legacy_mode as false.
    // Instructions with uses_vl true are vector instructions.
    // All vector instructions with AVX_512bit length can have legacy_mode as false.
    // Vector instructions shorter than AVX_512bit can have legacy_mode as false if AVX512vl() is supported.
    // All remaining instructions must have legacy_mode set to true.
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())),"XMM register should be 0-15");
    // Instructions with legacy_mode true must have dst, nds and src encodings < 16.
    assert(((!is_extended) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
  }
13130
  if (UseAVX > 2 && !attributes->is_legacy_mode()) {
13133 bool evex_r = (dst_enc >= 16);
13134 bool evex_v = (nds_enc >= 16);
13135 bool evex_b = (src_enc >= 16) && src_is_gpr;
    // vex_x can be used as a register-bank extender for the r/m encoding
13137 vex_x = (src_enc >= 16) && !src_is_gpr;
13138 attributes->set_is_evex_instruction();
13139 evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_b, evex_v, false /*eevex_x*/, nds_enc, pre, opc, no_flags);
13140 } else {
13141 if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
13142 attributes->set_rex_vex_w(false);
13143 }
13144 vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
13145 }
13146
13147 // return modrm byte components for operands
13148 return (((dst_enc & 7) << 3) | (src_enc & 7));
13149 }
13150
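// Register-register form with an 8-bit immediate: demotes to the legacy encoding
// when the destination aliases the NDD source, otherwise emits extended EVEX.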
13151 void Assembler::emit_eevex_or_demote(int dst_enc, int nds_enc, int src_enc, int8_t imm8, VexSimdPrefix pre, VexOpcode opc,
13152 int size, int opcode_byte, bool no_flags, bool is_map1) {
  bool is_prefixq = (size == EVEX_64bit);
13154 if (is_demotable(no_flags, dst_enc, nds_enc)) {
13155 int encode = is_prefixq ? prefixq_and_encode(src_enc, dst_enc, is_map1) : prefix_and_encode(src_enc, dst_enc, is_map1);
13156 emit_opcode_prefix_and_encoding((unsigned char)(opcode_byte | 0x80), 0xC0, encode, imm8);
13157 } else {
13158 InstructionAttr attributes(AVX_128bit, is_prefixq, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13159 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, size);
13160 int encode = emit_eevex_prefix_or_demote_ndd(src_enc, dst_enc, nds_enc, pre, opc, &attributes, no_flags);
13161 emit_int24(opcode_byte, (0xC0 | encode), imm8);
13162 }
13163 }
13164
13165 void Assembler::emit_eevex_or_demote(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
13166 int size, int opcode_byte, bool no_flags, bool is_map1, bool swap, bool is_commutative) {
13167 int encode;
  bool is_prefixq = (size == EVEX_64bit);
13169 bool first_operand_demotable = is_demotable(no_flags, dst_enc, nds_enc);
13170 bool second_operand_demotable = is_commutative && is_demotable(no_flags, dst_enc, src_enc);
13171 if (first_operand_demotable || second_operand_demotable) {
13172 if (size == EVEX_16bit) {
13173 emit_int8(0x66);
13174 }
13175 int src = first_operand_demotable ? src_enc : nds_enc;
13176 if (swap) {
13177 encode = is_prefixq ? prefixq_and_encode(dst_enc, src, is_map1) : prefix_and_encode(dst_enc, src, is_map1);
13178 } else {
13179 encode = is_prefixq ? prefixq_and_encode(src, dst_enc, is_map1) : prefix_and_encode(src, dst_enc, is_map1);
13180 }
13181 emit_opcode_prefix_and_encoding((unsigned char)opcode_byte, 0xC0, encode);
13182 } else {
13183 InstructionAttr attributes(AVX_128bit, is_prefixq, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13184 attributes.set_is_evex_instruction();
13185 if (swap) {
13186 encode = vex_prefix_and_encode(nds_enc, dst_enc, src_enc, pre, opc, &attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
13187 } else {
13188 encode = vex_prefix_and_encode(src_enc, dst_enc, nds_enc, pre, opc, &attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
13189 }
13190 emit_int16(opcode_byte, (0xC0 | encode));
13191 }
13192 }
13193
13194 int Assembler::emit_eevex_prefix_or_demote_ndd(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
13195 InstructionAttr *attributes, bool no_flags, bool use_prefixq) {
13196 if (is_demotable(no_flags, dst_enc, nds_enc)) {
13197 if (pre == VEX_SIMD_66) {
13198 emit_int8(0x66);
13199 }
13200 return use_prefixq ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
13201 }
13202 attributes->set_is_evex_instruction();
13203 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
13204 }
13205
13206 int Assembler::emit_eevex_prefix_or_demote_ndd(int dst_enc, int nds_enc, VexSimdPrefix pre, VexOpcode opc,
13207 InstructionAttr *attributes, bool no_flags, bool use_prefixq) {
  // Demote RegReg and RegRegImm instructions.
13209 if (is_demotable(no_flags, dst_enc, nds_enc)) {
13210 return use_prefixq ? prefixq_and_encode(dst_enc) : prefix_and_encode(dst_enc);
13211 }
13212 attributes->set_is_evex_instruction();
13213 return vex_prefix_and_encode(0, dst_enc, nds_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
13214 }
13215
13216 int Assembler::emit_eevex_prefix_ndd(int dst_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) {
13217 attributes->set_is_evex_instruction();
13218 return vex_prefix_and_encode(0, 0, dst_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
13219 }
13220
13221 int Assembler::eevex_prefix_and_encode_nf(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
13222 InstructionAttr *attributes, bool no_flags) {
13223 attributes->set_is_evex_instruction();
13224 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ false, no_flags);
13225 }
13226
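// Arithmetic NDD helper. For a commutative operation, "dst = src2 op dst" demotes
// just as well as "dst = dst op src2", so when the first source does not allow
// demotion the two sources are swapped if the second one does.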
13227 void Assembler::emit_eevex_prefix_or_demote_arith_ndd(Register dst, Register src1, Register src2, VexSimdPrefix pre, VexOpcode opc,
13228 int size, int op1, int op2, bool no_flags, bool is_commutative) {
13229 bool demotable = is_demotable(no_flags, dst->encoding(), src1->encoding());
13230 if (!demotable && is_commutative) {
13231 if (is_demotable(no_flags, dst->encoding(), src2->encoding())) {
13232 // swap src1 and src2
13233 Register tmp = src1;
13234 src1 = src2;
13235 src2 = tmp;
13236 }
13237 }
  bool vex_w = (size == EVEX_64bit);
13239 bool use_prefixq = vex_w;
13240 InstructionAttr attributes(AVX_128bit, vex_w, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13241 // NDD shares its encoding bits with NDS bits for regular EVEX instruction.
13242 // Therefore, DST is passed as the second argument to minimize changes in the leaf level routine.
13243 (void)emit_eevex_prefix_or_demote_ndd(src1->encoding(), dst->encoding(), src2->encoding(), pre, opc, &attributes, no_flags, use_prefixq);
13244 emit_arith(op1, op2, src1, src2);
13245 }
13246
13247 void Assembler::emit_eevex_prefix_or_demote_arith_ndd(Register dst, Register nds, int32_t imm32, VexSimdPrefix pre, VexOpcode opc,
13248 int size, int op1, int op2, bool no_flags) {
13249 int dst_enc = dst->encoding();
13250 int nds_enc = nds->encoding();
13251 bool demote = is_demotable(no_flags, dst_enc, nds_enc);
13252 if (demote) {
13253 (size == EVEX_64bit) ? (void) prefixq_and_encode(dst_enc) : (void) prefix_and_encode(dst_enc);
13254 } else {
    bool vex_w = (size == EVEX_64bit);
13256 InstructionAttr attributes(AVX_128bit, vex_w, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13257 attributes.set_is_evex_instruction();
    vex_prefix_and_encode(0, dst_enc, nds_enc, pre, opc, &attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
  }
13261 emit_arith(op1, op2, nds, imm32, demote);
13262 }
13263
13264 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
13265 VexOpcode opc, InstructionAttr *attributes) {
13266 if (UseAVX > 0) {
13267 int xreg_enc = xreg->encoding();
13268 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
13269 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
13270 } else {
13271 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
13272 rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
13273 }
13274 }
13275
13276 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
13277 VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr) {
13278 int dst_enc = dst->encoding();
13279 int src_enc = src->encoding();
13280 if (UseAVX > 0) {
13281 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
13282 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, src_is_gpr);
13283 } else {
13284 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
13285 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
13286 }
13287 }
13288
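// A flag-setting NDD instruction whose destination aliases its first source is
// semantically identical to the shorter legacy two-operand form, so it can be
// demoted; a no-flags (NF) request never demotes because legacy encodings always
// write flags. Illustrative sketch (hypothetical caller): an NDD add of the form
// "dst = dst + src" with no_flags == false may be emitted as legacy "add dst, src".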
13289 bool Assembler::is_demotable(bool no_flags, int dst_enc, int nds_enc) {
13290 return (!no_flags && dst_enc == nds_enc);
13291 }
13292
13293 void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13294 assert(VM_Version::supports_avx(), "");
13295 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13296 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
13297 emit_int16(0x5F, (0xC0 | encode));
13298 }
13299
13300 void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13301 assert(VM_Version::supports_avx(), "");
13302 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13303 attributes.set_rex_vex_w_reverted();
13304 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
13305 emit_int16(0x5F, (0xC0 | encode));
13306 }
13307
13308 void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13309 assert(VM_Version::supports_avx(), "");
13310 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13311 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
13312 emit_int16(0x5D, (0xC0 | encode));
13313 }
13314
13315 void Assembler::eminmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
13316 assert(VM_Version::supports_avx10_2(), "");
13317 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13318 attributes.set_is_evex_instruction();
13319 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13320 emit_int24(0x53, (0xC0 | encode), imm8);
13321 }
13322
13323 void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13324 assert(VM_Version::supports_avx(), "");
13325 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13326 attributes.set_rex_vex_w_reverted();
13327 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
13328 emit_int16(0x5D, (0xC0 | encode));
13329 }
13330
13331 void Assembler::eminmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
13332 assert(VM_Version::supports_avx10_2(), "");
13333 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13334 attributes.set_is_evex_instruction();
13335 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13336 emit_int24(0x53, (0xC0 | encode), imm8);
13337 }
13338
13339 void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
13340 assert(VM_Version::supports_avx(), "");
13341 assert(vector_len <= AVX_256bit, "");
13342 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
13343 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13344 emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
13345 }
13346
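// The variable-blend forms below take a fourth register operand; it is carried in
// bits 7:4 of the trailing immediate byte (the SDM's /is4 encoding), hence the
// (0xF0 & enc << 4) term.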
13347 void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
13348 assert(VM_Version::supports_avx(), "");
13349 assert(vector_len <= AVX_256bit, "");
13350 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
13351 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13352 int src2_enc = src2->encoding();
13353 emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4));
13354 }
13355
13356 void Assembler::vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
13357 assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
13358 assert(vector_len <= AVX_256bit, "");
13359 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
13360 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13361 int src2_enc = src2->encoding();
13362 emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4));
13363 }
13364
13365 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
13366 assert(VM_Version::supports_avx2(), "");
13367 assert(vector_len <= AVX_256bit, "");
13368 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
13369 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13370 emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8);
13371 }
13372
13373 void Assembler::vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len) {
13374 assert(VM_Version::supports_avx(), "");
13375 assert(vector_len <= AVX_256bit, "");
13376 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
13377 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
13378 emit_int24((unsigned char)0xC2, (0xC0 | encode), (unsigned char)comparison);
13379 }
13380
13381 void Assembler::evcmpph(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13382 ComparisonPredicateFP comparison, int vector_len) {
13383 assert(VM_Version::supports_avx512_fp16(), "");
13384 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13385 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13386 attributes.set_is_evex_instruction();
13387 attributes.set_embedded_opmask_register_specifier(mask);
13388 attributes.reset_is_clear_context();
13389 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
13390 emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
13391 }
13392
13393 void Assembler::evcmpsh(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicateFP comparison) {
13394 assert(VM_Version::supports_avx512_fp16(), "");
13395 InstructionAttr attributes(Assembler::AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13396 attributes.set_is_evex_instruction();
13397 attributes.set_embedded_opmask_register_specifier(mask);
13398 attributes.reset_is_clear_context();
13399 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_3A, &attributes);
13400 emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
13401 }
13402
13403 void Assembler::evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13404 ComparisonPredicateFP comparison, int vector_len) {
13405 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13406 // Encoding: EVEX.NDS.XXX.0F.W0 C2 /r ib
13407 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13408 attributes.set_is_evex_instruction();
13409 attributes.set_embedded_opmask_register_specifier(mask);
13410 attributes.reset_is_clear_context();
13411 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
13412 emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
13413 }
13414
13415 void Assembler::evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13416 ComparisonPredicateFP comparison, int vector_len) {
13417 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13418 // Encoding: EVEX.NDS.XXX.66.0F.W1 C2 /r ib
13419 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13420 attributes.set_is_evex_instruction();
13421 attributes.set_embedded_opmask_register_specifier(mask);
13422 attributes.reset_is_clear_context();
13423 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13424 emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
13425 }
13426
13427 void Assembler::blendvps(XMMRegister dst, XMMRegister src) {
13428 assert(VM_Version::supports_sse4_1(), "");
13429 assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
13430 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13431 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13432 emit_int16(0x14, (0xC0 | encode));
13433 }
13434
13435 void Assembler::blendvpd(XMMRegister dst, XMMRegister src) {
13436 assert(VM_Version::supports_sse4_1(), "");
13437 assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
13438 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13439 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13440 emit_int16(0x15, (0xC0 | encode));
13441 }
13442
13443 void Assembler::pblendvb(XMMRegister dst, XMMRegister src) {
13444 assert(VM_Version::supports_sse4_1(), "");
13445 assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
13446 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13447 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13448 emit_int16(0x10, (0xC0 | encode));
13449 }
13450
13451 void Assembler::vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
13452 assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
13453 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13454 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13455 int src2_enc = src2->encoding();
13456 emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4));
13457 }
13458
13459 void Assembler::vblendps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
13460 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13461 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13462 emit_int24(0x0C, (0xC0 | encode), imm8);
13463 }
13464
13465 void Assembler::vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
13466 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
13467 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
13468 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13469 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13470 emit_int16(0x64, (0xC0 | encode));
13471 }
13472
13473 void Assembler::vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
13474 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
13475 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
13476 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13477 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13478 emit_int16(0x65, (0xC0 | encode));
13479 }
13480
13481 void Assembler::vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
13482 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
13483 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
13484 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13485 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13486 emit_int16(0x66, (0xC0 | encode));
13487 }
13488
13489 void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
13490 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
13491 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
13492 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13493 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13494 emit_int16(0x37, (0xC0 | encode));
13495 }
13496
13497 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13498 int comparison, bool is_signed, int vector_len) {
13499 assert(VM_Version::supports_evex(), "");
13500 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13501 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
13502 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13503 attributes.set_is_evex_instruction();
13504 attributes.set_embedded_opmask_register_specifier(mask);
13505 attributes.reset_is_clear_context();
13506 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13507 int opcode = is_signed ? 0x1F : 0x1E;
13508 emit_int24(opcode, (0xC0 | encode), comparison);
13509 }
13510
13511 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13512 int comparison, bool is_signed, int vector_len) {
13513 assert(VM_Version::supports_evex(), "");
13514 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13515 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
13516 InstructionMark im(this);
13517 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13518 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
13519 attributes.set_is_evex_instruction();
13520 attributes.set_embedded_opmask_register_specifier(mask);
13521 attributes.reset_is_clear_context();
13522 int dst_enc = kdst->encoding();
13523 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13524 int opcode = is_signed ? 0x1F : 0x1E;
13525 emit_int8((unsigned char)opcode);
13526 emit_operand(as_Register(dst_enc), src, 1);
13527 emit_int8((unsigned char)comparison);
13528 }
13529
13530 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13531 int comparison, bool is_signed, int vector_len) {
13532 assert(VM_Version::supports_evex(), "");
13533 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13534 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
13535 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13536 attributes.set_is_evex_instruction();
13537 attributes.set_embedded_opmask_register_specifier(mask);
13538 attributes.reset_is_clear_context();
13539 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13540 int opcode = is_signed ? 0x1F : 0x1E;
13541 emit_int24(opcode, (0xC0 | encode), comparison);
13542 }
13543
13544 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13545 int comparison, bool is_signed, int vector_len) {
13546 assert(VM_Version::supports_evex(), "");
13547 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13548 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
13549 InstructionMark im(this);
13550 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13551 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
13552 attributes.set_is_evex_instruction();
13553 attributes.set_embedded_opmask_register_specifier(mask);
13554 attributes.reset_is_clear_context();
13555 int dst_enc = kdst->encoding();
13556 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13557 int opcode = is_signed ? 0x1F : 0x1E;
13558 emit_int8((unsigned char)opcode);
13559 emit_operand(as_Register(dst_enc), src, 1);
13560 emit_int8((unsigned char)comparison);
13561 }
13562
13563 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13564 int comparison, bool is_signed, int vector_len) {
13565 assert(VM_Version::supports_evex(), "");
13566 assert(VM_Version::supports_avx512bw(), "");
13567 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13568 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
13569 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13570 attributes.set_is_evex_instruction();
13571 attributes.set_embedded_opmask_register_specifier(mask);
13572 attributes.reset_is_clear_context();
13573 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13574 int opcode = is_signed ? 0x3F : 0x3E;
13575 emit_int24(opcode, (0xC0 | encode), comparison);
13576 }
13577
13578 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13579 int comparison, bool is_signed, int vector_len) {
13580 assert(VM_Version::supports_evex(), "");
13581 assert(VM_Version::supports_avx512bw(), "");
13582 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13583 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
13584 InstructionMark im(this);
13585 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13586 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
13587 attributes.set_is_evex_instruction();
13588 attributes.set_embedded_opmask_register_specifier(mask);
13589 attributes.reset_is_clear_context();
13590 int dst_enc = kdst->encoding();
13591 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13592 int opcode = is_signed ? 0x3F : 0x3E;
13593 emit_int8((unsigned char)opcode);
13594 emit_operand(as_Register(dst_enc), src, 1);
13595 emit_int8((unsigned char)comparison);
13596 }
13597
13598 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13599 int comparison, bool is_signed, int vector_len) {
13600 assert(VM_Version::supports_evex(), "");
13601 assert(VM_Version::supports_avx512bw(), "");
13602 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13603 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
13604 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13605 attributes.set_is_evex_instruction();
13606 attributes.set_embedded_opmask_register_specifier(mask);
13607 attributes.reset_is_clear_context();
13608 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13609 int opcode = is_signed ? 0x3F : 0x3E;
13610 emit_int24(opcode, (0xC0 | encode), comparison);
13611 }
13612
13613 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13614 int comparison, bool is_signed, int vector_len) {
13615 assert(VM_Version::supports_evex(), "");
13616 assert(VM_Version::supports_avx512bw(), "");
13617 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13618 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
13619 InstructionMark im(this);
13620 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13621 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
13622 attributes.set_is_evex_instruction();
13623 attributes.set_embedded_opmask_register_specifier(mask);
13624 attributes.reset_is_clear_context();
13625 int dst_enc = kdst->encoding();
13626 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13627 int opcode = is_signed ? 0x3F : 0x3E;
13628 emit_int8((unsigned char)opcode);
13629 emit_operand(as_Register(dst_enc), src, 1);
13630 emit_int8((unsigned char)comparison);
13631 }
13632
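// The rotate-by-immediate forms below share opcode 0x72; the ModRM.reg field picks
// the operation (xmm0 encodes /0 = ror, xmm1 encodes /1 = rol) and the destination
// register is carried in the EVEX.vvvv field.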
13633 void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13634 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13635 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13636 attributes.set_is_evex_instruction();
13637 attributes.set_embedded_opmask_register_specifier(mask);
13638 if (merge) {
13639 attributes.reset_is_clear_context();
13640 }
13641 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13642 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13643 }
13644
13645 void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13646 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13647 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13648 attributes.set_is_evex_instruction();
13649 attributes.set_embedded_opmask_register_specifier(mask);
13650 if (merge) {
13651 attributes.reset_is_clear_context();
13652 }
13653 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13654 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13655 }
13656
13657 void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13658 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13659 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13660 attributes.set_is_evex_instruction();
13661 attributes.set_embedded_opmask_register_specifier(mask);
13662 if (merge) {
13663 attributes.reset_is_clear_context();
13664 }
13665 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13666 emit_int16(0x14, (0xC0 | encode));
13667 }
13668
13669 void Assembler::evprorvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13670 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13671 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13672 attributes.set_is_evex_instruction();
13673 attributes.set_embedded_opmask_register_specifier(mask);
13674 if (merge) {
13675 attributes.reset_is_clear_context();
13676 }
13677 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13678 emit_int16(0x14, (0xC0 | encode));
13679 }
13680
13681 void Assembler::evprold(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13682 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13683 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13684 attributes.set_is_evex_instruction();
13685 attributes.set_embedded_opmask_register_specifier(mask);
13686 if (merge) {
13687 attributes.reset_is_clear_context();
13688 }
13689 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13690 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13691 }
13692
13693 void Assembler::evprolq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13694 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13695 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13696 attributes.set_is_evex_instruction();
13697 attributes.set_embedded_opmask_register_specifier(mask);
13698 if (merge) {
13699 attributes.reset_is_clear_context();
13700 }
13701 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13702 emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13703 }
13704
13705 void Assembler::evprolvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13706 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13707 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13708 attributes.set_is_evex_instruction();
13709 attributes.set_embedded_opmask_register_specifier(mask);
13710 if (merge) {
13711 attributes.reset_is_clear_context();
13712 }
13713 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13714 emit_int16(0x15, (0xC0 | encode));
13715 }
13716
13717 void Assembler::evprolvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13718 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13719 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13720 attributes.set_is_evex_instruction();
13721 attributes.set_embedded_opmask_register_specifier(mask);
13722 if (merge) {
13723 attributes.reset_is_clear_context();
13724 }
13725 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13726 emit_int16(0x15, (0xC0 | encode));
13727 }
13728
13729 void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
13730 assert(VM_Version::supports_avx(), "");
13731 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13732 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13733 int mask_enc = mask->encoding();
  emit_int24(0x4C, (0xC0 | encode), (0xF0 & mask_enc << 4));
13735 }
13736
13737 void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13738 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13739 // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
13740 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13741 attributes.set_is_evex_instruction();
13742 attributes.set_embedded_opmask_register_specifier(mask);
13743 if (merge) {
13744 attributes.reset_is_clear_context();
13745 }
13746 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13747 emit_int16(0x65, (0xC0 | encode));
13748 }
13749
13750 void Assembler::evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13751 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13752 // Encoding: EVEX.NDS.XXX.66.0F38.W0 65 /r
13753 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13754 attributes.set_is_evex_instruction();
13755 attributes.set_embedded_opmask_register_specifier(mask);
13756 if (merge) {
13757 attributes.reset_is_clear_context();
13758 }
13759 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13760 emit_int16(0x65, (0xC0 | encode));
13761 }
13762
13763 void Assembler::evpblendmb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13764 assert(VM_Version::supports_avx512bw(), "");
13765 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13766 // Encoding: EVEX.NDS.512.66.0F38.W0 66 /r
13767 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13768 attributes.set_is_evex_instruction();
13769 attributes.set_embedded_opmask_register_specifier(mask);
13770 if (merge) {
13771 attributes.reset_is_clear_context();
13772 }
13773 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13774 emit_int16(0x66, (0xC0 | encode));
13775 }
13776
13777 void Assembler::evpblendmw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13778 assert(VM_Version::supports_avx512bw(), "");
13779 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
13780 // Encoding: EVEX.NDS.512.66.0F38.W1 66 /r
13781 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13782 attributes.set_is_evex_instruction();
13783 attributes.set_embedded_opmask_register_specifier(mask);
13784 if (merge) {
13785 attributes.reset_is_clear_context();
13786 }
13787 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13788 emit_int16(0x66, (0xC0 | encode));
13789 }
13790
13791 void Assembler::evpblendmd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13792 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
  // Encoding: EVEX.NDS.512.66.0F38.W0 64 /r
13794 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13795 attributes.set_is_evex_instruction();
13796 attributes.set_embedded_opmask_register_specifier(mask);
13797 if (merge) {
13798 attributes.reset_is_clear_context();
13799 }
13800 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13801 emit_int16(0x64, (0xC0 | encode));
13802 }
13803
13804 void Assembler::evpblendmq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13805 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
  // Encoding: EVEX.NDS.512.66.0F38.W1 64 /r
13807 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13808 attributes.set_is_evex_instruction();
13809 attributes.set_embedded_opmask_register_specifier(mask);
13810 if (merge) {
13811 attributes.reset_is_clear_context();
13812 }
13813 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13814 emit_int16(0x64, (0xC0 | encode));
13815 }
13816
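// Operand routing for the BMI2 instructions below: BZHI (and SARX/SHLX/SHRX) carry
// the index or count register (src2) in VEX.vvvv with src1 as the ModRM r/m
// operand, whereas PEXT/PDEP carry src1 in VEX.vvvv and src2 as r/m.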
13817 void Assembler::bzhiq(Register dst, Register src1, Register src2) {
13818 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13819 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13820 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
13821 emit_int16((unsigned char)0xF5, (0xC0 | encode));
13822 }
13823
13824 void Assembler::bzhil(Register dst, Register src1, Register src2) {
13825 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13826 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13827 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
13828 emit_int16((unsigned char)0xF5, (0xC0 | encode));
13829 }
13830
13831 void Assembler::pextl(Register dst, Register src1, Register src2) {
13832 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13833 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13834 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13835 emit_int16((unsigned char)0xF5, (0xC0 | encode));
13836 }
13837
13838 void Assembler::pdepl(Register dst, Register src1, Register src2) {
13839 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13840 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13841 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13842 emit_int16((unsigned char)0xF5, (0xC0 | encode));
13843 }
13844
13845 void Assembler::pextq(Register dst, Register src1, Register src2) {
13846 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13847 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13848 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13849 emit_int16((unsigned char)0xF5, (0xC0 | encode));
13850 }
13851
13852 void Assembler::pdepq(Register dst, Register src1, Register src2) {
13853 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13854 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13855 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13856 emit_int16((unsigned char)0xF5, (0xC0 | encode));
13857 }
13858
13859 void Assembler::pextl(Register dst, Register src1, Address src2) {
13860 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13861 InstructionMark im(this);
13862 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13863 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13864 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13865 emit_int8((unsigned char)0xF5);
13866 emit_operand(dst, src2, 0);
13867 }
13868
13869 void Assembler::pdepl(Register dst, Register src1, Address src2) {
13870 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13871 InstructionMark im(this);
13872 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13873 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13874 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13875 emit_int8((unsigned char)0xF5);
13876 emit_operand(dst, src2, 0);
13877 }
13878
13879 void Assembler::pextq(Register dst, Register src1, Address src2) {
13880 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13881 InstructionMark im(this);
13882 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13883 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13884 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13885 emit_int8((unsigned char)0xF5);
13886 emit_operand(dst, src2, 0);
13887 }
13888
13889 void Assembler::pdepq(Register dst, Register src1, Address src2) {
13890 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13891 InstructionMark im(this);
13892 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13893 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13894 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13895 emit_int8((unsigned char)0xF5);
13896 emit_operand(dst, src2, 0);
13897 }
13898
13899 void Assembler::sarxl(Register dst, Register src1, Register src2) {
13900 assert(VM_Version::supports_bmi2(), "");
13901 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13902 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13903 emit_int16((unsigned char)0xF7, (0xC0 | encode));
13904 }
13905
13906 void Assembler::sarxl(Register dst, Address src1, Register src2) {
13907 assert(VM_Version::supports_bmi2(), "");
13908 InstructionMark im(this);
13909 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13910 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13911 vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13912 emit_int8((unsigned char)0xF7);
13913 emit_operand(dst, src1, 0);
13914 }
13915
13916 void Assembler::sarxq(Register dst, Register src1, Register src2) {
13917 assert(VM_Version::supports_bmi2(), "");
13918 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13919 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13920 emit_int16((unsigned char)0xF7, (0xC0 | encode));
13921 }
13922
13923 void Assembler::sarxq(Register dst, Address src1, Register src2) {
13924 assert(VM_Version::supports_bmi2(), "");
13925 InstructionMark im(this);
13926 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13927 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13928 vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13929 emit_int8((unsigned char)0xF7);
13930 emit_operand(dst, src1, 0);
13931 }
13932
13933 void Assembler::shlxl(Register dst, Register src1, Register src2) {
13934 assert(VM_Version::supports_bmi2(), "");
13935 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13936 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
13937 emit_int16((unsigned char)0xF7, (0xC0 | encode));
13938 }
13939
13940 void Assembler::shlxl(Register dst, Address src1, Register src2) {
13941 assert(VM_Version::supports_bmi2(), "");
13942 InstructionMark im(this);
13943 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13944 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13945 vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13946 emit_int8((unsigned char)0xF7);
13947 emit_operand(dst, src1, 0);
13948 }
13949
13950 void Assembler::shlxq(Register dst, Register src1, Register src2) {
13951 assert(VM_Version::supports_bmi2(), "");
13952 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13953 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
13954 emit_int16((unsigned char)0xF7, (0xC0 | encode));
13955 }
13956
13957 void Assembler::shlxq(Register dst, Address src1, Register src2) {
13958 assert(VM_Version::supports_bmi2(), "");
13959 InstructionMark im(this);
13960 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13961 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13962 vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13963 emit_int8((unsigned char)0xF7);
13964 emit_operand(dst, src1, 0);
13965 }
13966
13967 void Assembler::shrxl(Register dst, Register src1, Register src2) {
13968 assert(VM_Version::supports_bmi2(), "");
13969 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13970 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13971 emit_int16((unsigned char)0xF7, (0xC0 | encode));
13972 }
13973
13974 void Assembler::shrxl(Register dst, Address src1, Register src2) {
13975 assert(VM_Version::supports_bmi2(), "");
13976 InstructionMark im(this);
13977 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13978 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13979 vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13980 emit_int8((unsigned char)0xF7);
13981 emit_operand(dst, src1, 0);
13982 }
13983
13984 void Assembler::shrxq(Register dst, Register src1, Register src2) {
13985 assert(VM_Version::supports_bmi2(), "");
13986 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13987 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13988 emit_int16((unsigned char)0xF7, (0xC0 | encode));
13989 }
13990
13991 void Assembler::shrxq(Register dst, Address src1, Register src2) {
13992 assert(VM_Version::supports_bmi2(), "");
13993 InstructionMark im(this);
13994 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13995 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13996 vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13997 emit_int8((unsigned char)0xF7);
13998 emit_operand(dst, src1, 0);
13999 }
14000
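// Vector/mask converts: evpmovX2m copies the most significant bit of each element
// into the corresponding opmask bit, while evpmovm2X broadcasts each mask bit to
// all bits of the corresponding vector element.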
14001 void Assembler::evpmovq2m(KRegister dst, XMMRegister src, int vector_len) {
14002 assert(VM_Version::supports_avx512vldq(), "");
14003 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14004 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14005 attributes.set_is_evex_instruction();
14006 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14007 emit_int16(0x39, (0xC0 | encode));
14008 }
14009
14010 void Assembler::evpmovd2m(KRegister dst, XMMRegister src, int vector_len) {
14011 assert(VM_Version::supports_avx512vldq(), "");
14012 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14013 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14014 attributes.set_is_evex_instruction();
14015 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14016 emit_int16(0x39, (0xC0 | encode));
14017 }
14018
14019 void Assembler::evpmovw2m(KRegister dst, XMMRegister src, int vector_len) {
14020 assert(VM_Version::supports_avx512vlbw(), "");
14021 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14022 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14023 attributes.set_is_evex_instruction();
14024 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14025 emit_int16(0x29, (0xC0 | encode));
14026 }
14027
14028 void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) {
14029 assert(VM_Version::supports_avx512vlbw(), "");
14030 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14031 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14032 attributes.set_is_evex_instruction();
14033 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14034 emit_int16(0x29, (0xC0 | encode));
14035 }
14036
14037 void Assembler::evpmovm2q(XMMRegister dst, KRegister src, int vector_len) {
14038 assert(VM_Version::supports_avx512vldq(), "");
14039 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14040 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14041 attributes.set_is_evex_instruction();
14042 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14043 emit_int16(0x38, (0xC0 | encode));
14044 }
14045
14046 void Assembler::evpmovm2d(XMMRegister dst, KRegister src, int vector_len) {
14047 assert(VM_Version::supports_avx512vldq(), "");
14048 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14049 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14050 attributes.set_is_evex_instruction();
14051 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14052 emit_int16(0x38, (0xC0 | encode));
14053 }
14054
14055 void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
14056 assert(VM_Version::supports_avx512vlbw(), "");
14057 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14058 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14059 attributes.set_is_evex_instruction();
14060 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14061 emit_int16(0x28, (0xC0 | encode));
14062 }
14063
14064 void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
14065 assert(VM_Version::supports_avx512vlbw(), "");
14066 assert(VM_Version::supports_avx512vl() || vector_len == Assembler::AVX_512bit, "");
14067 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
14068 attributes.set_is_evex_instruction();
14069 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
14070 emit_int16(0x28, (0xC0 | encode));
14071 }
14072
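// The compress forms below pack the elements selected by the opmask contiguously
// into the low lanes of dst. With merge == true the unselected upper lanes keep
// dst's previous contents (merge-masking), which is why reset_is_clear_context()
// is only called on the merge path; otherwise the upper lanes are zeroed.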
14073 void Assembler::evpcompressb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
14074 assert(VM_Version::supports_avx512_vbmi2(), "");
14075 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
14076 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
14077 attributes.set_embedded_opmask_register_specifier(mask);
14078 attributes.set_is_evex_instruction();
14079 if (merge) {
14080 attributes.reset_is_clear_context();
14081 }
14082 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
14083 emit_int16((unsigned char)0x63, (0xC0 | encode));
14084 }
14085
14086 void Assembler::evpcompressw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
14087 assert(VM_Version::supports_avx512_vbmi2(), "");
14088 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
14089 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
14090 attributes.set_embedded_opmask_register_specifier(mask);
14091 attributes.set_is_evex_instruction();
14092 if (merge) {
14093 attributes.reset_is_clear_context();
14094 }
14095 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
14096 emit_int16((unsigned char)0x63, (0xC0 | encode));
14097 }
14098
14099 void Assembler::evpcompressd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
14100 assert(VM_Version::supports_evex(), "");
14101 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
14102 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
14103 attributes.set_embedded_opmask_register_specifier(mask);
14104 attributes.set_is_evex_instruction();
14105 if (merge) {
14106 attributes.reset_is_clear_context();
14107 }
14108 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
14109 emit_int16((unsigned char)0x8B, (0xC0 | encode));
14110 }
14111
14112 void Assembler::evpcompressq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
14113 assert(VM_Version::supports_evex(), "");
14114 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
14115 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
14116 attributes.set_embedded_opmask_register_specifier(mask);
14117 attributes.set_is_evex_instruction();
14118 if (merge) {
14119 attributes.reset_is_clear_context();
14120 }
14121 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
14122 emit_int16((unsigned char)0x8B, (0xC0 | encode));
14123 }
14124
14125 void Assembler::evcompressps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
14126 assert(VM_Version::supports_evex(), "");
14127 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
14128 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
14129 attributes.set_embedded_opmask_register_specifier(mask);
14130 attributes.set_is_evex_instruction();
14131 if (merge) {
14132 attributes.reset_is_clear_context();
14133 }
14134 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
14135 emit_int16((unsigned char)0x8A, (0xC0 | encode));
14136 }
14137
14138 void Assembler::evcompresspd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
14139 assert(VM_Version::supports_evex(), "");
14140 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
14141 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
14142 attributes.set_embedded_opmask_register_specifier(mask);
14143 attributes.set_is_evex_instruction();
14144 if (merge) {
14145 attributes.reset_is_clear_context();
14146 }
14147 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
14148 emit_int16((unsigned char)0x8A, (0xC0 | encode));
14149 }
14150
14151 // The following helpers should only be used by 64-bit instructions that can use
14152 // rip-relative addressing; they cannot be used by instructions that want an immediate value.
14153
14154 // Determine whether an address is always reachable in rip-relative addressing mode
14155 // when accessed from the code cache.
14156 static bool is_always_reachable(address target, relocInfo::relocType reloc_type) {
14157 switch (reloc_type) {
14158 // This should be rip-relative and easily reachable.
14159 case relocInfo::internal_word_type: {
14160 return true;
14161 }
14162 // These should be rip-relative within the code cache and easily
14163 // reachable until we get huge code caches (at which point
14164 // IC code is going to have issues).
14165 case relocInfo::virtual_call_type:
14166 case relocInfo::opt_virtual_call_type:
14167 case relocInfo::static_call_type:
14168 case relocInfo::static_stub_type: {
14169 return true;
14170 }
14171 case relocInfo::runtime_call_type:
14172 case relocInfo::external_word_type:
14173 case relocInfo::poll_return_type: // these are really external_word but need special
14174 case relocInfo::poll_type: { // relocs to identify them
14175 return CodeCache::contains(target);
14176 }
14177 default: {
14178 return false;
14179 }
14180 }
14181 }
14182
14183 // Determine whether an address is reachable in rip-relative addressing mode from the code cache.
14184 static bool is_reachable(address target, relocInfo::relocType reloc_type) {
14185 if (is_always_reachable(target, reloc_type)) {
14186 return true;
14187 }
14188 switch (reloc_type) {
14189 // None will force a 64-bit literal to the code stream. Likely a placeholder
14190 // for something that will be patched later and we need to be certain it will
14191 // always be reachable.
14192 case relocInfo::none: {
14193 return false;
14194 }
14195 case relocInfo::runtime_call_type:
14196 case relocInfo::external_word_type:
14197 case relocInfo::poll_return_type: // these are really external_word but need special
14198 case relocInfo::poll_type: { // relocs to identify them
14199 assert(!CodeCache::contains(target), "always reachable");
14200 if (ForceUnreachable) {
14201 return false; // stress the correction code
14202 }
14203 // For external_word_type/runtime_call_type, if the target is reachable both from
14204 // where we are now (possibly a temp buffer) and from anywhere we might end up
14205 // in the code cache, then it is always reachable.
14206 // This would have to change, and become more pessimistic, if we ever save/restore shared code.
14207 // The code buffer has to be allocated in the code cache, so checking against the
14208 // code cache boundaries covers that case.
14209 //
14210 // In rip-relative addressing mode, an effective address is formed by adding a displacement
14211 // to the 64-bit RIP of the next instruction, which is not known yet. Since the target address
14212 // is guaranteed to be outside of the code cache, checking against the code cache boundaries is
14213 // enough to account for that.
14214 return Assembler::is_simm32(target - CodeCache::low_bound()) &&
14215 Assembler::is_simm32(target - CodeCache::high_bound());
14216 }
14217 default: {
14218 return false;
14219 }
14220 }
14221 }
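
// A worked example of the simm32 check above, with made-up addresses: if the
// code cache spans [0x7f0000000000, 0x7f0040000000) and the target sits at
// 0x7f0080001000, then target - low_bound == 0x80001000, which does not fit in
// a signed 32-bit displacement, so code placed near the low end of the cache
// could not reach the target and is_reachable() correctly returns false.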
14222
14223 bool Assembler::reachable(AddressLiteral adr) {
14224 assert(CodeCache::contains(pc()), "required");
14225 if (adr.is_lval()) {
14226 return false;
14227 }
14228 return is_reachable(adr.target(), adr.reloc());
14229 }
14230
14231 bool Assembler::always_reachable(AddressLiteral adr) {
14232 assert(CodeCache::contains(pc()), "required");
14233 if (adr.is_lval()) {
14234 return false;
14235 }
14236 return is_always_reachable(adr.target(), adr.reloc());
14237 }
14238
14239 void Assembler::emit_data64(jlong data,
14240 relocInfo::relocType rtype,
14241 int format) {
14242 if (rtype == relocInfo::none) {
14243 emit_int64(data);
14244 } else {
14245 emit_data64(data, Relocation::spec_simple(rtype), format);
14246 }
14247 }
14248
14249 void Assembler::emit_data64(jlong data,
14250 RelocationHolder const& rspec,
14251 int format) {
14252 assert(imm_operand == 0, "default format must be immediate in this file");
14253 assert(imm_operand == format, "must be immediate");
14254 assert(inst_mark() != nullptr, "must be inside InstructionMark");
14255 // Do not use AbstractAssembler::relocate, which is not intended for
14256 // embedded words. Instead, relocate to the enclosing instruction.
14257 code_section()->relocate(inst_mark(), rspec, format);
14258 #ifdef ASSERT
14259 check_relocation(rspec, format);
14260 #endif
14261 emit_int64(data);
14262 }
14263
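// REX2-prefix helpers. APX widens the GPR file to 32 registers: for a register
// encoding enc, bit 3 (enc & 8) requests the classic REX-style extension
// (r8-r15) and bit 4 (enc & 16) requests the extended bank (r16-r31). The
// helpers below map those bits onto the REX2 payload positions for the base
// (B/B4), index (X/X4) and reg (R/R4) fields.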
14264 int Assembler::get_base_prefix_bits(int enc) {
14265 int bits = 0;
14266 if (enc & 16) bits |= REX2BIT_B4;
14267 if (enc & 8) bits |= REX2BIT_B;
14268 return bits;
14269 }
14270
14271 int Assembler::get_index_prefix_bits(int enc) {
14272 int bits = 0;
14273 if (enc & 16) bits |= REX2BIT_X4;
14274 if (enc & 8) bits |= REX2BIT_X;
14275 return bits;
14276 }
14277
14278 int Assembler::get_base_prefix_bits(Register base) {
14279 return base->is_valid() ? get_base_prefix_bits(base->encoding()) : 0;
14280 }
14281
14282 int Assembler::get_index_prefix_bits(Register index) {
14283 return index->is_valid() ? get_index_prefix_bits(index->encoding()) : 0;
14284 }
14285
14286 int Assembler::get_reg_prefix_bits(int enc) {
14287 int bits = 0;
14288 if (enc & 16) bits |= REX2BIT_R4;
14289 if (enc & 8) bits |= REX2BIT_R;
14290 return bits;
14291 }
14292
14293 void Assembler::prefix(Register reg) {
14294 if (reg->encoding() >= 16) {
14295 prefix16(WREX2 | get_base_prefix_bits(reg->encoding()));
14296 } else if (reg->encoding() >= 8) {
14297 prefix(REX_B);
14298 }
14299 }
14300
14301 void Assembler::prefix(Register dst, Register src, Prefix p) {
14302 if ((p & WREX2) || src->encoding() >= 16 || dst->encoding() >= 16) {
14303 prefix_rex2(dst, src);
14304 return;
14305 }
14306 if (src->encoding() >= 8) {
14307 p = (Prefix)(p | REX_B);
14308 }
14309 if (dst->encoding() >= 8) {
14310 p = (Prefix)(p | REX_R);
14311 }
14312 if (p != Prefix_EMPTY) {
14313 // do not generate an empty prefix
14314 prefix(p);
14315 }
14316 }
14317
14318 void Assembler::prefix_rex2(Register dst, Register src) {
14319 int bits = 0;
14320 bits |= get_base_prefix_bits(src->encoding());
14321 bits |= get_reg_prefix_bits(dst->encoding());
14322 prefix16(WREX2 | bits);
14323 }
14324
14325 void Assembler::prefix(Register dst, Address adr, Prefix p) {
14326 if (adr.base_needs_rex2() || adr.index_needs_rex2() || dst->encoding() >= 16) {
14327 prefix_rex2(dst, adr); return;
14328 }
14329 if (adr.base_needs_rex()) {
14330 if (adr.index_needs_rex()) {
14331 assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
14332 } else {
14333 p = (Prefix)(p | REX_B);
14334 }
14335 } else {
14336 if (adr.index_needs_rex()) {
14337 assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
14338 }
14339 }
14340 if (dst->encoding() >= 8) {
14341 p = (Prefix)(p | REX_R);
14342 }
14343 if (p != Prefix_EMPTY) {
14344 // do not generate an empty prefix
14345 prefix(p);
14346 }
14347 }
14348
14349 void Assembler::prefix_rex2(Register dst, Address adr) {
14350 assert(!adr.index_needs_rex2(), "prefix(Register dst, Address adr) does not support handling of an X");
14351 int bits = 0;
14352 bits |= get_base_prefix_bits(adr.base());
14353 bits |= get_reg_prefix_bits(dst->encoding());
14354 prefix16(WREX2 | bits);
14355 }
14356
14357 void Assembler::prefix(Address adr, bool is_map1) {
14358 if (adr.base_needs_rex2() || adr.index_needs_rex2()) {
14359 prefix_rex2(adr, is_map1);
14360 return;
14361 }
14362 if (adr.base_needs_rex()) {
14363 if (adr.index_needs_rex()) {
14364 prefix(REX_XB);
14365 } else {
14366 prefix(REX_B);
14367 }
14368 } else {
14369 if (adr.index_needs_rex()) {
14370 prefix(REX_X);
14371 }
14372 }
14373 if (is_map1) emit_int8(0x0F);
14374 }
14375
14376 void Assembler::prefix_rex2(Address adr, bool is_map1) {
14377 int bits = is_map1 ? REX2BIT_M0 : 0;
14378 bits |= get_base_prefix_bits(adr.base());
14379 bits |= get_index_prefix_bits(adr.index());
14380 prefix16(WREX2 | bits);
14381 }
14382
14383 void Assembler::prefix(Address adr, Register reg, bool byteinst, bool is_map1) {
14384 if (reg->encoding() >= 16 || adr.base_needs_rex2() || adr.index_needs_rex2()) {
14385 prefix_rex2(adr, reg, byteinst, is_map1);
14386 return;
14387 }
14388 if (reg->encoding() < 8) {
14389 if (adr.base_needs_rex()) {
14390 if (adr.index_needs_rex()) {
14391 prefix(REX_XB);
14392 } else {
14393 prefix(REX_B);
14394 }
14395 } else {
14396 if (adr.index_needs_rex()) {
14397 prefix(REX_X);
14398 } else if (byteinst && reg->encoding() >= 4) {
14399 prefix(REX);
14400 }
14401 }
14402 } else {
14403 if (adr.base_needs_rex()) {
14404 if (adr.index_needs_rex()) {
14405 prefix(REX_RXB);
14406 } else {
14407 prefix(REX_RB);
14408 }
14409 } else {
14410 if (adr.index_needs_rex()) {
14411 prefix(REX_RX);
14412 } else {
14413 prefix(REX_R);
14414 }
14415 }
14416 }
14417 if (is_map1) emit_int8(0x0F);
14418 }
14419
14420 void Assembler::prefix_rex2(Address adr, Register reg, bool byteinst, bool is_map1) {
14421 int bits = is_map1 ? REX2BIT_M0 : 0;
14422 bits |= get_base_prefix_bits(adr.base());
14423 bits |= get_index_prefix_bits(adr.index());
14424 bits |= get_reg_prefix_bits(reg->encoding());
14425 prefix16(WREX2 | bits);
14426 }
14427
14428 void Assembler::prefix(Address adr, XMMRegister reg) {
14429 if (reg->encoding() >= 16 || adr.base_needs_rex2() || adr.index_needs_rex2()) {
14430 prefix_rex2(adr, reg);
14431 return;
14432 }
14433 if (reg->encoding() < 8) {
14434 if (adr.base_needs_rex()) {
14435 if (adr.index_needs_rex()) {
14436 prefix(REX_XB);
14437 } else {
14438 prefix(REX_B);
14439 }
14440 } else {
14441 if (adr.index_needs_rex()) {
14442 prefix(REX_X);
14443 }
14444 }
14445 } else {
14446 if (adr.base_needs_rex()) {
14447 if (adr.index_needs_rex()) {
14448 prefix(REX_RXB);
14449 } else {
14450 prefix(REX_RB);
14451 }
14452 } else {
14453 if (adr.index_needs_rex()) {
14454 prefix(REX_RX);
14455 } else {
14456 prefix(REX_R);
14457 }
14458 }
14459 }
14460 }
14461
14462 void Assembler::prefix_rex2(Address adr, XMMRegister src) {
14463 int bits = 0;
14464 bits |= get_base_prefix_bits(adr.base());
14465 bits |= get_index_prefix_bits(adr.index());
14466 bits |= get_reg_prefix_bits(src->encoding());
14467 prefix16(WREX2 | bits);
14468 }
14469
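// The prefix_and_encode variants below pack an optional 0x0F map-1 escape into
// the high byte of the returned encoding; callers feed the result to
// emit_opcode_prefix_and_encoding(), which emits the escape (when present)
// ahead of the opcode byte.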
14470 int Assembler::prefix_and_encode(int reg_enc, bool byteinst, bool is_map1) {
14471 if (reg_enc >= 16) {
14472 return prefix_and_encode_rex2(reg_enc, is_map1);
14473 }
14474 if (reg_enc >= 8) {
14475 prefix(REX_B);
14476 reg_enc -= 8;
14477 } else if (byteinst && reg_enc >= 4) {
14478 prefix(REX);
14479 }
14480 int opc_prefix = is_map1 ? 0x0F00 : 0;
14481 return opc_prefix | reg_enc;
14482 }
14483
14484 int Assembler::prefix_and_encode_rex2(int reg_enc, bool is_map1) {
14485 prefix16(WREX2 | (is_map1 ? REX2BIT_M0 : 0) | get_base_prefix_bits(reg_enc));
14486 return reg_enc & 0x7;
14487 }
14488
14489 int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte, bool is_map1) {
14490 if (src_enc >= 16 || dst_enc >= 16) {
14491 return prefix_and_encode_rex2(dst_enc, src_enc, is_map1 ? REX2BIT_M0 : 0);
14492 }
14493 if (dst_enc < 8) {
14494 if (src_enc >= 8) {
14495 prefix(REX_B);
14496 src_enc -= 8;
14497 } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
14498 prefix(REX);
14499 }
14500 } else {
14501 if (src_enc < 8) {
14502 prefix(REX_R);
14503 } else {
14504 prefix(REX_RB);
14505 src_enc -= 8;
14506 }
14507 dst_enc -= 8;
14508 }
14509 int opcode_prefix = is_map1 ? 0x0F00 : 0;
14510 return opcode_prefix | (dst_enc << 3 | src_enc);
14511 }
14512
14513 int Assembler::prefix_and_encode_rex2(int dst_enc, int src_enc, int init_bits) {
14514 int bits = init_bits;
14515 bits |= get_reg_prefix_bits(dst_enc);
14516 bits |= get_base_prefix_bits(src_enc);
14517 dst_enc &= 0x7;
14518 src_enc &= 0x7;
14519 prefix16(WREX2 | bits);
14520 return dst_enc << 3 | src_enc;
14521 }
14522
14523 bool Assembler::prefix_is_rex2(int prefix) {
14524 return (prefix & 0xFF00) == WREX2;
14525 }
14526
14527 int Assembler::get_prefixq_rex2(Address adr, bool is_map1) {
14528 assert(UseAPX, "APX features not enabled");
14529 int bits = REX2BIT_W;
14530 if (is_map1) bits |= REX2BIT_M0;
14531 bits |= get_base_prefix_bits(adr.base());
14532 bits |= get_index_prefix_bits(adr.index());
14533 return WREX2 | bits;
14534 }
14535
14536 int Assembler::get_prefixq(Address adr, bool is_map1) {
14537 if (adr.base_needs_rex2() || adr.index_needs_rex2()) {
14538 return get_prefixq_rex2(adr, is_map1);
14539 }
14540 int8_t prfx = get_prefixq(adr, rax);
14541 assert(REX_W <= prfx && prfx <= REX_WXB, "must be");
14542 return is_map1 ? (((int16_t)prfx) << 8) | 0x0F : (int16_t)prfx;
14543 }
14544
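// The REX prefix constants are laid out so that the arithmetic below lands on
// the right variant: starting from REX_W, bit 0 adds B (base), bit 1 adds X
// (index) and bit 2 adds R (reg), mirroring the B/X/R bits of the REX byte
// itself. The ASSERT ladder cross-checks the computed prefix against the
// named constants.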
14545 int Assembler::get_prefixq(Address adr, Register src, bool is_map1) {
14546 if (adr.base_needs_rex2() || adr.index_needs_rex2() || src->encoding() >= 16) {
14547 return get_prefixq_rex2(adr, src, is_map1);
14548 }
14549 int8_t prfx = (int8_t)(REX_W +
14550 ((int)adr.base_needs_rex()) +
14551 ((int)adr.index_needs_rex() << 1) +
14552 ((int)(src->encoding() >= 8) << 2));
14553 #ifdef ASSERT
14554 if (src->encoding() < 8) {
14555 if (adr.base_needs_rex()) {
14556 if (adr.index_needs_rex()) {
14557 assert(prfx == REX_WXB, "must be");
14558 } else {
14559 assert(prfx == REX_WB, "must be");
14560 }
14561 } else {
14562 if (adr.index_needs_rex()) {
14563 assert(prfx == REX_WX, "must be");
14564 } else {
14565 assert(prfx == REX_W, "must be");
14566 }
14567 }
14568 } else {
14569 if (adr.base_needs_rex()) {
14570 if (adr.index_needs_rex()) {
14571 assert(prfx == REX_WRXB, "must be");
14572 } else {
14573 assert(prfx == REX_WRB, "must be");
14574 }
14575 } else {
14576 if (adr.index_needs_rex()) {
14577 assert(prfx == REX_WRX, "must be");
14578 } else {
14579 assert(prfx == REX_WR, "must be");
14580 }
14581 }
14582 }
14583 #endif
14584 return is_map1 ? (((int16_t)prfx) << 8) | 0x0F : (int16_t)prfx;
14585 }
14586
14587 int Assembler::get_prefixq_rex2(Address adr, Register src, bool is_map1) {
14588 assert(UseAPX, "APX features not enabled");
14589 int bits = REX2BIT_W;
14590 if (is_map1) bits |= REX2BIT_M0;
14591 bits |= get_base_prefix_bits(adr.base());
14592 bits |= get_index_prefix_bits(adr.index());
14593 bits |= get_reg_prefix_bits(src->encoding());
14594 return WREX2 | bits;
14595 }
14596
14597 void Assembler::prefixq(Address adr) {
14598 if (adr.base_needs_rex2() || adr.index_needs_rex2()) {
14599 prefix16(get_prefixq_rex2(adr));
14600 } else {
14601 emit_int8(get_prefixq(adr));
14602 }
14603 }
14604
14605 void Assembler::prefixq(Address adr, Register src, bool is_map1) {
14606 if (adr.base_needs_rex2() || adr.index_needs_rex2() || src->encoding() >= 16) {
14607 prefix16(get_prefixq_rex2(adr, src, is_map1));
14608 } else {
14609 emit_int8(get_prefixq(adr, src));
14610 if (is_map1) emit_int8(0x0F);
14611 }
14612 }
14613
14614
14615 void Assembler::prefixq(Address adr, XMMRegister src) {
14616 if (src->encoding() >= 16 || adr.base_needs_rex2() || adr.index_needs_rex2()) {
14617 prefixq_rex2(adr, src);
14618 return;
14619 }
14620 if (src->encoding() < 8) {
14621 if (adr.base_needs_rex()) {
14622 if (adr.index_needs_rex()) {
14623 prefix(REX_WXB);
14624 } else {
14625 prefix(REX_WB);
14626 }
14627 } else {
14628 if (adr.index_needs_rex()) {
14629 prefix(REX_WX);
14630 } else {
14631 prefix(REX_W);
14632 }
14633 }
14634 } else {
14635 if (adr.base_needs_rex()) {
14636 if (adr.index_needs_rex()) {
14637 prefix(REX_WRXB);
14638 } else {
14639 prefix(REX_WRB);
14640 }
14641 } else {
14642 if (adr.index_needs_rex()) {
14643 prefix(REX_WRX);
14644 } else {
14645 prefix(REX_WR);
14646 }
14647 }
14648 }
14649 }
14650
14651 void Assembler::prefixq_rex2(Address adr, XMMRegister src) {
14652 int bits = REX2BIT_W;
14653 bits |= get_base_prefix_bits(adr.base());
14654 bits |= get_index_prefix_bits(adr.index());
14655 bits |= get_reg_prefix_bits(src->encoding());
14656 prefix16(WREX2 | bits);
14657 }
14658
14659 int Assembler::prefixq_and_encode(int reg_enc, bool is_map1) {
14660 if (reg_enc >= 16) {
14661 return prefixq_and_encode_rex2(reg_enc, is_map1);
14662 }
14663 if (reg_enc < 8) {
14664 prefix(REX_W);
14665 } else {
14666 prefix(REX_WB);
14667 reg_enc -= 8;
14668 }
14669 int opcode_prefix = is_map1 ? 0x0F00 : 0;
14670 return opcode_prefix | reg_enc;
14671 }
14672
14673
14674 int Assembler::prefixq_and_encode_rex2(int reg_enc, bool is_map1) {
14675 prefix16(WREX2 | REX2BIT_W | (is_map1 ? REX2BIT_M0 : 0) | get_base_prefix_bits(reg_enc));
14676 return reg_enc & 0x7;
14677 }
14678
14679 int Assembler::prefixq_and_encode(int dst_enc, int src_enc, bool is_map1) {
14680 if (dst_enc >= 16 || src_enc >= 16) {
14681 return prefixq_and_encode_rex2(dst_enc, src_enc, is_map1);
14682 }
14683 if (dst_enc < 8) {
14684 if (src_enc < 8) {
14685 prefix(REX_W);
14686 } else {
14687 prefix(REX_WB);
14688 src_enc -= 8;
14689 }
14690 } else {
14691 if (src_enc < 8) {
14692 prefix(REX_WR);
14693 } else {
14694 prefix(REX_WRB);
14695 src_enc -= 8;
14696 }
14697 dst_enc -= 8;
14698 }
14699 int opcode_prefix = is_map1 ? 0x0F00 : 0;
14700 return opcode_prefix | (dst_enc << 3 | src_enc);
14701 }
14702
14703 int Assembler::prefixq_and_encode_rex2(int dst_enc, int src_enc, bool is_map1) {
14704 int init_bits = REX2BIT_W | (is_map1 ? REX2BIT_M0 : 0);
14705 return prefix_and_encode_rex2(dst_enc, src_enc, init_bits);
14706 }
14707
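// Emit a combined prefix, as produced by get_prefixq() and friends, followed by
// an opcode byte. The prefix is either a single legacy REX byte (high byte
// zero) or a two-byte sequence (a REX2 pair, or a REX byte followed by the
// 0x0F map-1 escape), in which case the high byte is emitted first.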
14708 void Assembler::emit_prefix_and_int8(int prefix, int b1) {
14709 if ((prefix & 0xFF00) == 0) {
14710 emit_int16(prefix, b1);
14711 } else {
14712 assert((prefix & 0xFF00) != WREX2 || UseAPX, "APX features not enabled");
14713 emit_int24((prefix & 0xFF00) >> 8, prefix & 0x00FF, b1);
14714 }
14715 }
14716
14717 void Assembler::adcq(Register dst, int32_t imm32) {
14718 (void) prefixq_and_encode(dst->encoding());
14719 emit_arith(0x81, 0xD0, dst, imm32);
14720 }
14721
14722 void Assembler::adcq(Register dst, Address src) {
14723 InstructionMark im(this);
14724 emit_prefix_and_int8(get_prefixq(src, dst), 0x13);
14725 emit_operand(dst, src, 0);
14726 }
14727
14728 void Assembler::adcq(Register dst, Register src) {
14729 (void) prefixq_and_encode(dst->encoding(), src->encoding());
14730 emit_arith(0x13, 0xC0, dst, src);
14731 }
14732
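// In the immediate-group (0x81) forms below, the Register passed to
// emit_arith_operand() is really the /digit opcode extension rather than an
// operand: rax stands for /0 (ADD), as_Register(4) for /4 (AND) and
// as_Register(7) for /7 (CMP).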
14733 void Assembler::addq(Address dst, int32_t imm32) {
14734 InstructionMark im(this);
14735 prefixq(dst);
14736 emit_arith_operand(0x81, rax, dst, imm32);
14737 }
14738
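// The e-prefixed forms below are APX EVEX-extended encodings: they take an
// extra destination (NDD, "new data destination") so the source operands are
// left untouched, and when no_flags is set they use the NF encoding, which
// suppresses the RFLAGS update.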
14739 void Assembler::eaddq(Register dst, Address src, int32_t imm32, bool no_flags) {
14740 InstructionMark im(this);
14741 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14742 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14743 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
14744 emit_arith_operand(0x81, rax, src, imm32);
14745 }
14746
14747 void Assembler::addq(Address dst, Register src) {
14748 InstructionMark im(this);
14749 emit_prefix_and_int8(get_prefixq(dst, src), 0x01);
14750 emit_operand(src, dst, 0);
14751 }
14752
14753 void Assembler::eaddq(Register dst, Address src1, Register src2, bool no_flags) {
14754 InstructionMark im(this);
14755 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x01, no_flags, false /* is_map1 */, true /* is_commutative */);
14756 }
14757
14758 void Assembler::addq(Register dst, int32_t imm32) {
14759 (void) prefixq_and_encode(dst->encoding());
14760 emit_arith(0x81, 0xC0, dst, imm32);
14761 }
14762
14763 void Assembler::eaddq(Register dst, Register src, int32_t imm32, bool no_flags) {
14764 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x81, 0xC0, no_flags);
14765 }
14766
14767 void Assembler::addq(Register dst, Address src) {
14768 InstructionMark im(this);
14769 emit_prefix_and_int8(get_prefixq(src, dst), 0x03);
14770 emit_operand(dst, src, 0);
14771 }
14772
14773 void Assembler::eaddq(Register dst, Register src1, Address src2, bool no_flags) {
14774 InstructionMark im(this);
14775 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x03, no_flags);
14776 }
14777
14778 void Assembler::addq(Register dst, Register src) {
14779 (void) prefixq_and_encode(dst->encoding(), src->encoding());
14780 emit_arith(0x03, 0xC0, dst, src);
14781 }
14782
14783 void Assembler::eaddq(Register dst, Register src1, Register src2, bool no_flags) {
14784 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x03, 0xC0, no_flags, true /* is_commutative */);
14785 }
14786
14787 void Assembler::adcxq(Register dst, Register src) {
14788 //assert(VM_Version::supports_adx(), "adx instructions not supported");
14789 if (needs_rex2(dst, src)) {
14790 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14791 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, true);
14792 emit_int16((unsigned char)0x66, (0xC0 | encode));
14793 } else {
14794 emit_int8(0x66);
14795 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
14796 emit_int32(0x0F,
14797 0x38,
14798 (unsigned char)0xF6,
14799 (0xC0 | encode));
14800 }
14801 }
14802
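// When the NDD destination aliases the first source, the three-operand APX form
// is equivalent to the legacy two-operand instruction; is_demotable() detects
// this so the shorter non-APX encoding can be emitted instead.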
14803 void Assembler::eadcxq(Register dst, Register src1, Register src2) {
14804 if (is_demotable(false, dst->encoding(), src1->encoding())) {
14805 return adcxq(dst, src2);
14806 }
14807 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14808 int encode = emit_eevex_prefix_or_demote_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, false /* no_flags */, true /* use_prefixq */);
14809 emit_int16((unsigned char)0x66, (0xC0 | encode));
14810 }
14811
14812 void Assembler::adoxq(Register dst, Register src) {
14813 //assert(VM_Version::supports_adx(), "adx instructions not supported");
14814 if (needs_rex2(dst, src)) {
14815 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14816 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, true);
14817 emit_int16((unsigned char)0x66, (0xC0 | encode));
14818 } else {
14819 emit_int8((unsigned char)0xF3);
14820 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
14821 emit_int32(0x0F,
14822 0x38,
14823 (unsigned char)0xF6,
14824 (0xC0 | encode));
14825 }
14826 }
14827
14828 void Assembler::eadoxq(Register dst, Register src1, Register src2) {
14829 if (is_demotable(false, dst->encoding(), src1->encoding())) {
14830 return adoxq(dst, src2);
14831 }
14832 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14833 int encode = emit_eevex_prefix_or_demote_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, false /* no_flags */, true /* use_prefixq */);
14834 emit_int16((unsigned char)0x66, (0xC0 | encode));
14835 }
14836
14837 void Assembler::andq(Address dst, int32_t imm32) {
14838 InstructionMark im(this);
14839 prefixq(dst);
14840 emit_arith_operand(0x81, as_Register(4), dst, imm32);
14841 }
14842
14843 void Assembler::eandq(Register dst, Address src, int32_t imm32, bool no_flags) {
14844 InstructionMark im(this);
14845 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14846 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14847 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
14848 emit_arith_operand(0x81, as_Register(4), src, imm32);
14849 }
14850
14851 void Assembler::andq(Register dst, int32_t imm32) {
14852 (void) prefixq_and_encode(dst->encoding());
14853 emit_arith(0x81, 0xE0, dst, imm32);
14854 }
14855
14856 void Assembler::eandq(Register dst, Register src, int32_t imm32, bool no_flags) {
14857 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x81, 0xE0, no_flags);
14858 }
14859
14860 void Assembler::andq(Register dst, Address src) {
14861 InstructionMark im(this);
14862 emit_prefix_and_int8(get_prefixq(src, dst), 0x23);
14863 emit_operand(dst, src, 0);
14864 }
14865
14866 void Assembler::eandq(Register dst, Register src1, Address src2, bool no_flags) {
14867 InstructionMark im(this);
14868 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x23, no_flags);
14869 }
14870
14871 void Assembler::andq(Register dst, Register src) {
14872 (void) prefixq_and_encode(dst->encoding(), src->encoding());
14873 emit_arith(0x23, 0xC0, dst, src);
14874 }
14875
14876 void Assembler::eandq(Register dst, Register src1, Register src2, bool no_flags) {
14877 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x23, 0xC0, no_flags, true /* is_commutative */);
14878 }
14879
14880 void Assembler::andq(Address dst, Register src) {
14881 InstructionMark im(this);
14882 emit_prefix_and_int8(get_prefixq(dst, src), 0x21);
14883 emit_operand(src, dst, 0);
14884 }
14885
14886 void Assembler::eandq(Register dst, Address src1, Register src2, bool no_flags) {
14887 InstructionMark im(this);
14888 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x21, no_flags, false /* is_map1 */, true /* is_commutative */);
14889 }
14890
14891 void Assembler::andnq(Register dst, Register src1, Register src2) {
14892 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14893 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14894 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14895 emit_int16((unsigned char)0xF2, (0xC0 | encode));
14896 }
14897
14898 void Assembler::andnq(Register dst, Register src1, Address src2) {
14899 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14900 InstructionMark im(this);
14901 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14902 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14903 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14904 emit_int8((unsigned char)0xF2);
14905 emit_operand(dst, src2, 0);
14906 }
14907
14908 void Assembler::bsfq(Register dst, Register src) {
14909 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14910 emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
14911 }
14912
14913 void Assembler::bsrq(Register dst, Register src) {
14914 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14915 emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
14916 }
14917
14918 void Assembler::bswapq(Register reg) {
14919 int encode = prefixq_and_encode(reg->encoding(), true /* is_map1 */);
14920 emit_opcode_prefix_and_encoding((unsigned char)0xC8, encode);
14921 }
14922
14923 void Assembler::blsiq(Register dst, Register src) {
14924 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14925 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14926 int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14927 emit_int16((unsigned char)0xF3, (0xC0 | encode));
14928 }
14929
14930 void Assembler::blsiq(Register dst, Address src) {
14931 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14932 InstructionMark im(this);
14933 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14934 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14935 vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14936 emit_int8((unsigned char)0xF3);
14937 emit_operand(rbx, src, 0);
14938 }
14939
14940 void Assembler::blsmskq(Register dst, Register src) {
14941 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14942 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14943 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14944 emit_int16((unsigned char)0xF3, (0xC0 | encode));
14945 }
14946
14947 void Assembler::blsmskq(Register dst, Address src) {
14948 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14949 InstructionMark im(this);
14950 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14951 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14952 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14953 emit_int8((unsigned char)0xF3);
14954 emit_operand(rdx, src, 0);
14955 }
14956
14957 void Assembler::blsrq(Register dst, Register src) {
14958 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14959 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14960 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14961 emit_int16((unsigned char)0xF3, (0xC0 | encode));
14962 }
14963
14964 void Assembler::blsrq(Register dst, Address src) {
14965 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14966 InstructionMark im(this);
14967 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14968 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14969 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14970 emit_int8((unsigned char)0xF3);
14971 emit_operand(rcx, src, 0);
14972 }
14973
14974 void Assembler::cdqq() {
14975 emit_int16(REX_W, (unsigned char)0x99);
14976 }
14977
14978 void Assembler::cdqe() {
14979 emit_int16(REX_W, (unsigned char)0x98);
14980 }
14981
14982 void Assembler::clflush(Address adr) {
14983 assert(VM_Version::supports_clflush(), "should do");
14984 prefix(adr, true /* is_map1 */);
14985 emit_int8((unsigned char)0xAE);
14986 emit_operand(rdi, adr, 0);
14987 }
14988
14989 void Assembler::clflushopt(Address adr) {
14990 assert(VM_Version::supports_clflushopt(), "should do!");
14991 // adr should be base reg only with no index or offset
14992 assert(adr.index() == noreg, "index should be noreg");
14993 assert(adr.scale() == Address::no_scale, "scale should be no_scale");
14994 assert(adr.disp() == 0, "displacement should be 0");
14995 // instruction prefix is 0x66
14996 emit_int8(0x66);
14997 prefix(adr, true /* is_map1 */);
14998 // opcode family is 0x0F 0xAE
14999 emit_int8((unsigned char)0xAE);
15000 // extended opcode byte is 7 == rdi
15001 emit_operand(rdi, adr, 0);
15002 }
15003
15004 void Assembler::clwb(Address adr) {
15005 assert(VM_Version::supports_clwb(), "should do!");
15006 // adr should be base reg only with no index or offset
15007 assert(adr.index() == noreg, "index should be noreg");
15008 assert(adr.scale() == Address::no_scale, "scale should be no_scale");
15009 assert(adr.disp() == 0, "displacement should be 0");
15010 // instruction prefix is 0x66
15011 emit_int8(0x66);
15012 prefix(adr, true /* is_map1 */);
15013 // opcode family is 0x0f 0xAE
15014 emit_int8((unsigned char)0xAE);
15015 // extended opcode byte is 6 == rsi
15016 emit_operand(rsi, adr, 0);
15017 }
15018
15019 void Assembler::cmovq(Condition cc, Register dst, Register src) {
15020 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15021 emit_opcode_prefix_and_encoding((0x40 | cc), 0xC0, encode);
15022 }
15023
15024 void Assembler::ecmovq(Condition cc, Register dst, Register src1, Register src2) {
15025 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x40 | cc, false /* no_flags */, true /* is_map1 */, true /* swap */);
15026 }
15027
15028 void Assembler::cmovq(Condition cc, Register dst, Address src) {
15029 InstructionMark im(this);
15030 int prefix = get_prefixq(src, dst, true /* is_map1 */);
15031 emit_prefix_and_int8(prefix, (0x40 | cc));
15032 emit_operand(dst, src, 0);
15033 }
15034
15035 void Assembler::ecmovq(Condition cc, Register dst, Register src1, Address src2) {
15036 InstructionMark im(this);
15037 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, (0x40 | cc), false /* no_flags */, true /* is_map1 */);
15038 }
15039
15040 void Assembler::cmpq(Address dst, int32_t imm32) {
15041 InstructionMark im(this);
15042 prefixq(dst);
15043 emit_arith_operand(0x81, as_Register(7), dst, imm32);
15044 }
15045
15046 void Assembler::cmpq(Register dst, int32_t imm32) {
15047 (void) prefixq_and_encode(dst->encoding());
15048 emit_arith(0x81, 0xF8, dst, imm32);
15049 }
15050
15051 void Assembler::cmpq(Address dst, Register src) {
15052 InstructionMark im(this);
15053 emit_prefix_and_int8(get_prefixq(dst, src), 0x39);
15054 emit_operand(src, dst, 0);
15055 }
15056
15057 void Assembler::cmpq(Register dst, Register src) {
15058 (void) prefixq_and_encode(dst->encoding(), src->encoding());
15059 emit_arith(0x3B, 0xC0, dst, src);
15060 }
15061
15062 void Assembler::cmpq(Register dst, Address src) {
15063 InstructionMark im(this);
15064 emit_prefix_and_int8(get_prefixq(src, dst), 0x3B);
15065 emit_operand(dst, src, 0);
15066 }
15067
15068 void Assembler::cmpxchgq(Register reg, Address adr) {
15069 InstructionMark im(this);
15070 int prefix = get_prefixq(adr, reg, true /* is_map1 */);
15071 emit_prefix_and_int8(prefix, (unsigned char)0xB1);
15072 emit_operand(reg, adr, 0);
15073 }
15074
15075 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
15076 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15077 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
15078 emit_int16(0x2A, (0xC0 | encode));
15079 }
15080
15081 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
15082 InstructionMark im(this);
15083 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15084 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
15085 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
15086 emit_int8(0x2A);
15087 emit_operand(dst, src, 0);
15088 }
15089
15090 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
15091 InstructionMark im(this);
15092 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15093 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
15094 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
15095 emit_int8(0x2A);
15096 emit_operand(dst, src, 0);
15097 }
15098
15099 void Assembler::cvttsd2siq(Register dst, Address src) {
15100 // F2 REX.W 0F 2C /r
15101 // CVTTSD2SI r64, xmm1/m64
15102 InstructionMark im(this);
15103 emit_int8((unsigned char)0xF2);
15104 prefixq(src, dst, true /* is_map1 */);
15105 emit_int8((unsigned char)0x2C);
15106 emit_operand(dst, src, 0);
15107 }
15108
15109 void Assembler::evcvttsd2sisl(Register dst, XMMRegister src) {
15110 assert(VM_Version::supports_avx10_2(), "");
15111 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15112 attributes.set_is_evex_instruction();
15113 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_MAP5, &attributes);
15114 emit_int16(0x6D, (0xC0 | encode));
15115 }
15116
15117 void Assembler::evcvttsd2sisl(Register dst, Address src) {
15118 assert(VM_Version::supports_avx10_2(), "");
15119 InstructionMark im(this);
15120 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15121 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
15122 attributes.set_is_evex_instruction();
15123 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_MAP5, &attributes);
15124 emit_int8((unsigned char)0x6D);
15125 emit_operand(dst, src, 0);
15126 }
15127
15128 void Assembler::evcvttsd2sisq(Register dst, XMMRegister src) {
15129 assert(VM_Version::supports_avx10_2(), "");
15130 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15131 attributes.set_is_evex_instruction();
15132 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_MAP5, &attributes);
15133 emit_int16(0x6D, (0xC0 | encode));
15134 }
15135
15136 void Assembler::evcvttsd2sisq(Register dst, Address src) {
15137 assert(VM_Version::supports_avx10_2(), "");
15138 InstructionMark im(this);
15139 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15140 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
15141 attributes.set_is_evex_instruction();
15142 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_MAP5, &attributes);
15143 emit_int8((unsigned char)0x6D);
15144 emit_operand(dst, src, 0);
15145 }
15146
15147 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
15148 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15149 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
15150 emit_int16(0x2C, (0xC0 | encode));
15151 }
15152
15153 void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
15154 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15155 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
15156 emit_int16(0x2D, (0xC0 | encode));
15157 }
15158
15159 void Assembler::cvttss2siq(Register dst, XMMRegister src) {
15160 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15161 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
15162 emit_int16(0x2C, (0xC0 | encode));
15163 }
15164
15165 void Assembler::decl(Register dst) {
15166 // Don't use it directly. Use MacroAssembler::decrementl() instead.
15167 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
15168 int encode = prefix_and_encode(dst->encoding());
15169 emit_int16((unsigned char)0xFF, (0xC8 | encode));
15170 }
15171
15172 void Assembler::edecl(Register dst, Register src, bool no_flags) {
15173 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15174 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15175 emit_int16((unsigned char)0xFF, (0xC8 | encode));
15176 }
15177
15178 void Assembler::decq(Register dst) {
15179 // Don't use it directly. Use MacroAssembler::decrementq() instead.
15180 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
15181 int encode = prefixq_and_encode(dst->encoding());
15182 emit_int16((unsigned char)0xFF, (0xC8 | encode));
15183 }
15184
15185 void Assembler::edecq(Register dst, Register src, bool no_flags) {
15186 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15187 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
15188 emit_int16((unsigned char)0xFF, (0xC8 | encode));
15189 }
15190
15191 void Assembler::decq(Address dst) {
15192 // Don't use it directly. Use MacroAssembler::decrementq() instead.
15193 InstructionMark im(this);
15194 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xFF);
15195 emit_operand(rcx, dst, 0);
15196 }
15197
15198 void Assembler::edecq(Register dst, Address src, bool no_flags) {
15199 InstructionMark im(this);
15200 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15201 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15202 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15203 emit_int8((unsigned char)0xFF);
15204 emit_operand(rcx, src, 0);
15205 }
15206
15207 // can't use REX2
15208 void Assembler::fxrstor(Address src) {
15209 InstructionMark im(this);
15210 emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
15211 emit_operand(as_Register(1), src, 0);
15212 }
15213
15214 // can't use REX2
15215 void Assembler::xrstor(Address src) {
15216 InstructionMark im(this);
15217 emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
15218 emit_operand(as_Register(5), src, 0);
15219 }
15220
15221 // can't use REX2
15222 void Assembler::fxsave(Address dst) {
15223 InstructionMark im(this);
15224 emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
15225 emit_operand(as_Register(0), dst, 0);
15226 }
15227
15228 // can't use REX2
15229 void Assembler::xsave(Address dst) {
15230 InstructionMark im(this);
15231 emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
15232 emit_operand(as_Register(4), dst, 0);
15233 }
15234
15235 void Assembler::idivq(Register src) {
15236 int encode = prefixq_and_encode(src->encoding());
15237 emit_int16((unsigned char)0xF7, (0xF8 | encode));
15238 }
15239
15240 void Assembler::eidivq(Register src, bool no_flags) {
15241 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15242 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15243 emit_int16((unsigned char)0xF7, (0xF8 | encode));
15244 }
15245
15246 void Assembler::divq(Register src) {
15247 int encode = prefixq_and_encode(src->encoding());
15248 emit_int16((unsigned char)0xF7, (0xF0 | encode));
15249 }
15250
15251 void Assembler::edivq(Register src, bool no_flags) {
15252 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15253 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15254 emit_int16((unsigned char)0xF7, (0xF0 | encode));
15255 }
15256
15257 void Assembler::imulq(Register dst, Register src) {
15258 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15259 emit_opcode_prefix_and_encoding((unsigned char)0xAF, 0xC0, encode);
15260 }
15261
15262 void Assembler::eimulq(Register dst, Register src, bool no_flags) {
15263 if (is_demotable(no_flags, dst->encoding(), src->encoding())) {
15264 return imulq(dst);
15265 }
15266 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15267 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15268 emit_int16((unsigned char)0xAF, (0xC0 | encode));
15269 }
15270
15271 void Assembler::eimulq(Register dst, Register src1, Register src2, bool no_flags) {
15272 emit_eevex_or_demote(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0xAF, no_flags, true /* is_map1 */, true /* swap */, true /* is_commutative */);
15273 }
15274
15275 void Assembler::imulq(Register src) {
15276 int encode = prefixq_and_encode(src->encoding());
15277 emit_int16((unsigned char)0xF7, (0xE8 | encode));
15278 }
15279
15280 void Assembler::eimulq(Register src, bool no_flags) {
15281 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15282 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15283 emit_int16((unsigned char)0xF7, (0xE8 | encode));
15284 }
15285
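// IMUL r64, r/m64, imm has two encodings: 0x6B carries a sign-extended 8-bit
// immediate, 0x69 a full 32-bit immediate; is8bit() picks the shorter form
// whenever the value allows it.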
15286 void Assembler::imulq(Register dst, Address src, int32_t value) {
15287 InstructionMark im(this);
15288 prefixq(src, dst);
15289 if (is8bit(value)) {
15290 emit_int8((unsigned char)0x6B);
15291 emit_operand(dst, src, 1);
15292 emit_int8(value);
15293 } else {
15294 emit_int8((unsigned char)0x69);
15295 emit_operand(dst, src, 4);
15296 emit_int32(value);
15297 }
15298 }
15299
15300 void Assembler::eimulq(Register dst, Address src, int32_t value, bool no_flags) {
15301 InstructionMark im(this);
15302 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15303 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
15304 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15305 if (is8bit(value)) {
15306 emit_int8((unsigned char)0x6B);
15307 emit_operand(dst, src, 1);
15308 emit_int8(value);
15309 } else {
15310 emit_int8((unsigned char)0x69);
15311 emit_operand(dst, src, 4);
15312 emit_int32(value);
15313 }
15314 }
15315
15316 void Assembler::imulq(Register dst, Register src, int value) {
15317 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
15318 if (is8bit(value)) {
15319 emit_int24(0x6B, (0xC0 | encode), (value & 0xFF));
15320 } else {
15321 emit_int16(0x69, (0xC0 | encode));
15322 emit_int32(value);
15323 }
15324 }
15325
15326 void Assembler::eimulq(Register dst, Register src, int value, bool no_flags) {
15327 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15328 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15329 if (is8bit(value)) {
15330 emit_int24(0x6B, (0xC0 | encode), (value & 0xFF));
15331 } else {
15332 emit_int16(0x69, (0xC0 | encode));
15333 emit_int32(value);
15334 }
15335 }
15336
15337 void Assembler::imulq(Register dst, Address src) {
15338 InstructionMark im(this);
15339 int prefix = get_prefixq(src, dst, true /* is_map1 */);
15340 emit_prefix_and_int8(prefix, (unsigned char)0xAF);
15341 emit_operand(dst, src, 0);
15342 }
15343
15344 void Assembler::eimulq(Register dst, Address src, bool no_flags) {
15345 InstructionMark im(this);
15346 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15347 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15348 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15349
15350 emit_int8((unsigned char)0xAF);
15351 emit_operand(dst, src, 0);
15352 }
15353
15354 void Assembler::eimulq(Register dst, Register src1, Address src2, bool no_flags) {
15355 InstructionMark im(this);
15356 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, (unsigned char)0xAF, no_flags, true /* is_map1 */);
15357 }
15358
15359 void Assembler::incl(Register dst) {
15360 // Don't use it directly. Use MacroAssembler::incrementl() instead.
15361 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
15362 int encode = prefix_and_encode(dst->encoding());
15363 emit_int16((unsigned char)0xFF, (0xC0 | encode));
15364 }
15365
15366 void Assembler::eincl(Register dst, Register src, bool no_flags) {
15367 // Don't use it directly. Use MacroAssembler::incrementl() instead.
15368 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
15369 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15370 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15371 emit_int16((unsigned char)0xFF, (0xC0 | encode));
15372 }
15373
15374 void Assembler::incq(Register dst) {
15375 // Don't use it directly. Use MacroAssembler::incrementq() instead.
15376 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
15377 int encode = prefixq_and_encode(dst->encoding());
15378 emit_int16((unsigned char)0xFF, (0xC0 | encode));
15379 }
15380
15381 void Assembler::eincq(Register dst, Register src, bool no_flags) {
15382 // Don't use it directly. Use MacroAssembler::incrementq() instead.
15383 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
15384 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15385 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
15386 emit_int16((unsigned char)0xFF, (0xC0 | encode));
15387 }
15388
15389 void Assembler::incq(Address dst) {
15390 // Don't use it directly. Use MacroAssembler::incrementq() instead.
15391 InstructionMark im(this);
15392 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xFF);
15393 emit_operand(rax, dst, 0);
15394 }
15395
15396 void Assembler::eincq(Register dst, Address src, bool no_flags) {
15397 // Don't use it directly. Use MacroAssembler::incrementq() instead.
15398 InstructionMark im(this);
15399 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15400 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15401 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15402 emit_int8((unsigned char)0xFF);
15403 emit_operand(rax, src, 0);
15404 }
15405
15406 void Assembler::lea(Register dst, Address src) {
15407 leaq(dst, src);
15408 }
15409
15410 void Assembler::leaq(Register dst, Address src) {
15411 InstructionMark im(this);
15412 emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x8D);
15413 emit_operand(dst, src, 0);
15414 }
15415
15416 void Assembler::mov64(Register dst, int64_t imm64) {
15417 InstructionMark im(this);
15418 int encode = prefixq_and_encode(dst->encoding());
15419 emit_int8(0xB8 | encode);
15420 emit_int64(imm64);
15421 }
15422
15423 void Assembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) {
15424 InstructionMark im(this);
15425 int encode = prefixq_and_encode(dst->encoding());
15426 emit_int8(0xB8 | encode);
15427 emit_data64(imm64, rtype, format);
15428 }
15429
15430 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
15431 InstructionMark im(this);
15432 int encode = prefixq_and_encode(dst->encoding());
15433 emit_int8(0xB8 | encode);
15434 emit_data64(imm64, rspec);
15435 }
15436
15437 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
15438 InstructionMark im(this);
15439 int encode = prefix_and_encode(dst->encoding());
15440 emit_int8(0xB8 | encode);
15441 emit_data((int)imm32, rspec, narrow_oop_operand);
15442 }
15443
15444 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
15445 InstructionMark im(this);
15446 prefix(dst);
15447 emit_int8((unsigned char)0xC7);
15448 emit_operand(rax, dst, 4);
15449 emit_data((int)imm32, rspec, narrow_oop_operand);
15450 }
15451
15452 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
15453 InstructionMark im(this);
15454 int encode = prefix_and_encode(src1->encoding());
15455 emit_int16((unsigned char)0x81, (0xF8 | encode));
15456 emit_data((int)imm32, rspec, narrow_oop_operand);
15457 }
15458
15459 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
15460 InstructionMark im(this);
15461 prefix(src1);
15462 emit_int8((unsigned char)0x81);
15463 emit_operand(rax, src1, 4);
15464 emit_data((int)imm32, rspec, narrow_oop_operand);
15465 }
15466
15467 void Assembler::lzcntq(Register dst, Register src) {
15468 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
15469 emit_int8((unsigned char)0xF3);
15470 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15471 emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
15472 }
15473
15474 void Assembler::elzcntq(Register dst, Register src, bool no_flags) {
15475 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
15476 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15477 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15478 emit_int16((unsigned char)0xF5, (0xC0 | encode));
15479 }
15480
15481 void Assembler::lzcntq(Register dst, Address src) {
15482 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
15483 InstructionMark im(this);
15484 emit_int8((unsigned char)0xF3);
15485 prefixq(src, dst, true /* is_map1 */);
15486 emit_int8((unsigned char)0xBD);
15487 emit_operand(dst, src, 0);
15488 }
15489
15490 void Assembler::elzcntq(Register dst, Address src, bool no_flags) {
15491 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
15492 InstructionMark im(this);
15493 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15494 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15495 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15496 emit_int8((unsigned char)0xF5);
15497 emit_operand(dst, src, 0);
15498 }
15499
15500 void Assembler::movdq(XMMRegister dst, Register src) {
15501 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15502 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
15503 emit_int16(0x6E, (0xC0 | encode));
15504 }
15505
15506 void Assembler::movdq(Register dst, XMMRegister src) {
15507 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15508 // swap src/dst to get correct prefix
15509 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
15510 emit_int16(0x7E, (0xC0 | encode));
15512 }
15513
15514 void Assembler::movq(Register dst, Register src) {
15515 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
15516 emit_int16((unsigned char)0x8B, (0xC0 | encode));
15518 }
15519
15520 void Assembler::movq(Register dst, Address src) {
15521 InstructionMark im(this);
15522 emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x8B);
15523 emit_operand(dst, src, 0);
15524 }
15525
15526 void Assembler::movq(Address dst, Register src) {
15527 InstructionMark im(this);
15528 emit_prefix_and_int8(get_prefixq(dst, src), (unsigned char)0x89);
15529 emit_operand(src, dst, 0);
15530 }
15531
15532 void Assembler::movq(Address dst, int32_t imm32) {
15533 InstructionMark im(this);
15534 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC7);
15535 emit_operand(as_Register(0), dst, 4);
15536 emit_int32(imm32);
15537 }
15538
15539 void Assembler::movq(Register dst, int32_t imm32) {
15540 int encode = prefixq_and_encode(dst->encoding());
15541 emit_int16((unsigned char)0xC7, (0xC0 | encode));
15542 emit_int32(imm32);
15543 }
15544
15545 void Assembler::movsbq(Register dst, Address src) {
15546 InstructionMark im(this);
15547 int prefix = get_prefixq(src, dst, true /* is_map1 */);
15548 emit_prefix_and_int8(prefix, (unsigned char)0xBE);
15549 emit_operand(dst, src, 0);
15550 }
15551
15552 void Assembler::movsbq(Register dst, Register src) {
15553 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15554 emit_opcode_prefix_and_encoding((unsigned char)0xBE, 0xC0, encode);
15555 }
15556
15557 void Assembler::movslq(Address dst, int32_t imm32) {
15558 assert(is_simm32(imm32), "lost bits");
15559 InstructionMark im(this);
15560 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC7);
15561 emit_operand(rax, dst, 4);
15562 emit_int32(imm32);
15563 }
15564
15565 void Assembler::movslq(Register dst, Address src) {
15566 InstructionMark im(this);
15567 emit_prefix_and_int8(get_prefixq(src, dst), 0x63);
15568 emit_operand(dst, src, 0);
15569 }
15570
15571 void Assembler::movslq(Register dst, Register src) {
15572 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
15573 emit_int16(0x63, (0xC0 | encode));
15574 }
15575
15576 void Assembler::movswq(Register dst, Address src) {
15577 InstructionMark im(this);
15578 int prefix = get_prefixq(src, dst, true /* is_map1 */);
15579 emit_prefix_and_int8(prefix, (unsigned char)0xBF);
15580 emit_operand(dst, src, 0);
15581 }
15582
15583 void Assembler::movswq(Register dst, Register src) {
15584 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15585 emit_opcode_prefix_and_encoding((unsigned char)0xBF, 0xC0, encode);
15586 }
15587
15588 void Assembler::movzbq(Register dst, Address src) {
15589 InstructionMark im(this);
15590 int prefix = get_prefixq(src, dst, true /* is_map1 */);
15591 emit_prefix_and_int8(prefix, (unsigned char)0xB6);
15592 emit_operand(dst, src, 0);
15593 }
15594
15595 void Assembler::movzbq(Register dst, Register src) {
15596 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15597 emit_opcode_prefix_and_encoding((unsigned char)0xB6, 0xC0, encode);
15598 }
15599
15600 void Assembler::movzwq(Register dst, Address src) {
15601 InstructionMark im(this);
15602 int prefix = get_prefixq(src, dst, true /* is_map1 */);
15603 emit_prefix_and_int8(prefix, (unsigned char)0xB7);
15604 emit_operand(dst, src, 0);
15605 }
15606
15607 void Assembler::movzwq(Register dst, Register src) {
15608 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15609 emit_opcode_prefix_and_encoding((unsigned char)0xB7, 0xC0, encode);
15610 }
15611
15612 void Assembler::mulq(Address src) {
15613 InstructionMark im(this);
15614 emit_prefix_and_int8(get_prefixq(src), (unsigned char)0xF7);
15615 emit_operand(rsp, src, 0);
15616 }
15617
15618 void Assembler::emulq(Address src, bool no_flags) {
15619 InstructionMark im(this);
15620 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15621 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15622 eevex_prefix_nf(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15623 emit_int8((unsigned char)0xF7);
15624 emit_operand(rsp, src, 0);
15625 }
15626
15627 void Assembler::mulq(Register src) {
15628 int encode = prefixq_and_encode(src->encoding());
15629 emit_int16((unsigned char)0xF7, (0xE0 | encode));
15630 }
15631
15632 void Assembler::emulq(Register src, bool no_flags) {
15633 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15634 int encode = eevex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15635 emit_int16((unsigned char)0xF7, (0xE0 | encode));
15636 }
15637
15638 void Assembler::mulxq(Register dst1, Register dst2, Register src) {
15639 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
15640 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15641 int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
15642 emit_int16((unsigned char)0xF6, (0xC0 | encode));
15643 }
15644
15645 void Assembler::negq(Register dst) {
15646 int encode = prefixq_and_encode(dst->encoding());
15647 emit_int16((unsigned char)0xF7, (0xD8 | encode));
15648 }
15649
15650 void Assembler::enegq(Register dst, Register src, bool no_flags) {
15651 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15652 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
15653 emit_int16((unsigned char)0xF7, (0xD8 | encode));
15654 }
15655
15656 void Assembler::negq(Address dst) {
15657 InstructionMark im(this);
15658 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xF7);
15659 emit_operand(as_Register(3), dst, 0);
15660 }
15661
15662 void Assembler::enegq(Register dst, Address src, bool no_flags) {
15663 InstructionMark im(this);
15664 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15665 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15666 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15667 emit_int8((unsigned char)0xF7);
15668 emit_operand(as_Register(3), src, 0);
15669 }
15670
15671 void Assembler::notq(Register dst) {
15672 int encode = prefixq_and_encode(dst->encoding());
15673 emit_int16((unsigned char)0xF7, (0xD0 | encode));
15674 }
15675
15676 void Assembler::enotq(Register dst, Register src) {
15677 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15678 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, false /* no_flags */, true /* use_prefixq */);
15679 emit_int16((unsigned char)0xF7, (0xD0 | encode));
15680 }
15681
15682 void Assembler::btq(Register dst, Register src) {
15683 int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
15684 emit_opcode_prefix_and_encoding((unsigned char)0xA3, 0xC0, encode);
15685 }
15686
15687 void Assembler::btq(Register src, int imm8) {
15688 assert(isByte(imm8), "not a byte");
15689 int encode = prefixq_and_encode(src->encoding(), true /* is_map1 */);
15690 emit_opcode_prefix_and_encoding((unsigned char)0xBA, 0xE0, encode);
15691 emit_int8(imm8);
15692 }
15693
15694 void Assembler::btsq(Address dst, int imm8) {
15695 assert(isByte(imm8), "not a byte");
15696 InstructionMark im(this);
15697 int prefix = get_prefixq(dst, true /* is_map1 */);
15698 emit_prefix_and_int8(prefix, (unsigned char)0xBA);
15699 emit_operand(rbp /* 5 */, dst, 1);
15700 emit_int8(imm8);
15701 }
15702
15703 void Assembler::btrq(Address dst, int imm8) {
15704 assert(isByte(imm8), "not a byte");
15705 InstructionMark im(this);
15706 int prefix = get_prefixq(dst, true /* is_map1 */);
15707 emit_prefix_and_int8(prefix, (unsigned char)0xBA);
15708 emit_operand(rsi /* 6 */, dst, 1);
15709 emit_int8(imm8);
15710 }
15711
15712 void Assembler::orq(Address dst, int32_t imm32) {
15713 InstructionMark im(this);
15714 prefixq(dst);
15715 emit_arith_operand(0x81, as_Register(1), dst, imm32);
15716 }
15717
15718 void Assembler::eorq(Register dst, Address src, int32_t imm32, bool no_flags) {
15719 InstructionMark im(this);
15720 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15721 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15722 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15723 emit_arith_operand(0x81, as_Register(1), src, imm32);
15724 }
15725
15726 void Assembler::orq(Address dst, Register src) {
15727 InstructionMark im(this);
15728 emit_prefix_and_int8(get_prefixq(dst, src), (unsigned char)0x09);
15729 emit_operand(src, dst, 0);
15730 }
15731
15732 void Assembler::eorq(Register dst, Address src1, Register src2, bool no_flags) {
15733 InstructionMark im(this);
15734 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x09, no_flags, false /* is_map1 */, true /* is_commutative */);
15735 }
15736
15737 void Assembler::orq(Register dst, int32_t imm32) {
15738 (void) prefixq_and_encode(dst->encoding());
15739 emit_arith(0x81, 0xC8, dst, imm32);
15740 }
15741
15742 void Assembler::eorq(Register dst, Register src, int32_t imm32, bool no_flags) {
15743 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x81, 0xC8, no_flags);
15744 }
15745
15746 void Assembler::orq_imm32(Register dst, int32_t imm32) {
15747 (void) prefixq_and_encode(dst->encoding());
15748 emit_arith_imm32(0x81, 0xC8, dst, imm32);
15749 }
15750
15751 void Assembler::eorq_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
15752 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15753 (void) emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
15754 emit_arith_imm32(0x81, 0xC8, src, imm32);
15755 }
15756
15757 void Assembler::orq(Register dst, Address src) {
15758 InstructionMark im(this);
15759 emit_prefix_and_int8(get_prefixq(src, dst), 0x0B);
15760 emit_operand(dst, src, 0);
15761 }
15762
15763 void Assembler::eorq(Register dst, Register src1, Address src2, bool no_flags) {
15764 InstructionMark im(this);
15765 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x0B, no_flags);
15766 }
15767
15768 void Assembler::orq(Register dst, Register src) {
15769 (void) prefixq_and_encode(dst->encoding(), src->encoding());
15770 emit_arith(0x0B, 0xC0, dst, src);
15771 }
15772
15773 void Assembler::eorq(Register dst, Register src1, Register src2, bool no_flags) {
15774 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x0B, 0xC0, no_flags, true /* is_commutative */);
15775 }
15776 void Assembler::popcntq(Register dst, Address src) {
15777 assert(VM_Version::supports_popcnt(), "must support");
15778 InstructionMark im(this);
15779 emit_int8((unsigned char)0xF3);
15780 emit_prefix_and_int8(get_prefixq(src, dst, true /* is_map1 */), (unsigned char)0xB8);
15781 emit_operand(dst, src, 0);
15782 }
15783
15784 void Assembler::epopcntq(Register dst, Address src, bool no_flags) {
15785 assert(VM_Version::supports_popcnt(), "must support");
15786 InstructionMark im(this);
15787 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15788 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15789 eevex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15790 emit_int8((unsigned char)0x88);
15791 emit_operand(dst, src, 0);
15792 }
15793
15794 void Assembler::popcntq(Register dst, Register src) {
15795 assert(VM_Version::supports_popcnt(), "must support");
15796 emit_int8((unsigned char)0xF3);
15797 int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15798 emit_opcode_prefix_and_encoding((unsigned char)0xB8, 0xC0, encode);
15799 }
15800
15801 void Assembler::epopcntq(Register dst, Register src, bool no_flags) {
15802 assert(VM_Version::supports_popcnt(), "must support");
15803 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15804 int encode = eevex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
15805 emit_int16((unsigned char)0x88, (0xC0 | encode));
15806 }
15807
15808 void Assembler::popq(Address dst) {
15809 InstructionMark im(this);
15810 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0x8F);
15811 emit_operand(rax, dst, 0);
15812 }
15813
15814 void Assembler::popq(Register dst) {
15815 int encode = prefix_and_encode(dst->encoding());
15816 emit_int8((unsigned char)0x58 | encode);
15817 }
15818
15819 // Precomputable: popa, pusha, vzeroupper
15820
15821 // The results of these routines are invariant from one invocation to the
15822 // next for the duration of a run, so the result can be cached at bootstrap
15823 // and simply copied out on subsequent invocations.
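//
// A sketch of the intended lifecycle (illustrative only; grounded in the
// guards and emit_copy() below):
//
//   Assembler::precompute_instructions(); // once, during single-threaded
//                                         // bootstrap (see the asserts below)
//   ...
//   masm.pusha();       // memcpy of the cached pusha_code bytes
//   masm.vzeroupper();  // memcpy of vzup_code (may be a dummy when empty)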
15824 static bool precomputed = false;
15825
15826 static u_char* popa_code = nullptr;
15827 static int popa_len = 0;
15828
15829 static u_char* pusha_code = nullptr;
15830 static int pusha_len = 0;
15831
15832 static u_char* vzup_code = nullptr;
15833 static int vzup_len = 0;
15834
15835 void Assembler::precompute_instructions() {
15836 assert(!Universe::is_fully_initialized(), "must still be single threaded");
15837 guarantee(!precomputed, "only once");
15838 precomputed = true;
15839 ResourceMark rm;
15840
15841 // Make a temporary buffer big enough for the routines we're capturing
15842 int size = UseAPX ? 512 : 256;
15843 char* tmp_code = NEW_RESOURCE_ARRAY(char, size);
15844 CodeBuffer buffer((address)tmp_code, size);
15845 MacroAssembler masm(&buffer);
15846
15847 address begin_popa = masm.code_section()->end();
15848 masm.popa_uncached();
15849 address end_popa = masm.code_section()->end();
15850 masm.pusha_uncached();
15851 address end_pusha = masm.code_section()->end();
15852 masm.vzeroupper_uncached();
15853 address end_vzup = masm.code_section()->end();
15854
15855 // Save the instructions to permanent buffers.
15856 popa_len = (int)(end_popa - begin_popa);
15857 popa_code = NEW_C_HEAP_ARRAY(u_char, popa_len, mtInternal);
15858 memcpy(popa_code, begin_popa, popa_len);
15859
15860 pusha_len = (int)(end_pusha - end_popa);
15861 pusha_code = NEW_C_HEAP_ARRAY(u_char, pusha_len, mtInternal);
15862 memcpy(pusha_code, end_popa, pusha_len);
15863
15864 vzup_len = (int)(end_vzup - end_pusha);
15865 if (vzup_len > 0) {
15866 vzup_code = NEW_C_HEAP_ARRAY(u_char, vzup_len, mtInternal);
15867 memcpy(vzup_code, end_pusha, vzup_len);
15868 } else {
15869 vzup_code = pusha_code; // dummy
15870 }
15871
15872 assert(masm.code()->total_oop_size() == 0 &&
15873 masm.code()->total_metadata_size() == 0 &&
15874 masm.code()->total_relocation_size() == 0,
15875 "pre-computed code can't reference oops, metadata or contain relocations");
15876 }
15877
15878 static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
15879 assert(src != nullptr, "code to copy must have been pre-computed");
15880 assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
15881 address end = code_section->end();
15882 memcpy(end, src, src_len);
15883 code_section->set_end(end + src_len);
15884 }
15885
15886
15887 // Does not actually store the value of rsp on the stack.
15888 // The slot for rsp just contains an arbitrary value.
15889 void Assembler::pusha() { // 64bit
15890 emit_copy(code_section(), pusha_code, pusha_len);
15891 }
15892
15893 // Does not actually store the value of rsp on the stack.
15894 // The slot for rsp just contains an arbitrary value.
15895 void Assembler::pusha_uncached() { // 64bit
15896 if (UseAPX) {
15897 // Data being pushed by PUSH2 must be 16B-aligned on the stack. To achieve this, push rax upfront
15898 // and use it as a temporary register for stack alignment.
15899 pushp(rax);
15900 // Move original stack pointer to RAX and align stack pointer to 16B boundary.
15901 movq(rax, rsp);
15902 andq(rsp, -(StackAlignmentInBytes));
15903 // Push the original stack pointer (now in rax) paired with r31, then the
15904 // remaining registers, keeping each pair at a 16B-aligned boundary.
15905 push2p(rax, r31);
15906 // Reload rax's original value: rax points at the slot written by pushp(rax) above.
15907 movq(rax, Address(rax));
15908 push2p(r30, r29);
15909 push2p(r28, r27);
15910 push2p(r26, r25);
15911 push2p(r24, r23);
15912 push2p(r22, r21);
15913 push2p(r20, r19);
15914 push2p(r18, r17);
15915 push2p(r16, r15);
15916 push2p(r14, r13);
15917 push2p(r12, r11);
15918 push2p(r10, r9);
15919 push2p(r8, rdi);
15920 push2p(rsi, rbp);
15921 push2p(rbx, rdx);
15922 // Reserve 8 bytes of padding so the stack stays 16-byte aligned once rcx is pushed.
15923 subq(rsp, 8);
15924 pushp(rcx);
15925 } else {
15926 subq(rsp, 16 * wordSize);
15927 movq(Address(rsp, 15 * wordSize), rax);
15928 movq(Address(rsp, 14 * wordSize), rcx);
15929 movq(Address(rsp, 13 * wordSize), rdx);
15930 movq(Address(rsp, 12 * wordSize), rbx);
15931 // Skip rsp as the value is normally not used. There are a few places where
15932 // the original value of rsp needs to be known but that can be computed
15933 // from the value of rsp immediately after pusha (rsp + 16 * wordSize).
15934 // FIXME: For APX any such direct access should also consider EGPR size
15935 // during address computation.
15936 movq(Address(rsp, 10 * wordSize), rbp);
15937 movq(Address(rsp, 9 * wordSize), rsi);
15938 movq(Address(rsp, 8 * wordSize), rdi);
15939 movq(Address(rsp, 7 * wordSize), r8);
15940 movq(Address(rsp, 6 * wordSize), r9);
15941 movq(Address(rsp, 5 * wordSize), r10);
15942 movq(Address(rsp, 4 * wordSize), r11);
15943 movq(Address(rsp, 3 * wordSize), r12);
15944 movq(Address(rsp, 2 * wordSize), r13);
15945 movq(Address(rsp, wordSize), r14);
15946 movq(Address(rsp, 0), r15);
15947 }
15948 }
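// Resulting non-APX pusha frame, as laid out by the stores above (word
// offsets from the post-pusha rsp):
//   15:rax 14:rcx 13:rdx 12:rbx 11:(skipped, rsp not saved) 10:rbp 9:rsi
//    8:rdi  7:r8   6:r9   5:r10  4:r11  3:r12  2:r13  1:r14  0:r15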
15949
15950 void Assembler::popa() { // 64bit
15951 emit_copy(code_section(), popa_code, popa_len);
15952 }
15953
15954 void Assembler::popa_uncached() { // 64bit
15955 if (UseAPX) {
15956 popp(rcx);
15957 addq(rsp, 8);
15958 // Data being popped by POP2 must be 16B-aligned on the stack.
15959 pop2p(rdx, rbx);
15960 pop2p(rbp, rsi);
15961 pop2p(rdi, r8);
15962 pop2p(r9, r10);
15963 pop2p(r11, r12);
15964 pop2p(r13, r14);
15965 pop2p(r15, r16);
15966 pop2p(r17, r18);
15967 pop2p(r19, r20);
15968 pop2p(r21, r22);
15969 pop2p(r23, r24);
15970 pop2p(r25, r26);
15971 pop2p(r27, r28);
15972 pop2p(r29, r30);
15973 // The value popped into RAX below is the original, unaligned stack pointer.
15974 pop2p(r31, rax);
15975 // Restore the original stack pointer.
15976 movq(rsp, rax);
15977 popp(rax);
15978 } else {
15979 movq(r15, Address(rsp, 0));
15980 movq(r14, Address(rsp, wordSize));
15981 movq(r13, Address(rsp, 2 * wordSize));
15982 movq(r12, Address(rsp, 3 * wordSize));
15983 movq(r11, Address(rsp, 4 * wordSize));
15984 movq(r10, Address(rsp, 5 * wordSize));
15985 movq(r9, Address(rsp, 6 * wordSize));
15986 movq(r8, Address(rsp, 7 * wordSize));
15987 movq(rdi, Address(rsp, 8 * wordSize));
15988 movq(rsi, Address(rsp, 9 * wordSize));
15989 movq(rbp, Address(rsp, 10 * wordSize));
15990 // Skip rsp; it is restored automatically by the addq below to its value
15991 // from before the corresponding pusha.
15992 movq(rbx, Address(rsp, 12 * wordSize));
15993 movq(rdx, Address(rsp, 13 * wordSize));
15994 movq(rcx, Address(rsp, 14 * wordSize));
15995 movq(rax, Address(rsp, 15 * wordSize));
15996
15997 addq(rsp, 16 * wordSize);
15998 }
15999 }
16000
16001 void Assembler::vzeroupper() {
16002 emit_copy(code_section(), vzup_code, vzup_len);
16003 }
16004
16005 void Assembler::vzeroall() {
16006 assert(VM_Version::supports_avx(), "requires AVX");
16007 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
16008 (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
16009 emit_int8(0x77);
16010 }
16011
16012 void Assembler::pushq(Address src) {
16013 InstructionMark im(this);
16014 emit_prefix_and_int8(get_prefixq(src), (unsigned char)0xFF);
16015 emit_operand(rsi, src, 0);
16016 }
16017
16018 void Assembler::rclq(Register dst, int imm8) {
16019 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16020 int encode = prefixq_and_encode(dst->encoding());
16021 if (imm8 == 1) {
16022 emit_int16((unsigned char)0xD1, (0xD0 | encode));
16023 } else {
16024 emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
16025 }
16026 }
16027
16028 void Assembler::erclq(Register dst, Register src, int imm8) {
16029 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16030 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16031 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, false /* no_flags */, true /* use_prefixq */);
16032 if (imm8 == 1) {
16033 emit_int16((unsigned char)0xD1, (0xD0 | encode));
16034 } else {
16035 emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
16036 }
16037 }
16038
16039 void Assembler::rcrq(Register dst, int imm8) {
16040 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16041 int encode = prefixq_and_encode(dst->encoding());
16042 if (imm8 == 1) {
16043 emit_int16((unsigned char)0xD1, (0xD8 | encode));
16044 } else {
16045 emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
16046 }
16047 }
16048
16049 void Assembler::ercrq(Register dst, Register src, int imm8) {
16050 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16051 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16052 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, false /* no_flags */, true /* use_prefixq */);
16053 if (imm8 == 1) {
16054 emit_int16((unsigned char)0xD1, (0xD8 | encode));
16055 } else {
16056 emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
16057 }
16058 }
16059
16060 void Assembler::rorxl(Register dst, Register src, int imm8) {
16061 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
16062 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16063 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes, true);
16064 emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
16065 }
16066
16067 void Assembler::rorxl(Register dst, Address src, int imm8) {
16068 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
16069 InstructionMark im(this);
16070 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16071 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
16072 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
16073 emit_int8((unsigned char)0xF0);
16074 emit_operand(dst, src, 1);
16075 emit_int8(imm8);
16076 }
16077
16078 void Assembler::rorxq(Register dst, Register src, int imm8) {
16079 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
16080 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16081 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes, true);
16082 emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
16083 }
16084
16085 void Assembler::rorxq(Register dst, Address src, int imm8) {
16086 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
16087 InstructionMark im(this);
16088 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16089 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16090 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
16091 emit_int8((unsigned char)0xF0);
16092 emit_operand(dst, src, 1);
16093 emit_int8(imm8);
16094 }
16095
16096 void Assembler::salq(Address dst, int imm8) {
16097 InstructionMark im(this);
16098 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16099 if (imm8 == 1) {
16100 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD1);
16101 emit_operand(as_Register(4), dst, 0);
16102 } else {
16104 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC1);
16105 emit_operand(as_Register(4), dst, 1);
16106 emit_int8(imm8);
16107 }
16108 }
16109
16110 void Assembler::esalq(Register dst, Address src, int imm8, bool no_flags) {
16111 InstructionMark im(this);
16112 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16113 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16114 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16115 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16116 if (imm8 == 1) {
16117 emit_int8((unsigned char)0xD1);
16118 emit_operand(as_Register(4), src, 0);
16119 } else {
16121 emit_int8((unsigned char)0xC1);
16122 emit_operand(as_Register(4), src, 1);
16123 emit_int8(imm8);
16124 }
16125 }
16126
16127 void Assembler::salq(Address dst) {
16128 InstructionMark im(this);
16129 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
16130 emit_operand(as_Register(4), dst, 0);
16131 }
16132
16133 void Assembler::esalq(Register dst, Address src, bool no_flags) {
16134 InstructionMark im(this);
16135 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16136 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16137 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16138 emit_int8((unsigned char)0xD3);
16139 emit_operand(as_Register(4), src, 0);
16140 }
16141
16142 void Assembler::salq(Register dst, int imm8) {
16143 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16144 int encode = prefixq_and_encode(dst->encoding());
16145 if (imm8 == 1) {
16146 emit_int16((unsigned char)0xD1, (0xE0 | encode));
16147 } else {
16148 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
16149 }
16150 }
16151
16152 void Assembler::esalq(Register dst, Register src, int imm8, bool no_flags) {
16153 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16154 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16155 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16156 if (imm8 == 1) {
16157 emit_int16((unsigned char)0xD1, (0xE0 | encode));
16158 } else {
16159 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
16160 }
16161 }
16162
16163 void Assembler::salq(Register dst) {
16164 int encode = prefixq_and_encode(dst->encoding());
16165 emit_int16((unsigned char)0xD3, (0xE0 | encode));
16166 }
16167
16168 void Assembler::esalq(Register dst, Register src, bool no_flags) {
16169 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16170 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16171 emit_int16((unsigned char)0xD3, (0xE0 | encode));
16172 }
16173
16174 void Assembler::sarq(Address dst, int imm8) {
16175 InstructionMark im(this);
16176 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16177 if (imm8 == 1) {
16178 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD1);
16179 emit_operand(as_Register(7), dst, 0);
16180 } else {
16182 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC1);
16183 emit_operand(as_Register(7), dst, 1);
16184 emit_int8(imm8);
16185 }
16186 }
16187
16188 void Assembler::esarq(Register dst, Address src, int imm8, bool no_flags) {
16189 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16190 InstructionMark im(this);
16191 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16192 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16193 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16194 if (imm8 == 1) {
16195 emit_int8((unsigned char)0xD1);
16196 emit_operand(as_Register(7), src, 0);
16197 } else {
16199 emit_int8((unsigned char)0xC1);
16200 emit_operand(as_Register(7), src, 1);
16201 emit_int8(imm8);
16202 }
16203 }
16204
16205 void Assembler::sarq(Address dst) {
16206 InstructionMark im(this);
16207 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
16208 emit_operand(as_Register(7), dst, 0);
16209 }
16210
16211 void Assembler::esarq(Register dst, Address src, bool no_flags) {
16212 InstructionMark im(this);
16213 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16214 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16215 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16216 emit_int8((unsigned char)0xD3);
16217 emit_operand(as_Register(7), src, 0);
16218 }
16219
16220 void Assembler::sarq(Register dst, int imm8) {
16221 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16222 int encode = prefixq_and_encode(dst->encoding());
16223 if (imm8 == 1) {
16224 emit_int16((unsigned char)0xD1, (0xF8 | encode));
16225 } else {
16226 emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
16227 }
16228 }
16229
16230 void Assembler::esarq(Register dst, Register src, int imm8, bool no_flags) {
16231 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16232 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16233 if (imm8 == 1) {
16234 emit_int16((unsigned char)0xD1, (0xF8 | encode));
16235 } else {
16236 emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
16237 }
16238 }
16239
16240 void Assembler::sarq(Register dst) {
16241 int encode = prefixq_and_encode(dst->encoding());
16242 emit_int16((unsigned char)0xD3, (0xF8 | encode));
16243 }
16244
16245 void Assembler::esarq(Register dst, Register src, bool no_flags) {
16246 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16247 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16248 emit_int16((unsigned char)0xD3, (0xF8 | encode));
16249 }
16250
16251 void Assembler::sbbq(Address dst, int32_t imm32) {
16252 InstructionMark im(this);
16253 prefixq(dst);
16254 emit_arith_operand(0x81, rbx, dst, imm32);
16255 }
16256
16257 void Assembler::sbbq(Register dst, int32_t imm32) {
16258 (void) prefixq_and_encode(dst->encoding());
16259 emit_arith(0x81, 0xD8, dst, imm32);
16260 }
16261
16262 void Assembler::sbbq(Register dst, Address src) {
16263 InstructionMark im(this);
16264 emit_prefix_and_int8(get_prefixq(src, dst), 0x1B);
16265 emit_operand(dst, src, 0);
16266 }
16267
16268 void Assembler::sbbq(Register dst, Register src) {
16269 (void) prefixq_and_encode(dst->encoding(), src->encoding());
16270 emit_arith(0x1B, 0xC0, dst, src);
16271 }
16272
16273 void Assembler::shlq(Register dst, int imm8) {
16274 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16275 int encode = prefixq_and_encode(dst->encoding());
16276 if (imm8 == 1) {
16277 emit_int16((unsigned char)0xD1, (0xE0 | encode));
16278 } else {
16279 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
16280 }
16281 }
16282
16283 void Assembler::eshlq(Register dst, Register src, int imm8, bool no_flags) {
16284 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16285 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16286 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16287 if (imm8 == 1) {
16288 emit_int16((unsigned char)0xD1, (0xE0 | encode));
16289 } else {
16290 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
16291 }
16292 }
16293
16294 void Assembler::shlq(Register dst) {
16295 int encode = prefixq_and_encode(dst->encoding());
16296 emit_int16((unsigned char)0xD3, (0xE0 | encode));
16297 }
16298
16299 void Assembler::eshlq(Register dst, Register src, bool no_flags) {
16300 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16301 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16302 emit_int16((unsigned char)0xD3, (0xE0 | encode));
16303 }
16304
16305 void Assembler::shrq(Register dst, int imm8) {
16306 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16307 int encode = prefixq_and_encode(dst->encoding());
16308 if (imm8 == 1) {
16309 emit_int16((unsigned char)0xD1, (0xE8 | encode));
16310 } else {
16312 emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
16313 }
16314 }
16315
16316 void Assembler::eshrq(Register dst, Register src, int imm8, bool no_flags) {
16317 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16318 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16319 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16320 if (imm8 == 1) {
16321 emit_int16((unsigned char)0xD1, (0xE8 | encode));
16322 } else {
16324 emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
16325 }
16326 }
16327
16328 void Assembler::shrq(Register dst) {
16329 int encode = prefixq_and_encode(dst->encoding());
16330 emit_int16((unsigned char)0xD3, (0xE8 | encode));
16331 }
16332
16333 void Assembler::eshrq(Register dst, Register src, bool no_flags) {
16334 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16335 int encode = emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16336 emit_int16((unsigned char)0xD3, (0xE8 | encode));
16337 }
16338
16339 void Assembler::shrq(Address dst) {
16340 InstructionMark im(this);
16341 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
16342 emit_operand(as_Register(5), dst, 0);
16343 }
16344
16345 void Assembler::eshrq(Register dst, Address src, bool no_flags) {
16346 InstructionMark im(this);
16347 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16348 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16349 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16350 emit_int8((unsigned char)0xD3);
16351 emit_operand(as_Register(5), src, 0);
16352 }
16353
16354 void Assembler::shrq(Address dst, int imm8) {
16355 InstructionMark im(this);
16356 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16357 if (imm8 == 1) {
16358 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD1);
16359 emit_operand(as_Register(5), dst, 0);
16360 } else {
16362 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC1);
16363 emit_operand(as_Register(5), dst, 1);
16364 emit_int8(imm8);
16365 }
16366 }
16367
16368 void Assembler::eshrq(Register dst, Address src, int imm8, bool no_flags) {
16369 InstructionMark im(this);
16370 assert(isShiftCount(imm8 >> 1), "illegal shift count");
16371 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16372 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16373 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16374 if (imm8 == 1) {
16375 emit_int8((unsigned char)0xD1);
16376 emit_operand(as_Register(5), src, 0);
16377 } else {
16379 emit_int8((unsigned char)0xC1);
16380 emit_operand(as_Register(5), src, 1);
16381 emit_int8(imm8);
16382 }
16383 }
16384
16385 void Assembler::subq(Address dst, int32_t imm32) {
16386 InstructionMark im(this);
16387 prefixq(dst);
16388 emit_arith_operand(0x81, rbp, dst, imm32);
16389 }
16390
16391 void Assembler::esubq(Register dst, Address src, int32_t imm32, bool no_flags) {
16392 InstructionMark im(this);
16393 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16394 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16395 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16396 emit_arith_operand(0x81, rbp, src, imm32);
16397 }
16398
16399 void Assembler::subq(Address dst, Register src) {
16400 InstructionMark im(this);
16401 emit_prefix_and_int8(get_prefixq(dst, src), 0x29);
16402 emit_operand(src, dst, 0);
16403 }
16404
16405 void Assembler::esubq(Register dst, Address src1, Register src2, bool no_flags) {
16406 InstructionMark im(this);
16407 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16408 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16409 eevex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16410 emit_int8(0x29);
16411 emit_operand(src2, src1, 0);
16412 }
16413
16414 void Assembler::subq(Register dst, int32_t imm32) {
16415 (void) prefixq_and_encode(dst->encoding());
16416 emit_arith(0x81, 0xE8, dst, imm32);
16417 }
16418
16419 void Assembler::esubq(Register dst, Register src, int32_t imm32, bool no_flags) {
16420 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x81, 0xE8, no_flags);
16421 }
16422
16423 // Force generation of a 4-byte immediate value even if it fits into 8 bits
16424 void Assembler::subq_imm32(Register dst, int32_t imm32) {
16425 (void) prefixq_and_encode(dst->encoding());
16426 emit_arith_imm32(0x81, 0xE8, dst, imm32);
16427 }
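// Illustrative: unlike subq(), which would pick the short imm8 form for small
// values, both of these calls emit the same fixed-length REX.W 81 /5 id
// encoding (useful when the instruction length must not depend on the value):
//   masm.subq_imm32(rsp, 8);
//   masm.subq_imm32(rsp, 0x1000);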
16428
16429 void Assembler::esubq_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
16430 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16431 (void) emit_eevex_prefix_or_demote_ndd(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16432 emit_arith_imm32(0x81, 0xE8, src, imm32);
16433 }
16434
16435 void Assembler::subq(Register dst, Address src) {
16436 InstructionMark im(this);
16437 emit_prefix_and_int8(get_prefixq(src, dst), 0x2B);
16438 emit_operand(dst, src, 0);
16439 }
16440
16441 void Assembler::esubq(Register dst, Register src1, Address src2, bool no_flags) {
16442 InstructionMark im(this);
16443 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x2B, no_flags);
16444 }
16445
16446 void Assembler::subq(Register dst, Register src) {
16447 (void) prefixq_and_encode(dst->encoding(), src->encoding());
16448 emit_arith(0x2B, 0xC0, dst, src);
16449 }
16450
16451 void Assembler::esubq(Register dst, Register src1, Register src2, bool no_flags) {
16452 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16453 // NDD shares its encoding bits with the NDS bits of a regular EVEX instruction.
16454 // Therefore, DST is passed as the second argument to minimize changes in the leaf-level routine.
16455 (void) emit_eevex_prefix_or_demote_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags, true /* use_prefixq */);
16456 emit_arith(0x2B, 0xC0, src1, src2);
16457 }
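// Illustrative: esubq(rdx, rax, rbx, false) computes rdx = rax - rbx; the NDD
// destination travels in the EVEX.vvvv field, hence the swapped argument order
// in the call above (a sketch of the encoding intent; see the APX spec for the
// authoritative field layout).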
16458
16459 void Assembler::testq(Address dst, int32_t imm32) {
16460 InstructionMark im(this);
16461 emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xF7);
16462 emit_operand(as_Register(0), dst, 4);
16463 emit_int32(imm32);
16464 }
16465
16466 void Assembler::testq(Register dst, int32_t imm32) {
16467 // Not using emit_arith because test
16468 // does not support sign-extension of
16469 // 8-bit immediate operands.
16470 if (dst == rax) {
16471 prefix(REX_W);
16472 emit_int8((unsigned char)0xA9);
16473 emit_int32(imm32);
16474 } else {
16475 int encode = dst->encoding();
16476 encode = prefixq_and_encode(encode);
16477 emit_int16((unsigned char)0xF7, (0xC0 | encode));
16478 emit_int32(imm32);
16479 }
16480 }
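// Illustrative: testq(rax, 0x100) uses the compact REX.W A9 id form, while
// testq(rbx, 0x100) needs REX.W F7 /0 id; both carry a full 32-bit immediate.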
16481
16482 void Assembler::testq(Register dst, Register src) {
16483 (void) prefixq_and_encode(dst->encoding(), src->encoding());
16484 emit_arith(0x85, 0xC0, dst, src);
16485 }
16486
16487 void Assembler::testq(Register dst, Address src) {
16488 InstructionMark im(this);
16489 emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x85);
16490 emit_operand(dst, src, 0);
16491 }
16492
16493 void Assembler::xaddq(Address dst, Register src) {
16494 InstructionMark im(this);
16495 int prefix = get_prefixq(dst, src, true /* is_map1 */);
16496 emit_prefix_and_int8(prefix, (unsigned char)0xC1);
16497 emit_operand(src, dst, 0);
16498 }
16499
16500 void Assembler::xchgq(Register dst, Address src) {
16501 InstructionMark im(this);
16502 emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x87);
16503 emit_operand(dst, src, 0);
16504 }
16505
16506 void Assembler::xchgq(Register dst, Register src) {
16507 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
16508 emit_int16((unsigned char)0x87, (0xC0 | encode));
16509 }
16510
16511 void Assembler::xorq(Register dst, Register src) {
16512 (void) prefixq_and_encode(dst->encoding(), src->encoding());
16513 emit_arith(0x33, 0xC0, dst, src);
16514 }
16515
16516 void Assembler::exorq(Register dst, Register src1, Register src2, bool no_flags) {
16517 emit_eevex_prefix_or_demote_arith_ndd(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x33, 0xC0, no_flags, true /* is_commutative */);
16518 }
16519
16520 void Assembler::xorq(Register dst, Address src) {
16521 InstructionMark im(this);
16522 emit_prefix_and_int8(get_prefixq(src, dst), 0x33);
16523 emit_operand(dst, src, 0);
16524 }
16525
16526 void Assembler::exorq(Register dst, Register src1, Address src2, bool no_flags) {
16527 InstructionMark im(this);
16528 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x33, no_flags);
16529 }
16530
16531 void Assembler::xorq(Register dst, int32_t imm32) {
16532 (void) prefixq_and_encode(dst->encoding());
16533 emit_arith(0x81, 0xF0, dst, imm32);
16534 }
16535
16536 void Assembler::exorq(Register dst, Register src, int32_t imm32, bool no_flags) {
16537 emit_eevex_prefix_or_demote_arith_ndd(dst, src, imm32, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x81, 0xF0, no_flags);
16538 }
16539
16540 void Assembler::xorq(Address dst, int32_t imm32) {
16541 InstructionMark im(this);
16542 prefixq(dst);
16543 emit_arith_operand(0x81, as_Register(6), dst, imm32);
16544 }
16545
16546 void Assembler::exorq(Register dst, Address src, int32_t imm32, bool no_flags) {
16547 InstructionMark im(this);
16548 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16549 attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16550 eevex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, &attributes, no_flags);
16551 emit_arith_operand(0x81, as_Register(6), src, imm32);
16552 }
16553
16554 void Assembler::xorq(Address dst, Register src) {
16555 InstructionMark im(this);
16556 emit_prefix_and_int8(get_prefixq(dst, src), 0x31);
16557 emit_operand(src, dst, 0);
16558 }
16559
16560 void Assembler::esetzucc(Condition cc, Register dst) {
16561 assert(VM_Version::supports_apx_f(), "");
16562 assert(0 <= cc && cc < 16, "illegal cc");
16563 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16564 // Encoding format: eevex_prefix (4 bytes) | opcode_cc | modrm
16565 int encode = emit_eevex_prefix_ndd(dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3C /* MAP4 */, &attributes); // demotion disabled
16566 emit_opcode_prefix_and_encoding((0x40 | cc), 0xC0, encode);
16567 }
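// Illustrative use (assuming APX is available): esetzucc(Assembler::equal, rax)
// materializes ZF as 0/1 in the full 64-bit rax, with the upper bits zeroed by
// the instruction itself, so no preceding xor/movzbl is needed.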
16568
16569 void Assembler::exorq(Register dst, Address src1, Register src2, bool no_flags) {
16570 InstructionMark im(this);
16571 emit_eevex_or_demote(dst, src1, src2, VEX_SIMD_NONE, VEX_OPCODE_0F_3C /* MAP4 */, EVEX_64bit, 0x31, no_flags, false /* is_map1 */, true /* is_commutative */);
16572 }
16573
16574 void InstructionAttr::set_address_attributes(int tuple_type, int input_size_in_bits) {
16575 if (VM_Version::supports_evex()) {
16576 _tuple_type = tuple_type;
16577 _input_size_in_bits = input_size_in_bits;
16578 }
16579 }
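// Illustrative: the tuple_type/input_size pair recorded here feeds EVEX
// disp8*N compression, e.g. a tuple with N == 64 lets a displacement of 0x40
// be encoded as the single compressed byte 0x01 (a sketch of the mechanism,
// not of any particular call site).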
16580
void Assembler::evpermi2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

void Assembler::evpermi2w(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

void Assembler::evpermi2d(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

void Assembler::evpermi2ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x77, (0xC0 | encode));
}

void Assembler::evpermi2pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x77, (0xC0 | encode));
}

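// VPERMT2{B,W,D,Q}: two-table full permute with the indices in nds; the
// tables are dst and src, and the result overwrites dst.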
void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7D, (0xC0 | encode));
}

void Assembler::evpermt2w(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx512vlbw() : VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7D, (0xC0 | encode));
}

void Assembler::evpermt2d(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7E, (0xC0 | encode));
}

void Assembler::evpermt2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7E, (0xC0 | encode));
}

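// AVX512-FP16 packed half-precision arithmetic (VADDPH, VSUBPH, VMULPH,
// VMINPH, VMAXPH, VDIVPH). These encode in map 5 with no SIMD prefix; the
// memory forms use the full-vector (EVEX_FV) tuple, so an 8-bit displacement
// is scaled by the full vector length.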
void Assembler::evaddph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::evaddph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::evsubph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::evsubph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::evmulph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::evmulph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::evminph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::evminph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x5D);
  emit_operand(dst, src, 0);
}

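// VMINMAXPH (AVX10.2): imm8 selects the min/max variant and its NaN and
// sign handling (including the IEEE 754-2019 minimum/maximum forms); see
// the AVX10.2 specification for the imm8 encoding.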
void Assembler::evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len) {
  assert(VM_Version::supports_avx10_2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
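  // Masked EVEX ops default to zeroing; for merge semantics drop EVEX.z so
  // lanes with a clear mask bit keep the destination's previous values.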
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x52, (0xC0 | encode), imm8);
}

void Assembler::evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len) {
  assert(VM_Version::supports_avx10_2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x52);
  emit_operand(dst, src, 1); // one imm8 byte follows the operand; count it for RIP-relative displacement fixup
  emit_int8(imm8);
}

void Assembler::evmaxph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::evmaxph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x5F);
  emit_operand(dst, src, 0);
}

void Assembler::evdivph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::evdivph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

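// VSQRTPH: packed half-precision square root (map 5, no SIMD prefix).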
void Assembler::evsqrtph(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::evsqrtph(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

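// VFMADD132PH: packed half-precision fused multiply-add using the "132"
// operand order, dst = dst * src + nds, encoded in map 6 with the 66 SIMD
// prefix.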
void Assembler::evfmadd132ph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP6, &attributes);
  emit_int16(0x98, (0xC0 | encode));
}

void Assembler::evfmadd132ph(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_MAP6, &attributes);
  emit_int8(0x98);
  emit_operand(dst, src, 0);
}
