/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement (disp8) on EVEX-enabled platforms.
unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  0,  0,  0    // EVEX_NTUP
};
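
// Worked example (a sketch): for a full-vector 512-bit instruction with no
// broadcast and W=0 (tuple EVEX_FV, mod_idx 0), such as
// vaddps zmm0, zmm0, [rbx + 0x100], the scale factor is
// tuple_table[EVEX_FV + 0][AVX_512bit] = 64, so the displacement 0x100 can be
// emitted as the compressed disp8 value 0x100 / 64 = 4.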

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
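
// For example, an ExternalAddress (a subclass of AddressLiteral) is built with
// relocInfo::external_word_type, so the switch above records an
// external_word_Relocation and the embedded address remains patchable when the
// containing code blob is relocated.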

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  _xmmindex = xnoreg;
  _isxmmindex = false;
  switch (rtype) {
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(loc);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(loc);
    break;
  case relocInfo::runtime_call_type:
    // HMM
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
  _xmmindex = xnoreg;
  _isxmmindex = false;
}

#endif // _LP64


// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec = RelocationHolder::none;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

void Assembler::init_attributes(void) {
  _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
  _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
  _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
  _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
  NOT_LP64(_is_managed = false;)
  _attributes = NULL;
}


void Assembler::membar(Membar_mask_bits order_constraint) {
  // We only have to handle StoreLoad
  if (order_constraint & StoreLoad) {
    // All usable chips support "locked" instructions which suffice
    // as barriers, and are much faster than the alternative of
    // using the cpuid instruction. Here we use a locked add [esp-C], 0,
    // which is conveniently otherwise a no-op except for blowing
    // flags, and introducing a false dependency on the target memory
    // location. We can't do anything with flags, but we can avoid
    // memory dependencies in the current method by locked-adding
    // somewhere else on the stack. Doing [esp+C] will collide with
    // something on the stack in the current method, hence we go for [esp-C].
    // It is convenient since it is almost always in the data cache, for
    // any small C. We need to step back from SP to avoid data
    // dependencies with other things below SP (callee-saves, for
    // example). Without a clear way to figure out the minimal safe
    // distance from SP, it makes sense to step back a complete
    // cache line, as this will also avoid possible second-order effects
    // with locked ops against the cache line. Our choice of offset
    // is bounded by x86 operand encoding, which should stay within
    // [-128; +127] to have the 8-bit displacement encoding.
    //
    // Any change to this code may need to revisit other places in
    // the code where this idiom is used, in particular the
    // orderAccess code.

    int offset = -VM_Version::L1_line_size();
    if (offset < -128) {
      offset = -128;
    }

    lock();
    addl(Address(rsp, offset), 0); // Assert the lock# signal here
  }
}
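
// Sketch of the resulting byte sequence (assuming a 64-byte L1 line):
//   lock addl [rsp - 64], 0   ; F0 83 44 24 C0 00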

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int24(op1, (op2 | encode(dst)), imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(op1 == 0x81, "Unexpected opcode");
  if (is8bit(imm32)) {
    emit_int24(op1 | 0x02,        // set sign bit
               op2 | encode(dst),
               imm32 & 0xFF);
  } else if (dst == rax) {
    switch (op2) {
      case 0xD0: emit_int8(0x15); break; // adc
      case 0xC0: emit_int8(0x05); break; // add
      case 0xE0: emit_int8(0x25); break; // and
      case 0xF8: emit_int8(0x3D); break; // cmp
      case 0xC8: emit_int8(0x0D); break; // or
      case 0xD8: emit_int8(0x1D); break; // sbb
      case 0xE8: emit_int8(0x2D); break; // sub
      case 0xF0: emit_int8(0x35); break; // xor
      default: ShouldNotReachHere();
    }
    emit_int32(imm32);
  } else {
    emit_int16(op1, (op2 | encode(dst)));
    emit_int32(imm32);
  }
}

// Force generation of a 4-byte immediate value even if it fits into 8 bits
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int16(op1, (op2 | encode(dst)));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}

void Assembler::emit_arith_operand_imm32(int op1, Register rm, Address adr, int32_t imm32) {
  assert(op1 == 0x81, "unexpected opcode");
  emit_int8(op1);
  emit_operand(rm, adr, 4);
  emit_int32(imm32);
}

void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int16(op1, (op2 | encode(dst) << 3 | encode(src)));
}
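
// Worked example of the register-immediate form above: addl(rbx, 16) takes the
// is8bit path and emits 83 C3 10, while addl(rbx, 0x12345) emits the full
// imm32 form 81 C3 45 23 01 00.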

bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and, if so,
  // apply the compression to the displacement iff the result is 8-bit.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if ((-0x80 <= new_disp && new_disp < 0x80)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}

bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and, if so,
  // apply the compression to the displacement iff the result is 8-bit.
  if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}
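
// Worked example: for an EVEX_T1S instruction with a 64-bit input size
// (e.g. an EVEX-encoded addsd xmm0, [rbx + 0x40]), mod_idx is 3,
// tuple_table[EVEX_T1S + 3][vector_len] is 8, and the displacement 0x40
// compresses to 8, which fits in (and is emitted as) a disp8.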

static bool is_valid_encoding(int reg_enc) {
  return reg_enc >= 0;
}

static int raw_encode(Register reg) {
  assert(reg == noreg || reg->is_valid(), "sanity");
  int reg_enc = reg->raw_encoding();
  assert(reg_enc == -1 || is_valid_encoding(reg_enc), "sanity");
  return reg_enc;
}

static int raw_encode(XMMRegister xmmreg) {
  assert(xmmreg == xnoreg || xmmreg->is_valid(), "sanity");
  int xmmreg_enc = xmmreg->raw_encoding();
  assert(xmmreg_enc == -1 || is_valid_encoding(xmmreg_enc), "sanity");
  return xmmreg_enc;
}

static int raw_encode(KRegister kreg) {
  assert(kreg == knoreg || kreg->is_valid(), "sanity");
  int kreg_enc = kreg->raw_encoding();
  assert(kreg_enc == -1 || is_valid_encoding(kreg_enc), "sanity");
  return kreg_enc;
}

static int modrm_encoding(int mod, int dst_enc, int src_enc) {
  return (mod & 3) << 6 | (dst_enc & 7) << 3 | (src_enc & 7);
}

static int sib_encoding(Address::ScaleFactor scale, int index_enc, int base_enc) {
  return (scale & 3) << 6 | (index_enc & 7) << 3 | (base_enc & 7);
}

inline void Assembler::emit_modrm(int mod, int dst_enc, int src_enc) {
  assert((mod & 3) != 0b11, "forbidden");
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int8(modrm);
}

inline void Assembler::emit_modrm_disp8(int mod, int dst_enc, int src_enc,
                                        int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int16(modrm, disp & 0xFF);
}

inline void Assembler::emit_modrm_sib(int mod, int dst_enc, int src_enc,
                                      Address::ScaleFactor scale, int index_enc, int base_enc) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int16(modrm, sib);
}

inline void Assembler::emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc,
                                            Address::ScaleFactor scale, int index_enc, int base_enc,
                                            int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int24(modrm, sib, disp & 0xFF);
}

void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
                                    Address::ScaleFactor scale, int disp,
                                    RelocationHolder const& rspec,
                                    int post_addr_length) {
  bool no_relocation = (rspec.type() == relocInfo::none);

  if (is_valid_encoding(base_enc)) {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && no_relocation &&
          base_enc != rbp->encoding() LP64_ONLY(&& base_enc != r13->encoding())) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       scale, index_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             scale, index_enc, base_enc,
                             disp);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       scale, index_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base_enc == rsp->encoding() LP64_ONLY(|| base_enc == r12->encoding())) {
      // [rsp + disp]
      if (disp == 0 && no_relocation) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             Address::times_1, 0b100, 0b100,
                             disp);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base_enc != rsp->encoding() LP64_ONLY(&& base_enc != r12->encoding()), "illegal addressing mode");
      if (disp == 0 && no_relocation &&
          base_enc != rbp->encoding() LP64_ONLY(&& base_enc != r13->encoding())) {
        // [base]
        // [00 reg base]
        emit_modrm(0, reg_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_modrm_disp8(0b01, reg_enc, base_enc,
                         disp);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_modrm(0b10, reg_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // base == noreg
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100,
                     scale, index_enc, 0b101 /* no base */);
      emit_data(disp, rspec, disp32_operand);
    } else if (!no_relocation) {
      // base == noreg, index == noreg
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 reg 101] disp32

      emit_modrm(0b00, reg_enc, 0b101 /* no base */);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + post_addr_length;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // base == noreg, index == noreg, no_relocation == true
      // 32bit never did this; it did everything via the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100 /* no base */,
                     Address::times_1, 0b100, 0b101);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
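
// Worked example: emitting the operand for edx with base = rax, index = rcx,
// scale = times_4 and disp = 16 takes the [base + index*scale + imm8] path:
//   ModRM 0x54 = [01 010 100], SIB 0x88 = [10 001 000], disp8 0x10.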

void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(reg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, XMMRegister xmmindex,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  assert(xmmindex->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(xmmindex),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(KRegister kreg, Address adr,
                             int post_addr_length) {
  emit_operand(kreg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               post_addr_length);
}

void Assembler::emit_operand(KRegister kreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(kreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
 again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x10: // movups
    case 0x11: // movups
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0x6F: // movdq
    case 0x7F: // movdq
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xFE: // paddd
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x15: // adc rax, #32
  case 0x05: // add rax, #32
  case 0x25: // and rax, #32
  case 0x3D: // cmp rax, #32
  case 0x0D: // or rax, #32
  case 0x1D: // sbb rax, #32
  case 0x2D: // sub rax, #32
  case 0x35: // xor rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xA8: // testb rax, #8
    return which == end_pc_operand ? ip + 1 : ip;
  case 0xA9: // testl/testq rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for the PINSRW and PEXTRW instructions,
    // but those have the 0x0F prefix and are handled when 0x0F is processed above.
    //
    // In 32-bit mode the VEX first bytes C4 and C5 alias onto the LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them, bits [7:6] are set in the VEX second byte since
    // the ModRM byte cannot be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits, the REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in the product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
        case 0x70: // pshufd r, r/a, #8
        case 0x71: // ps[rl|ra|ll]w r, #8
        case 0x72: // ps[rl|ra|ll]d r, #8
        case 0x73: // ps[rl|ra|ll]q r, #8
        case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
        case 0xC4: // pinsrw r, r, r/a, #8
        case 0xC5: // pextrw r/a, r, #8
        case 0xC6: // shufp[s|d] r, r, r/a, #8
          tail_size = 1;  // the imm8
          break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x22: // pinsrd r, r/a, #8
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
    case 0x1f: // evpcmpd/evpcmpq
    case 0x3f: // evpcmpb/evpcmpw
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
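
// Example: for the 5-byte instruction E8 <disp32> (call rdisp32),
// locate_operand(inst, call32_operand) returns inst + 1 (the start of the
// disp32), and locate_operand(inst, end_pc_operand) returns inst + 5.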

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand(Register reg, Address adr, int post_addr_length) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister reg, Address adr, int post_addr_length) {
  if (adr.isxmmindex()) {
    emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec, post_addr_length);
  } else {
    emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
  }
}

// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst, 0);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src, 0);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::addw(Register dst, Register src) {
  emit_int8(0x66);
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst, 0);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src, 0);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int32(0x0F,
             0x1F,
             0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
             0);   // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int32(0x0F,
             0x1F,
             0x44,  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int24(0x0F,
             0x1F,
             (unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int32(0x0F,
             0x1F,
             (unsigned char)0x84,
                    // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);    // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}
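
// For reference, the legacy (non-VEX) form addsd xmm0, [rsp + 8] assembles to
// F2 0F 58 44 24 08 when no REX prefix is required.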

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src, 0);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (0xC0 | encode));
}

void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (0xC0 | encode));
}


void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src, 0);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src, 0);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, 0xC0 | encode);
}

void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, (0xC0 | encode));
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src, 0);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (0xC0 | encode));
}

void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (0xC0 | encode));
}
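
// All of the AES instructions above share the 66 0F 38 Dx /r encoding family
// (DC/DD for encrypt, DE/DF for decrypt); e.g. aesenc xmm1, xmm2 assembles
// to 66 0F 38 DC CA.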

void Assembler::andb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8(0x20);
  emit_operand(src, dst, 0);
}

void Assembler::andw(Register dst, Register src) {
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, as_Register(4), dst, imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x21);
  emit_operand(src, dst, 0);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src, 0);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2, 0);
}
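
// ANDN computes dst = ~src1 & src2, so andnl(rax, rbx, rcx) leaves
// ~rbx & rcx in rax (src1 travels in the VEX vvvv field).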

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             (unsigned char)0xBC,
             0xC0 | encode);
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             (unsigned char)0xBD,
             0xC0 | encode);
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int16(0x0F, (0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src, 0);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3,
             0xC0 | encode);
}

void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src, 0);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src, 0);
}
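
// Note on the BLS* encodings above: BLSI, BLSMSK and BLSR share opcode 0xF3
// and are distinguished by the ModRM reg field (/3, /2 and /1), which is why
// rbx, rdx and rcx appear as the first argument to vex_prefix_and_encode.
// Semantically, blsil(dst, src) computes dst = src & -src (isolate lowest set bit).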
void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             0x40 | cc,
             0xC0 | encode);
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int16(0x0F, (0x40 | cc));
  emit_operand(dst, src, 0);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, as_Register(7), dst, imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src, 0);
}

void Assembler::cmpl_imm32(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand_imm32(0x81, as_Register(7), dst, imm32);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if the values are equal; otherwise, the value at
// adr is loaded into rax. The ZF is set if the compared values were equal,
// and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int16(0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr, 0);
}

void Assembler::cmpxchgw(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  size_prefix();
  prefix(adr, reg);
  emit_int16(0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr, 0);
}

// The 8-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if the values are equal; otherwise, the value at
// adr is loaded into rax. The ZF is set if the compared values were equal,
// and cleared otherwise.
void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg, true);
  emit_int16(0x0F, (unsigned char)0xB0);
  emit_operand(reg, adr, 0);
}
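
// Usage sketch (illustrative only, assuming the `__` shorthand): the classic
// rax-based compare-and-swap idiom pairs cmpxchg with an explicit lock prefix
// to make the exchange atomic on multiprocessor systems:
//   __ movl(rax, expected);      // cmpxchg implicitly compares against rax
//   __ lock();
//   __ cmpxchgl(newval, field);  // success: [field] = newval, ZF = 1
//                                // failure: rax = [field],    ZF = 0
// (`expected`, `newval`, and `field` are hypothetical names.)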
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 prefix is there; strangely, ucomisd comes out correct.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src, 0);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2F, (0xC0 | encode));
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src, 0);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2F, (0xC0 | encode));
}

void Assembler::cpuid() {
  emit_int16(0x0F, (unsigned char)0xA2);
}

// Opcode / Instruction                       Op/En  64-Bit Mode  Compat/Leg Mode  Description                 Implemented
// F2 0F 38 F0 / r        CRC32 r32, r/m8    RM     Valid        Valid            Accumulate CRC32 on r/m8.   v
// F2 REX 0F 38 F0 / r    CRC32 r32, r/m8*   RM     Valid        N.E.             Accumulate CRC32 on r/m8.   -
// F2 REX.W 0F 38 F0 / r  CRC32 r64, r/m8    RM     Valid        N.E.             Accumulate CRC32 on r/m8.   -
//
// F2 0F 38 F1 / r        CRC32 r32, r/m16   RM     Valid        Valid            Accumulate CRC32 on r/m16.  v
//
// F2 0F 38 F1 / r        CRC32 r32, r/m32   RM     Valid        Valid            Accumulate CRC32 on r/m32.  v
//
// F2 REX.W 0F 38 F1 / r  CRC32 r64, r/m64   RM     Valid        N.E.             Accumulate CRC32 on r/m64.  v
void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((unsigned char)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32 bits
    // Note:
    // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
    //
    // Page B-72 Vol. 2C says
    //   qwreg2 to qwreg  1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
    //   mem64 to qwreg   1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r/m
    //   (note the F0 opcode byte!)
    // while 3-208 Vol. 2A says
    //   F2 REX.W 0F 38 F1 / r  CRC32 r64, r/m64  RM  Valid  N.E.  Accumulate CRC32 on r/m64.
    //
    // The 0 in the last bit is reserved for a different flavor of this instruction:
    //   F2 REX.W 0F 38 F0 / r  CRC32 r64, r/m8   RM  Valid  N.E.  Accumulate CRC32 on r/m8.
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, v, p);)
  emit_int32(0x0F,
             0x38,
             0xF0 | w,
             0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
}

void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32 bits
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, adr, p);)
  emit_int24(0x0F, 0x38, (0xF0 | w));
  emit_operand(crc, adr, 0);
}
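
// Illustrative sketch (hypothetical register and label names): a checksum
// routine typically folds one element into the accumulator per iteration:
//   __ bind(L_loop);
//   __ crc32(crc, Address(buf, 0), 4);     // fold 4 bytes from [buf] into crc
//   __ addl(buf, 4);                       // advance the cursor
//   __ subl(len, 4);
//   __ jcc(Assembler::notZero, L_loop);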
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (0xC0 | encode));
}

void Assembler::vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (0xC0 | encode));
}

void Assembler::vcvtps2ph(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x1D, (0xC0 | encode), imm8);
}

void Assembler::evcvtps2ph(Address dst, KRegister mask, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1D);
  emit_operand(src, dst, 1);
  emit_int8(imm8);
}

void Assembler::vcvtps2ph(Address dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1D);
  emit_operand(src, dst, 1);
  emit_int8(imm8);
}

void Assembler::vcvtph2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x13, (0xC0 | encode));
}

void Assembler::vcvtph2ps(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x13);
  emit_operand(dst, src, 0);
}
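
// Sketch (illustrative only): these two emitters form a round trip between
// single precision and IEEE half precision, e.g. for one 128-bit lane:
//   __ vcvtps2ph(xmm1, xmm0, 0x04, Assembler::AVX_128bit); // 4 floats -> 4 halves
//   __ vcvtph2ps(xmm2, xmm1, Assembler::AVX_128bit);       // widen back to floats
// Here imm8 = 0x04 defers rounding control to MXCSR; see the Intel SDM for
// the full imm8 rounding-control encoding.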
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5B, (0xC0 | encode));
}

void Assembler::vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5B, (0xC0 | encode));
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src, 0);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (0xC0 | encode));
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src, 0);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (0xC0 | encode));
}

void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src, 0);
}
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (0xC0 | encode));
}

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src, 0);
}


void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (0xC0 | encode));
}

void Assembler::cvtss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2D, (0xC0 | encode));
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (0xC0 | encode));
}

void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (0xC0 | encode));
}

void Assembler::pabsb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1C, (0xC0 | encode));
}

void Assembler::pabsw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1D, (0xC0 | encode));
}

void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1E, (0xC0 | encode));
}

void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1C, (0xC0 | encode));
}

void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1D, (0xC0 | encode));
}
void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_evex() : false, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1E, (0xC0 | encode));
}

void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1F, (0xC0 | encode));
}

void Assembler::vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::vcvttps2dq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5B, (0xC0 | encode));
}

void Assembler::vcvttpd2dq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (0xC0 | encode));
}

void Assembler::vcvtps2dq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5B, (0xC0 | encode));
}

void Assembler::evcvttps2qq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7A, (0xC0 | encode));
}

void Assembler::evcvtpd2qq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7B, (0xC0 | encode));
}

void Assembler::evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5B, (0xC0 | encode));
}

void Assembler::evcvttpd2qq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7A, (0xC0 | encode));
}

void Assembler::evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (0xC0 | encode));
}

void Assembler::evpmovwb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x30, (0xC0 | encode));
}

void Assembler::evpmovdw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x33, (0xC0 | encode));
}

void Assembler::evpmovdb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x31, (0xC0 | encode));
}

void Assembler::evpmovqd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x35, (0xC0 | encode));
}

void Assembler::evpmovqb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x32, (0xC0 | encode));
}

void Assembler::evpmovqw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x34, (0xC0 | encode));
}

void Assembler::evpmovsqd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x25, (0xC0 | encode));
}
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst, 0);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xF0 | encode));
}

void Assembler::imull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xE8 | encode));
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             (unsigned char)0xAF,
             (0xC0 | encode));
}

void Assembler::imull(Register dst, Address src, int32_t value) {
  InstructionMark im(this);
  prefix(src, dst);
  if (is8bit(value)) {
    emit_int8((unsigned char)0x6B);
    emit_operand(dst, src, 1);
    emit_int8(value);
  } else {
    emit_int8((unsigned char)0x69);
    emit_operand(dst, src, 4);
    emit_int32(value);
  }
}

void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int24(0x6B, (0xC0 | encode), value & 0xFF);
  } else {
    emit_int16(0x69, (0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xAF);
  emit_operand(dst, src, 0);
}


void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst, 0);
}

void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int16(0x0F, (0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: we could eliminate conditional jumps to this jump if the
    // condition is the same; however, that seems to be a rather unlikely case.
    // Note: use jccb() if the label to be bound is very close, to get
    // an 8-bit displacement.
    L.add_patch_at(code(), locator());
    emit_int16(0x0F, (0x80 | cc));
    emit_int32(0);
  }
}

void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator(), file, line);
    emit_int16(0x70 | cc, 0);
  }
}
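
// Sketch (illustrative only): jccb is the right tool when the target is known
// to stay within an 8-bit displacement, e.g. a tight backward spin:
//   Label L_spin;
//   __ bind(L_spin);
//   __ pause();                           // any short loop body works here
//   __ jccb(Assembler::carrySet, L_spin); // 2-byte encoding instead of 6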
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr, 0);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int16((unsigned char)0xEB, ((offs - short_size) & 0xFF));
    } else {
      emit_int8((unsigned char)0xE9);
      emit_int32(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE9);
    emit_int32(0);
  }
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int16((unsigned char)0xFF, (0xE0 | encode));
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec, call32_operand);
}

void Assembler::jmpb_0(Label& L, const char* file, int line) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
#endif
    intptr_t offs = entry - pc();
    emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator(), file, line);
    emit_int16((unsigned char)0xEB, 0);
  }
}
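
// Sketch (illustrative only): a forward jump defaults to the 5-byte 0xE9 form;
// when the bound target is guaranteed to land close, jmpb selects the 2-byte
// 0xEB form instead:
//   Label L_done;
//   __ jmpb(L_done);   // caller asserts the target binds within 127 bytes
//   ...
//   __ bind(L_done);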
void Assembler::ldmxcsr(Address src) {
  if (UseAVX > 0) {
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(2), src, 0);
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(src);
    emit_int16(0x0F, (unsigned char)0xAE);
    emit_operand(as_Register(2), src, 0);
  }
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::lfence() {
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::size_prefix() {
  emit_int8(0x66);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode));
}

void Assembler::lzcntl(Register dst, Address src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xBD);
  emit_operand(dst, src, 0);
}

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0);
}

// Emit sfence instruction
void Assembler::sfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x28, (0xC0 | encode));
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x28, (0xC0 | encode));
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x16, (0xC0 | encode));
}

void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src, 0);
}
void Assembler::movddup(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x12, 0xC0 | encode);
}

void Assembler::movddup(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src, 0);
}

void Assembler::vmovddup(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src, 0);
}

void Assembler::kmovbl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x90, (0xC0 | encode));
}

void Assembler::kmovbl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (0xC0 | encode));
}

void Assembler::kmovbl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (0xC0 | encode));
}

void Assembler::kmovwl(KRegister dst, Register src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (0xC0 | encode));
}

void Assembler::kmovwl(Register dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (0xC0 | encode));
}
void Assembler::kmovwl(KRegister dst, Address src) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src, 0);
}

void Assembler::kmovwl(Address dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x91);
  emit_operand(src, dst, 0);
}

void Assembler::kmovwl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x90, (0xC0 | encode));
}

void Assembler::kmovdl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (0xC0 | encode));
}

void Assembler::kmovdl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (0xC0 | encode));
}

void Assembler::kmovql(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x90, (0xC0 | encode));
}

void Assembler::kmovql(KRegister dst, Address src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src, 0);
}

void Assembler::kmovql(Address dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x91);
  emit_operand(src, dst, 0);
}
void Assembler::kmovql(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (0xC0 | encode));
}

void Assembler::kmovql(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (0xC0 | encode));
}

void Assembler::knotwl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}

void Assembler::knotbl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}

void Assembler::korbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::korwl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::kordl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::korql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}
void Assembler::kxorbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::kxorwl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::kxordl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::kxorql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::kandbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}

void Assembler::kandwl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}

void Assembler::kanddl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}

void Assembler::kandql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}

void Assembler::knotdl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}

void Assembler::knotql(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestbl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestwl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestdl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (0xC0 | encode));
}
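
// KORTEST ORs the two masks and sets ZF when the result is all zeroes and CF
// when it is all ones, so "kortest k, k" is the idiomatic way to branch on a
// mask being empty or full. The KTEST forms below derive ZF/CF from AND/ANDN
// combinations of the two masks instead.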

// This instruction produces ZF or CF flags
void Assembler::ktestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}

void Assembler::ktestdl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}

void Assembler::ktestwl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}

void Assembler::ktestbl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}

void Assembler::ktestq(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}

void Assembler::ktestd(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}

void Assembler::kxnorbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x32, (0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::kshiftlql(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x33, (0xC0 | encode));
  emit_int8(imm8);
}
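
// The kshiftl*/kshiftr* forms shift a mask register by an immediate bit count,
// shifting in zeroes; together with knot/kand/kor they can be used to build
// tail masks for vectorized loops.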

void Assembler::kshiftrbl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x30, (0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::kshiftrwl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x30, (0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::kshiftrdl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x31, (0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::kshiftrql(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x31, (0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::kunpckdql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x4B, (0xC0 | encode));
}

void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst, 0);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6E, (0xC0 | encode));
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7E, (0xC0 | encode));
}
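
// MOVD uses opcode 0x6E when the XMM register is the destination and 0x7E when
// it is the source; the store-direction forms therefore encode with the
// operands swapped so the standard prefix logic emits the right REX/VEX bits.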

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6E);
  emit_operand(dst, src, 0);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(src, dst, 0);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst, 0);
}
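
// movdqa requires its memory operand to be 16-byte aligned (a misaligned
// access faults), while movdqu tolerates any alignment; on modern cores the
// unaligned form is just as fast when the data happens to be aligned.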

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst, 0);
}

void Assembler::vpmaskmovd(XMMRegister dst, XMMRegister mask, Address src, int vector_len) {
  assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8C);
  emit_operand(dst, src, 0);
}

void Assembler::vpmaskmovq(XMMRegister dst, XMMRegister mask, Address src, int vector_len) {
  assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8C);
  emit_operand(dst, src, 0);
}

void Assembler::vmaskmovps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2C);
  emit_operand(dst, src, 0);
}

void Assembler::vmaskmovpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2D);
  emit_operand(dst, src, 0);
}
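
// The VMASKMOV/VPMASKMOV family conditions each element on the sign bit of the
// corresponding mask element: masked-off lanes read as zero on loads and leave
// memory untouched on stores, which makes these useful for loop prologues and
// epilogues that would otherwise access past the end of an array.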

void Assembler::vmaskmovps(Address dst, XMMRegister src, XMMRegister mask, int vector_len) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(dst, mask->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2E);
  emit_operand(src, dst, 0);
}

void Assembler::vmaskmovpd(Address dst, XMMRegister src, XMMRegister mask, int vector_len) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(dst, mask->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2F);
  emit_operand(src, dst, 0);
}

// Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64)
void Assembler::evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdqub(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}

void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
  // Unmasked instruction
  evmovdqub(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst, 0);
}

void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
  // Unmasked instruction
  evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
}
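
// For the evmovdqu* helpers, merge selects EVEX merging-masking (destination
// elements whose mask bit is 0 are preserved), while merge == false selects
// zeroing-masking (those elements are cleared). Passing k0 encodes "no mask",
// which is why the unmasked overloads simply delegate with k0.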

void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}

void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst, 0);
}

void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
  // Unmasked instruction
  evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}
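
// Note: the unmasked store overloads below pass merge == true. Zeroing-masking
// is architecturally not allowed with a memory destination (EVEX.z must be 0
// for stores), so merging-masking is the only valid encoding there.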

void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdqul(dst, k0, src, /*merge*/ true, vector_len);
}

void Assembler::evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst, 0);
}

void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
  // Unmasked instruction
  evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
}

void Assembler::evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src, 0);
}

void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdquq(dst, k0, src, /*merge*/ true, vector_len);
}

void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst, 0);
}
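
// Sketch of a typical use (hypothetical, assuming a MacroAssembler context and
// a tail mask already computed into k1): a masked tail copy can pair the
// masked load and store overloads above, e.g.
//   __ evmovdqub(xmm0, k1, Address(rsi, 0), /*merge*/ false, Assembler::AVX_512bit);
//   __ evmovdqub(Address(rdi, 0), k1, xmm0, /*merge*/ true, Assembler::AVX_512bit);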

// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src, 0);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst, 0);
}

// Newer CPUs require the use of movsd and movss to avoid partial register
// stalls when loading from memory, but for old Opterons movlpd is used
// instead of movsd. The selection is done in MacroAssembler::movdbl()
// and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src, 0);
}

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(dst, src, 0);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst, 0);
}

void Assembler::movq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD6, (0xC0 | encode));
}

void Assembler::movq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7E, (0xC0 | encode));
}

void Assembler::movq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6E, (0xC0 | encode));
}

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xBE);
  emit_operand(dst, src, 0);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src, 0);
}

void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst, 0);
}

void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}
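
// Reminder on scalar move semantics: the register-to-register forms of
// movsd/movss copy only the low element and preserve the upper bits of the
// destination, while the memory-load forms zero the upper bits.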

void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src, 0);
}

void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst, 0);
}

void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xBF);
  emit_operand(dst, src, 0);
}

void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode));
}

void Assembler::movups(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src, 0);
}

void Assembler::vmovups(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src, 0);
}

void Assembler::movups(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst, 0);
}

void Assembler::vmovups(Address dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst, 0);
}

void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src, 0);
}

void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst, 0);
}

void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xB6);
  emit_operand(dst, src, 0);
}

void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int24(0x0F, (unsigned char)0xB6, 0xC0 | encode);
}

void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xB7);
  emit_operand(dst, src, 0);
}

void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB7, 0xC0 | encode);
}

void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src, 0);
}

void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xE0 | encode));
}

void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}
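
// The unsigned multiplies above use the /4 form of opcode 0xF7
// (EDX:EAX := EAX * src); the scalar SSE multiplies (mulsd/mulss) operate
// on the low element only and leave the upper bits of the destination
// register unchanged.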

void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD8 | encode));
}

void Assembler::negl(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF7);
  emit_operand(as_Register(3), dst, 0);
}

void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int24(0x66, 0x66, 0x66);
      addr_nop_8();
      emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
"0x0F 0x1F 0x00" - need patching safe padding 3811 emit_int8(0x66); // size prefix 3812 case 2: 3813 emit_int8(0x66); // size prefix 3814 case 1: 3815 emit_int8((unsigned char)0x90); 3816 // nop 3817 break; 3818 default: 3819 assert(i == 0, " "); 3820 } 3821 return; 3822 } 3823 if (UseAddressNop && VM_Version::is_amd_family()) { 3824 // 3825 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. 3826 // 1: 0x90 3827 // 2: 0x66 0x90 3828 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3829 // 4: 0x0F 0x1F 0x40 0x00 3830 // 5: 0x0F 0x1F 0x44 0x00 0x00 3831 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3832 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3833 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3834 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3835 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3836 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3837 3838 // The rest coding is AMD specific - use consecutive address nops 3839 3840 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 3841 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 3842 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3843 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3844 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3845 // Size prefixes (0x66) are added for larger sizes 3846 3847 while(i >= 22) { 3848 i -= 11; 3849 emit_int24(0x66, 0x66, 0x66); 3850 addr_nop_8(); 3851 } 3852 // Generate first nop for size between 21-12 3853 switch (i) { 3854 case 21: 3855 i -= 1; 3856 emit_int8(0x66); // size prefix 3857 case 20: 3858 case 19: 3859 i -= 1; 3860 emit_int8(0x66); // size prefix 3861 case 18: 3862 case 17: 3863 i -= 1; 3864 emit_int8(0x66); // size prefix 3865 case 16: 3866 case 15: 3867 i -= 8; 3868 addr_nop_8(); 3869 break; 3870 case 14: 3871 case 13: 3872 i -= 7; 3873 addr_nop_7(); 3874 break; 3875 case 12: 3876 i -= 6; 3877 emit_int8(0x66); // size prefix 3878 addr_nop_5(); 3879 break; 3880 default: 3881 assert(i < 12, " "); 3882 } 3883 3884 // Generate second nop for size between 11-1 3885 switch (i) { 3886 case 11: 3887 emit_int8(0x66); // size prefix 3888 case 10: 3889 emit_int8(0x66); // size prefix 3890 case 9: 3891 emit_int8(0x66); // size prefix 3892 case 8: 3893 addr_nop_8(); 3894 break; 3895 case 7: 3896 addr_nop_7(); 3897 break; 3898 case 6: 3899 emit_int8(0x66); // size prefix 3900 case 5: 3901 addr_nop_5(); 3902 break; 3903 case 4: 3904 addr_nop_4(); 3905 break; 3906 case 3: 3907 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3908 emit_int8(0x66); // size prefix 3909 case 2: 3910 emit_int8(0x66); // size prefix 3911 case 1: 3912 emit_int8((unsigned char)0x90); 3913 // nop 3914 break; 3915 default: 3916 assert(i == 0, " "); 3917 } 3918 return; 3919 } 3920 3921 if (UseAddressNop && VM_Version::is_zx()) { 3922 // 3923 // Using multi-bytes nops "0x0F 0x1F [address]" for ZX 3924 // 1: 0x90 3925 // 2: 0x66 0x90 3926 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3927 // 4: 0x0F 0x1F 0x40 0x00 3928 // 5: 0x0F 0x1F 0x44 0x00 0x00 3929 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3930 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3931 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3932 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3933 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3934 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3935 3936 // The rest coding 

    // The rest coding is ZX specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while (i >= 15) {
      // For ZX don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int24(0x66, 0x66, 0x66);
      addr_nop_8();
      emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while (i > 12) {
    i -= 4;
    emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
  }
  // 1 - 12 nops
  if (i > 8) {
    if (i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int24(0x66, 0x66, (unsigned char)0x90);
  }
  // 1 - 8 nops
  if (i > 4) {
    if (i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int24(0x66, 0x66, (unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}

void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD0 | encode));
}

void Assembler::orw(Register dst, Register src) {
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src, 0);
}
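
// emit_arith/emit_arith_operand pick the short sign-extended-imm8 encoding
// (opcode 0x83) automatically when the 32-bit immediate fits in 8 bits, so
// callers do not need to special-case small OR masks.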

void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst, 0);
}

void Assembler::orb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rcx, dst, 1);
  emit_int8(imm8);
}

void Assembler::orb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8(0x08);
  emit_operand(src, dst, 0);
}

void Assembler::packsswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x63, (0xC0 | encode));
}

void Assembler::vpacksswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x63, (0xC0 | encode));
}

void Assembler::packssdw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6B, (0xC0 | encode));
}

void Assembler::vpackssdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6B, (0xC0 | encode));
}

void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_operand(dst, src, 0);
}

void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x67, (0xC0 | encode));
}
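
// The pack* family narrows each element to half its width with saturation:
// packsswb/packssdw clamp to the signed range of the narrower type, while
// packuswb/packusdw clamp to the unsigned range.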

void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x67, (0xC0 | encode));
}

void Assembler::packusdw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x2B, (0xC0 | encode));
}

void Assembler::vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x2B, (0xC0 | encode));
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len != AVX_128bit, "");
  // VEX.256.66.0F3A.W1 00 /r ib
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x00, (0xC0 | encode), imm8);
}
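
// With an immediate operand, vpermq selects each destination qword through a
// 2-bit field of imm8 (like pshufd, but permuting 64-bit elements and allowed
// to cross the 128-bit lane boundary).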
void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_256bit ? VM_Version::supports_avx512vl() :
         vector_len == AVX_512bit ? VM_Version::supports_evex() : false, "not supported");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}

void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}

void Assembler::vpermb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::vpermw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx512vlbw() :
         vector_len == AVX_256bit ? VM_Version::supports_avx512vlbw() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}

void Assembler::vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
  // VEX.NDS.256.66.0F38.W0 36 /r
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}
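
// Illustrative sketch (assumption): unlike the imm8 form of vpermq above,
// vpermb/vpermw/vpermd take their permutation from a vector of indices:
//   dst.dword[i] = src.dword[nds.dword[i] & (lane_count - 1)];   // vpermd
// with byte/word lanes handled analogously by vpermb/vpermw.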
void Assembler::vpermd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
  // VEX.NDS.256.66.0F38.W0 36 /r
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src, 0);
}

void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x46, (0xC0 | encode), imm8);
}

void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x06, (0xC0 | encode), imm8);
}

void Assembler::vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x04, (0xC0 | encode), imm8);
}

void Assembler::vpermilpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x05, (0xC0 | encode), imm8);
}
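
// Illustrative sketch (assumption): the vpermil* forms above shuffle within
// each 128-bit half independently; for the imm8 form of vpermilps:
//   dst.dword[i] = src.dword[(i & ~3) + ((imm8 >> (2 * (i & 3))) & 3)];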
void Assembler::vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x01, (0xC0 | encode), imm8);
}

void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7D, (0xC0 | encode));
}

void Assembler::evpmultishiftqb(XMMRegister dst, XMMRegister ctl, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), ctl->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x83, (unsigned char)(0xC0 | encode));
}

void Assembler::pause() {
  emit_int16((unsigned char)0xF3, (unsigned char)0x90);
}

void Assembler::ud2() {
  emit_int16(0x0F, 0x0B);
}

void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x61, (0xC0 | encode), imm8);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x74, (0xC0 | encode));
}
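
// Illustrative sketch (assumption): per-lane semantics of pcmpeqb, with a[]
// and b[] the 16 byte lanes of dst and src:
//   a[i] = (a[i] == b[i]) ? 0xFF : 0x00;   // all-ones on match, zero otherwise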
void Assembler::vpcmpCCbwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(cond_encoding, (0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x74, (0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components.
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x74, (0xC0 | encode));
}

void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src, 0);
}

void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src, 0);
}
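
// Illustrative usage sketch (hypothetical caller, not from this file): the
// EVEX compares above write an opmask register rather than a vector, e.g.:
//   __ evpcmpeqb(k1, xmm0, xmm1, Assembler::AVX_512bit);  // k1 bit i = (byte lanes i equal)
//   __ kmovql(rax, k1);                                   // materialize the lane mask in a GPR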
void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x3E, (0xC0 | encode), vcc);
}

void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_operand(as_Register(dst_enc), src, 1);
  emit_int8(vcc);
}

void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(dst_enc), src, 0);
}

void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(kdst->encoding()), src, 0);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}
// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components.
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_operand(as_Register(dst_enc), src, 0);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}
// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components.
void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_operand(as_Register(dst_enc), src, 0);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

void Assembler::evpcmpeqq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}
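
// Note (a sketch of how these attribute calls map to EVEX bits, stated as an
// assumption): set_embedded_opmask_register_specifier(mask) selects the k
// register encoded in the EVEX prefix, and reset_is_clear_context() drops
// EVEX.z from zeroing-masking (masked-off elements cleared) to
// merging-masking (masked-off elements keep their previous contents).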
void Assembler::vpcmpCCq(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(cond_encoding, (0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst.
void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components.
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components.
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_operand(as_Register(dst_enc), src, 0);
}

void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x37, (0xC0 | encode));
}

void Assembler::pmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD7, (0xC0 | encode));
}

void Assembler::vpmovmskb(Register dst, XMMRegister src, int vec_enc) {
  assert((VM_Version::supports_avx() && vec_enc == AVX_128bit) ||
         (VM_Version::supports_avx2() && vec_enc == AVX_256bit), "");
  InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD7, (0xC0 | encode));
}
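
// Illustrative sketch (assumption): pmovmskb gathers the top bit of every
// byte lane into a GPR bitmask; for the 128-bit form:
//   dst = 0;
//   for (int i = 0; i < 16; i++) dst |= ((src.byte[i] >> 7) & 1) << i;
// Paired with pcmpeqb this yields a "which bytes matched" bitmask.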
void Assembler::vmovmskps(Register dst, XMMRegister src, int vec_enc) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x50, (0xC0 | encode));
}

void Assembler::vmovmskpd(Register dst, XMMRegister src, int vec_enc) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vec_enc, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x50, (0xC0 | encode));
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x16, (0xC0 | encode), imm8);
}

void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst, 1);
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x16, (0xC0 | encode), imm8);
}

void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst, 1);
  emit_int8(imm8);
}

void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8);
}
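
// Illustrative sketch (assumption): the pextr* family copies one selected
// lane of the source into a GPR or memory, e.g. for pextrd:
//   dst = src.dword[imm8 & 3];
// The pinsr* family below performs the inverse, overwriting a single lane.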
void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x15);
  emit_operand(src, dst, 1);
  emit_int8(imm8);
}

void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x14, (0xC0 | encode), imm8);
}

void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x14);
  emit_operand(src, dst, 1);
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x22, (0xC0 | encode), imm8);
}

void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}

void Assembler::vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x22, (0xC0 | encode), imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x22, (0xC0 | encode), imm8);
}
void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}

void Assembler::vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x22, (0xC0 | encode), imm8);
}

void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
}

void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}

void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
}

void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x20);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}

void Assembler::pinsrb(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x20, (0xC0 | encode), imm8);
}

void Assembler::vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x20, (0xC0 | encode), imm8);
}
void Assembler::insertps(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x21, (0xC0 | encode), imm8);
}

void Assembler::vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x21, (0xC0 | encode), imm8);
}

void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src, 0);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x30, (0xC0 | encode));
}

void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x20, (0xC0 | encode));
}

void Assembler::pmovzxdq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x35, (0xC0 | encode));
}

void Assembler::pmovsxbd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x21, (0xC0 | encode));
}

void Assembler::pmovzxbd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x31, (0xC0 | encode));
}
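
// Illustrative sketch (assumption): the pmovzx*/pmovsx* family widens packed
// lanes taken from the low part of the source, e.g.:
//   pmovzxbw: dst.word[i] = (uint16_t)src.byte[i];          // zero-extend, i = 0..7
//   pmovsxbw: dst.word[i] = (int16_t)(int8_t)src.byte[i];   // sign-extend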
void Assembler::pmovsxbq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x22, (0xC0 | encode));
}

void Assembler::pmovsxwd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x23, (0xC0 | encode));
}

void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src, 0);
}

void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x30, (unsigned char) (0xC0 | encode));
}
void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x20, (0xC0 | encode));
}

void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src, 0);
}

void Assembler::evpmovzxbd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vl(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x31);
  emit_operand(dst, src, 0);
}

void Assembler::evpmovzxbd(XMMRegister dst, Address src, int vector_len) {
  evpmovzxbd(dst, k0, src, vector_len);
}

void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}
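
// Illustrative sketch (assumption): per dword lane, the masked evpandd above
// computes
//   dst[i] = mask[i] ? (nds[i] & src[i]) : (merge ? dst[i] : 0);
// which is why merge == true resets the (zeroing) clear context.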
void Assembler::vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x35, (0xC0 | encode));
}

void Assembler::vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x31, (0xC0 | encode));
}

void Assembler::vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x32, (0xC0 | encode));
}

void Assembler::vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x21, (0xC0 | encode));
}

void Assembler::vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x22, (0xC0 | encode));
}

void Assembler::vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x23, (0xC0 | encode));
}

void Assembler::vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x24, (0xC0 | encode));
}
void Assembler::vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x25, (0xC0 | encode));
}

void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst, 0);
}

void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst, 0);
}

void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x31);
  emit_operand(src, dst, 0);
}

void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " ");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x33, (0xC0 | encode));
}
void Assembler::vpmovzxwq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " ");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x34, (0xC0 | encode));
}

void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::vpmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x04, (0xC0 | encode));
}
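
// Illustrative sketch (assumption): pmaddwd multiplies adjacent signed word
// lanes and sums each pair into a dword lane:
//   dst.dword[i] = (int)a.word[2*i] * b.word[2*i] + (int)a.word[2*i+1] * b.word[2*i+1];
// vpmaddubsw is the unsigned-by-signed byte variant, saturating into words.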
void Assembler::evpmadd52luq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  evpmadd52luq(dst, k0, src1, src2, false, vector_len);
}

void Assembler::evpmadd52luq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512ifma(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }

  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB4, (0xC0 | encode));
}

void Assembler::evpmadd52huq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  evpmadd52huq(dst, k0, src1, src2, false, vector_len);
}

void Assembler::evpmadd52huq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512ifma(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }

  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB5, (0xC0 | encode));
}

void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512_vnni(), "must support vnni");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x52, (0xC0 | encode));
}

// generic
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xB8);
  emit_operand(dst, src, 0);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode));
}
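
// Illustrative usage sketch (hypothetical caller, not from this file):
//   __ popcntl(rax, rbx);   // rax = number of set bits in ebx
// The evpopcnt* forms below compute the same per vector lane under an opmask.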
void Assembler::evpopcntb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::evpopcntw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::evpopcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x55, (0xC0 | encode));
}

void Assembler::evpopcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x55, (0xC0 | encode));
}

void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst, 0);
}
#endif

void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rax, src, 0); // 0, src
}

void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x0D);
  emit_operand(rax, src, 0); // 0, src
}

void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rcx, src, 0); // 1, src
}

void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rdx, src, 0); // 2, src
}

void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rbx, src, 0); // 3, src
}

void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x0D);
  emit_operand(rcx, src, 0); // 1, src
}
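// The prefetch hint lives in the ModRM reg field, which is why a dummy
// Register is passed to emit_operand above: 0F 18 /0 = prefetchnta,
// /1 = prefetcht0, /2 = prefetcht1, /3 = prefetcht2, and in the 3DNow!
// group 0F 0D /0 = prefetch (prefetchr here), /1 = prefetchw.
// Illustrative encoding (nothing emitted by itself):
//
//   prefetcht0(Address(rsi, 0));   // 0F 18 0E (mod=00, reg=/1, rm=rsi)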
void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x00, (0xC0 | encode));
}

void Assembler::evpshufb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x00, (0xC0 | encode));
}

void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x00, (0xC0 | encode));
}

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_operand(dst, src, 0);
}

void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         (vector_len == AVX_256bit? VM_Version::supports_avx2() :
         (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}
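// The mode byte is four 2-bit source-element selectors, little-endian:
// result dword i comes from src dword mode[2i+1:2i]. Illustrative (legacy
// SSE encoding, nothing emitted by itself):
//
//   pshufd(xmm0, xmm1, 0x1B);   // 66 0F 70 C1 1B
//
// 0x1B = 0b00'01'10'11 selects src dwords 3,2,1,0 -- i.e. it reverses the
// four dwords of xmm1 into xmm0.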
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src, 1);
  emit_int8(mode & 0xFF);
}

void Assembler::pshufhw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::vpshufhw(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         (vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false)), "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src, 1);
  emit_int8(mode & 0xFF);
}

void Assembler::vpshuflw(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         (vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false)), "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}
void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
}

void Assembler::shufpd(XMMRegister dst, XMMRegister src, int imm8) {
  assert(isByte(imm8), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}

void Assembler::vshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}

void Assembler::shufps(XMMRegister dst, XMMRegister src, int imm8) {
  assert(isByte(imm8), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}

void Assembler::vshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}
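// shufps imm8 layout, for reference: the two low result dwords are selected
// from dst by imm8[1:0] and imm8[3:2], the two high ones from src by
// imm8[5:4] and imm8[7:6]. Illustrative: shufps(xmm0, xmm1, 0x00) yields
// { xmm0[0], xmm0[0], xmm1[0], xmm1[0] }. shufpd works the same way on
// qwords, one selector bit per element.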
void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift 128 bit value in dst XMMRegister right by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM3 is for /3 encoding: 66 0F 73 /3 ib
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift);
}

void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::pslldq(XMMRegister dst, int shift) {
  // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM7 is for /7 encoding: 66 0F 73 /7 ib
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift);
}

void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}
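// Both byte shifts share the 66 0F 73 immediate group, with the register
// operand doubling as destination and source; only the reg digit differs
// (/3 = psrldq, /7 = pslldq). Illustrative:
//
//   psrldq(xmm2, 8);   // 66 0F 73 DA 08: moves the high qword of xmm2
//                      // into the low qword and zeroes the high qword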
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src, 0);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src, 0);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x17, (0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x17, (0xC0 | encode));
}

void Assembler::vtestps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x0E, (0xC0 | encode));
}
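// ptest is a non-destructive AND test: it sets ZF when (dst & src) == 0 and
// CF when (src & ~dst) == 0, so a typical all-zero check is (sketch; the
// label is hypothetical):
//
//   ptest(vec, vec);                       // ZF=1 iff every bit of vec is 0
//   jcc(Assembler::zero, is_all_zero);
//
// vtestps is the same idea restricted to the sign bit of each float lane.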
void Assembler::evptestmb(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : VM_Version::supports_avx512vlbw(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W0 26 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x26, (0xC0 | encode));
}

void Assembler::evptestmd(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx512vl(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W0 27 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x27, (0xC0 | encode));
}

void Assembler::evptestnmd(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx512vl(), "");
  // Encoding: EVEX.NDS.XXX.F3.0F38.W0 27 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x27, (0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src, 0);
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x60, (0xC0 | encode));
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src, 0);
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x62, (0xC0 | encode));
}
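// The unpack-low family interleaves the low halves of the two sources:
// after punpckldq(dst, src), dst = { dst[0], src[0], dst[1], src[1] } as
// dwords (punpcklbw does the same at byte granularity) -- the usual
// building block for widening narrow lanes before an arithmetic op.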
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6C, (0xC0 | encode));
}

void Assembler::evpunpcklqdq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  evpunpcklqdq(dst, k0, src1, src2, false, vector_len);
}

void Assembler::evpunpcklqdq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }

  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6C, (0xC0 | encode));
}

void Assembler::evpunpckhqdq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  evpunpckhqdq(dst, k0, src1, src2, false, vector_len);
}

void Assembler::evpunpckhqdq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }

  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6D, (0xC0 | encode));
}

void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8(0x50 | encode);
}

void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src, 0);
}
#endif
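// Illustrative encodings for the push forms above; on 64-bit the pushed
// slot is always 8 bytes and the 68 form's immediate is sign-extended:
//
//   push(rbx);     // 53             (0x50 | 3)
//   push(r12);     // 41 54          (REX.B supplied by prefix_and_encode)
//   push(0x10);    // 68 10 00 00 00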
void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xD0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
  }
}

void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x53, (0xC0 | encode));
}

void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x53, (0xC0 | encode));
}

void Assembler::rdtsc() {
  emit_int16(0x0F, 0x31);
}

// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  // REP
  // MOVSQ
  LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);)
  NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xA5);)
}

// fills rcx bytes at [edi] with the byte value in al
void Assembler::rep_stosb() {
  // REP
  // STOSB
  LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);)
  NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAA);)
}

// fills rcx pointer sized words at [edi] with the value in rax
// generic
void Assembler::rep_stos() {
  // REP
  // LP64:STOSQ, LP32:STOSD
  LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);)
  NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAB);)
}

// scans rcx pointer sized words at [edi] for an occurrence of rax
// generic
void Assembler::repne_scan() { // repne_scan
  // SCASQ
  LP64_ONLY(emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);)
  NOT_LP64( emit_int16((unsigned char)0xF2, (unsigned char)0xAF);)
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for an occurrence of rax
// generic
void Assembler::repne_scanl() { // repne_scan
  // SCASL
  emit_int16((unsigned char)0xF2, (unsigned char)0xAF);
}
#endif

void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

void Assembler::roll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xC0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
  }
}

void Assembler::roll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::rorl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xC8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
  }
}

void Assembler::rorl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xC8 | encode));
}
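// All of the rotate/shift emitters share three opcodes -- D1 (count 1),
// C1 (count imm8), D3 (count in cl) -- and differ only in the ModRM reg
// digit: /0 rol, /1 ror, /2 rcl, /3 rcr, /4 shl/sal, /5 shr, /7 sar.
// Illustrative: roll(rax, 5) emits C1 C0 05, i.e. rol eax, 5.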
#ifdef _LP64
void Assembler::rorq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xC8 | encode));
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xC8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
  }
}

void Assembler::rolq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::rolq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xC0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
  }
}
#endif

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

void Assembler::sall(Address dst, int imm8) {
  InstructionMark im(this);
  assert(isShiftCount(imm8), "illegal shift count");
  prefix(dst);
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_operand(as_Register(4), dst, 0);
  } else {
    emit_int8((unsigned char)0xC1);
    emit_operand(as_Register(4), dst, 1);
    emit_int8(imm8);
  }
}

void Assembler::sall(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xD3);
  emit_operand(as_Register(4), dst, 0);
}

void Assembler::sall(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
  }
}

void Assembler::sall(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE0 | encode));
}

void Assembler::sarl(Address dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  InstructionMark im(this);
  prefix(dst);
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_operand(as_Register(7), dst, 0);
  } else {
    emit_int8((unsigned char)0xC1);
    emit_operand(as_Register(7), dst, 1);
    emit_int8(imm8);
  }
}

void Assembler::sarl(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xD3);
  emit_operand(as_Register(7), dst, 0);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xF8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src, 0);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x90 | cc, (0xC0 | encode));
}

void Assembler::sete(Register dst) {
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x94, (0xC0 | encode));
}

void Assembler::setl(Register dst) {
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x9C, (0xC0 | encode));
}

void Assembler::setne(Register dst) {
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x95, (0xC0 | encode));
}
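// setcc writes only the low byte of its operand; the `true` byte-register
// flag passed to prefix_and_encode forces a REX prefix where one is needed
// to reach sil/dil/bpl/spl on 64-bit. Illustrative:
//
//   sete(rax);   // 0F 94 C0       sete al
//   sete(rsi);   // 40 0F 94 C6    sete sil (REX required to name sil)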
void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0F, (0xC0 | encode), imm8);
}

void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0F, (0xC0 | encode), imm8);
}

void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x3, (0xC0 | encode), imm8);
}

void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0E, (0xC0 | encode), imm8);
}

void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
  emit_int24((unsigned char)0xCC, (0xC0 | encode), (unsigned char)imm8);
}

void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xC8, (0xC0 | encode));
}

void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xC9, (0xC0 | encode));
}

void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCA, (0xC0 | encode));
}
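// For sha1rnds4 above, imm8[1:0] selects the SHA-1 round group -- and with
// it the round function and constant K0..K3 for rounds 0-19, 20-39, 40-59,
// 60-79; the upper imm8 bits are reserved. The SHA extensions use plain
// legacy encodings, hence rex_prefix_and_encode rather than a VEX prefix.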
// xmm0 is implicit additional source to this instruction.
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCB, (0xC0 | encode));
}

void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCC, (0xC0 | encode));
}

void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCD, (0xC0 | encode));
}

void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
  }
}

void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE0 | encode));
}

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
  }
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE8 | encode));
}

void Assembler::shrl(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xD3);
  emit_operand(as_Register(5), dst, 0);
}

void Assembler::shrl(Address dst, int imm8) {
  InstructionMark im(this);
  assert(isShiftCount(imm8), "illegal shift count");
  prefix(dst);
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_operand(as_Register(5), dst, 0);
  } else {
    emit_int8((unsigned char)0xC1);
    emit_operand(as_Register(5), dst, 1);
    emit_int8(imm8);
  }
}

void Assembler::shldl(Register dst, Register src) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int24(0x0F, (unsigned char)0xA5, (0xC0 | encode));
}

void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int32(0x0F, (unsigned char)0xA4, (0xC0 | encode), imm8);
}

void Assembler::shrdl(Register dst, Register src) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int24(0x0F, (unsigned char)0xAD, (0xC0 | encode));
}

void Assembler::shrdl(Register dst, Register src, int8_t imm8) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int32(0x0F, (unsigned char)0xAC, (0xC0 | encode), imm8);
}
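// shld shifts dst left, filling the vacated low bits from the high bits of
// src; shrd is the mirror image. Note the swapped operand order passed to
// prefix_and_encode above: for 0F A4/AC the ModRM reg field carries src and
// rm carries dst. Illustrative:
//
//   shldl(rax, rbx, 8);   // 0F A4 D8 08    shld eax, ebx, 8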
#ifdef _LP64
void Assembler::shldq(Register dst, Register src, int8_t imm8) {
  int encode = prefixq_and_encode(src->encoding(), dst->encoding());
  emit_int32(0x0F, (unsigned char)0xA4, (0xC0 | encode), imm8);
}

void Assembler::shrdq(Register dst, Register src, int8_t imm8) {
  int encode = prefixq_and_encode(src->encoding(), dst->encoding());
  emit_int32(0x0F, (unsigned char)0xAC, (0xC0 | encode), imm8);
}
#endif

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0B, (0xC0 | encode), (unsigned char)rmode);
}

void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x0B);
  emit_operand(dst, src, 1);
  emit_int8((unsigned char)rmode);
}
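// roundsd's rmode byte: bits [1:0] pick the rounding mode (0 nearest-even,
// 1 down, 2 up, 3 truncate), bit 2 selects MXCSR.RC instead when set, and
// bit 3 suppresses the precision exception. Illustrative:
//
//   roundsd(dst, src, 0x9);   // round toward -inf, no precision fault (floor)
//   roundsd(dst, src, 0xB);   // truncate toward zero, no precision fault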
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::stmxcsr(Address dst) {
  if (UseAVX > 0) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(3), dst, 0);
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(dst);
    emit_int16(0x0F, (unsigned char)0xAE);
    emit_operand(as_Register(3), dst, 0);
  }
}

void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst, 0);
}

void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src, 0);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
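// A sketch of why subl_imm32 exists (assuming emit_arith's usual folding of
// byte-sized immediates into the short 83 /5 ib form): patchable code needs
// the full 4-byte immediate field at a fixed offset, so the short form must
// be suppressed even when the value would fit in a sign-extended byte.
//
//   subl(rsp, 16);         // 83 EC 10             (3 bytes)
//   subl_imm32(rsp, 16);   // 81 EC 10 00 00 00    (patchable imm32)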
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  if (dst == rax) {
    emit_int8((unsigned char)0xA8);
    emit_int8(imm8);
  } else {
    (void) prefix_and_encode(dst->encoding(), true);
    emit_arith_b(0xF6, 0xC0, dst, imm8);
  }
}

void Assembler::testb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::testl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF7);
  emit_operand(as_Register(0), dst, 4);
  emit_int32(imm32);
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  if (dst == rax) {
    emit_int8((unsigned char)0xA9);
    emit_int32(imm32);
  } else {
    int encode = dst->encoding();
    encode = prefix_and_encode(encode);
    emit_int16((unsigned char)0xF7, (0xC0 | encode));
    emit_int32(imm32);
  }
}

void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src, 0);
}

void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             (unsigned char)0xBC,
             0xC0 | encode);
}

void Assembler::tzcntl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xBC);
  emit_operand(dst, src, 0);
}

void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode));
}

void Assembler::tzcntq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int16(0x0F, (unsigned char)0xBC);
  emit_operand(dst, src, 0);
}
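// tzcnt is an F3-prefixed BSF (F3 0F BC /r). On pre-BMI1 CPUs the prefix is
// silently ignored and the bytes execute as plain bsf -- with different flag
// and zero-input behavior -- which is why the emitters assert
// supports_bmi1() instead of relying on an illegal-instruction fault.
// Illustrative: tzcntl(rax, rbx) emits F3 0F BC C3.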
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src, 0);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2E, (0xC0 | encode));
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src, 0);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2E, (0xC0 | encode));
}

void Assembler::xabort(int8_t imm8) {
  emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (imm8 & 0xFF));
}

void Assembler::xaddb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int16(0x0F, (unsigned char)0xC0);
  emit_operand(src, dst, 0);
}

void Assembler::xaddw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int16(0x0F, (unsigned char)0xC1);
  emit_operand(src, dst, 0);
}

void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int16(0x0F, (unsigned char)0xC1);
  emit_operand(src, dst, 0);
}

void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
    emit_int32(0);
  }
}
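// xbegin is C7 F8 followed by a rel32 that is relative to the end of the
// 6-byte instruction, so the fallback address the CPU computes is
// pc_after_xbegin + rel32. That is why the bound case above emits
// `offset - 6`: `entry - pc()` is measured from the instruction start and
// must be rebased past the 2 opcode bytes and 4 offset bytes. The unbound
// case emits rel32 = 0 and lets the label patching fill it in later.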
void Assembler::xchgb(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x86);
  emit_operand(dst, src, 0);
}

void Assembler::xchgw(Register dst, Address src) { // xchg
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src, 0);
}

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src, 0);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x87, (0xC0 | encode));
}

void Assembler::xend() {
  emit_int24(0x0F, 0x01, (unsigned char)0xD5);
}

void Assembler::xgetbv() {
  emit_int24(0x0F, 0x01, (unsigned char)0xD0);
}

void Assembler::xorl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, as_Register(6), dst, imm32);
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src, 0);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x31);
  emit_operand(src, dst, 0);
}

void Assembler::xorb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x32);
  emit_operand(dst, src, 0);
}

void Assembler::xorb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8(0x30);
  emit_operand(src, dst, 0);
}

void Assembler::xorw(Register dst, Register src) {
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// AVX 3-operand scalar floating-point arithmetic instructions

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}
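// In these three-operand forms nds is both the first source and the
// pass-through: vaddsd(dst, nds, src) computes dst[63:0] = nds[63:0] +
// src[63:0], copies nds[127:64] into dst[127:64], and (as with any
// VEX-encoded op) zeroes the bits above 127. Sketch:
//
//   vaddsd(xmm0, xmm1, xmm2);   // xmm0 = xmm1 + xmm2 (low doubles),
//                               // upper lane taken from xmm1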

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB9, (0xC0 | encode));
}

void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB9, (0xC0 | encode));
}
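
// Note on the FMA forms above: vfmadd231sd/ss (opcode 0xB9 in the 0F 38
// map) compute dst = src1 * src2 + dst; the digits in "231" name which
// operands are multiplied and which one accumulates. vex_w selects the
// element width (W1 double, W0 single).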

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}
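
// A recurring pattern in the double-precision scalar forms above:
// /* vex_w */ is passed as VM_Version::supports_evex() because the EVEX
// encodings of these instructions require W1, while their VEX encodings
// ignore W. set_rex_vex_w_reverted() appears to record exactly that --
// W was set only for EVEX's sake -- so the prefix emitter can drop the bit
// again when the instruction ends up VEX-encoded (our reading of the flag;
// see InstructionAttr).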

//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}
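
// Note on set_address_attributes: EVEX memory operands use compressed
// (disp8 * N) displacements, where N is derived from the tuple type and
// input size declared here. EVEX_FV ("full vector") makes N the full
// vector width, so for a 512-bit vaddpd an 8-bit displacement of 1
// addresses base + 64.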

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::mulpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2, 0);
}

void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2, 0);
}
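
// The packed FMA forms use opcode 0xB8 (VFMADD231PD/PS) in the 0F 38 map,
// with the same dst = src1 * src2 + dst operand convention as the scalar
// 0xB9 forms above; vex_w again selects double vs. single precision.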

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x09, (0xC0 | encode), (rmode));
}

void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src, 1);
  emit_int8((rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x09, (0xC0 | encode), (rmode));
}
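
// Note on the rounding immediate: the low two bits of rmode select the
// rounding mode (00 nearest-even, 01 down, 10 up, 11 truncate); bit 2
// defers to MXCSR.RC instead, and bit 3 suppresses the precision
// exception. The trailing argument of emit_operand(dst, src, 1) is the
// number of instruction bytes that still follow the displacement (the
// imm8 here), which the encoder needs to fix up RIP-relative operands.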

void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src, 1);
  emit_int8((rmode));
}

void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}
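
// Note on _legacy_mode_dq in the bitwise families here (and in the xor
// family below): the EVEX forms of VANDPD/VANDPS and VXORPD/VXORPS belong
// to AVX-512DQ. When the CPU lacks AVX512DQ, _legacy_mode_dq is set and
// these instructions fall back to their VEX encodings, which also caps
// them at 256-bit vectors.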

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x15, (0xC0 | encode));
}

void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x14, (0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src, 0);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src, 0);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}
assert(VM_Version::supports_avx(), ""); 7080 InstructionMark im(this); 7081 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 7082 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 7083 attributes.set_rex_vex_w_reverted(); 7084 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7085 emit_int8(0x57); 7086 emit_operand(dst, src, 0); 7087 } 7088 7089 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7090 assert(VM_Version::supports_avx(), ""); 7091 InstructionMark im(this); 7092 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 7093 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7094 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 7095 emit_int8(0x57); 7096 emit_operand(dst, src, 0); 7097 } 7098 7099 // Integer vector arithmetic 7100 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7101 assert(VM_Version::supports_avx() && (vector_len == 0) || 7102 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 7103 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7104 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7105 emit_int16(0x01, (0xC0 | encode)); 7106 } 7107 7108 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7109 assert(VM_Version::supports_avx() && (vector_len == 0) || 7110 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 7111 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7112 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7113 emit_int16(0x02, (0xC0 | encode)); 7114 } 7115 7116 void Assembler::paddb(XMMRegister dst, XMMRegister src) { 7117 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7118 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7119 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7120 emit_int16((unsigned char)0xFC, (0xC0 | encode)); 7121 } 7122 7123 void Assembler::paddw(XMMRegister dst, XMMRegister src) { 7124 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7125 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7126 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7127 emit_int16((unsigned char)0xFD, (0xC0 | encode)); 7128 } 7129 7130 void Assembler::paddd(XMMRegister dst, XMMRegister src) { 7131 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7132 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7133 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7134 emit_int16((unsigned char)0xFE, (0xC0 | encode)); 7135 } 7136 7137 void 

void Assembler::paddd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src, 0);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x01, (0xC0 | encode));
}

void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x02, (0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (0xC0 | encode));
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src, 0);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src, 0);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src, 0);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src, 0);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}

void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD8, (0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src, 0);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src, 0);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src, 0);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src, 0);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF4, (0xC0 | encode));
}
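
// Note on the multiply family above: pmullw (SSE2, 0xD5) keeps the low 16
// bits of each 16-bit product, pmulld (SSE4.1, 0F 38 0x40) keeps the low
// 32 bits of each 32-bit product, and pmuludq (SSE2, 0xF4) multiplies the
// even-indexed unsigned 32-bit lanes into full 64-bit results.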

void Assembler::vpmulhuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((vector_len == AVX_128bit && VM_Version::supports_avx()) ||
         (vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
         (vector_len == AVX_512bit && VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE4, (0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::evpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF4, (0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src, 0);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src, 0);
}

void Assembler::evpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src, 0);
}

// Min, max
void Assembler::pminsb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::pminsw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEA, (0xC0 | encode));
}

void Assembler::vpminsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEA, (0xC0 | encode));
}

void Assembler::pminsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}
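
// Note on the differing asserts in the min/max family: pminsw/pmaxsw date
// back to SSE2, while the byte and dword variants (pminsb/pmaxsb,
// pminsd/pmaxsd) were only introduced with SSE4.1, which is why the
// SSE-level checks differ between otherwise parallel routines.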

void Assembler::pminsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::vpminsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::vpminsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX512F");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}
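
// Quadword packed min/max (vpminsq above and vpmaxsq below) have no SSE or
// VEX encoding at all; they are EVEX-only, hence the unconditional
// set_is_evex_instruction().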

void Assembler::minps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vminps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::minpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vminpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}
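
// Note: minps/minpd above and maxps/maxpd below follow the x86 rather than
// the IEEE 754 rule: if either operand is NaN, or both operands are zeroes
// of any sign, the second source operand is returned, so operand order is
// significant to callers.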

void Assembler::pmaxsb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3C, (0xC0 | encode));
}

void Assembler::vpmaxsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3C, (0xC0 | encode));
}

void Assembler::pmaxsw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEE, (0xC0 | encode));
}

void Assembler::vpmaxsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEE, (0xC0 | encode));
}

void Assembler::pmaxsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::vpmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::vpmaxsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX512F");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::maxps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}
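
// The shift-by-immediate forms below use the 0F 71/72/73 group encodings:
// the operation lives in the ModRM reg field as an opcode extension
// (/2 = shift right logical, /4 = shift right arithmetic, /6 = shift left),
// which these helpers express by passing xmm2/xmm4/xmm6 as the first
// register argument. In the VEX/EVEX immediate forms the destination is
// carried in the prefix's vvvv field (the second argument to
// vex_prefix_and_encode()). For example, psllw(xmm3, 4) assembles to
// 66 0F 71 F3 04 (ModRM 0xF3 = mod 11, reg /6, rm xmm3).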

// Shift packed integers left by specified number of bits.
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}
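
// Most of the quadword shifts set the W bit only when EVEX is available and
// mark it with set_rex_vex_w_reverted(): the EVEX encodings require W1 for
// the 64-bit forms, whereas legacy SSE ignores REX.W and the VEX forms are
// encoded WIG, so the bit can be dropped again when the instruction is
// emitted without an EVEX prefix.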

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse with the psrldq SSE2 instruction, which shifts the whole
  // 128-bit value in an xmm register by a number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}

void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x12, (0xC0 | encode));
}
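
// evpsrlvw/evpsllvw above are the word-granularity variable shifts, which
// exist only with AVX512BW; the dword/qword variable shifts further below
// (vpsllvd, vpsllvq, vpsrlvd, vpsrlvq) are available from AVX2 onwards.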

// Shift packed integers arithmetically right by specified number of bits.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert(VM_Version::supports_avx512vl() || vector_len == AVX_512bit, "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert(VM_Version::supports_avx512vl() || vector_len == AVX_512bit, "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}
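
// There is no packed 64-bit arithmetic right shift in SSE or AVX; evpsraq
// is EVEX-only (AVX512F, plus AVX512VL for vectors shorter than 512 bits),
// which is what the asserts above enforce.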

// Logical operations on packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src, 0);
}

void Assembler::evpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  evpandq(dst, k0, nds, src, false, vector_len);
}

void Assembler::evpandq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  evpandq(dst, k0, nds, src, false, vector_len);
}
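
// The unmasked evpandq/evporq entry points simply delegate to the masked
// forms further below, passing k0 (which means "no masking") and
// merge == false.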

// Variable shift packed integers logically left.
void Assembler::vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 1, "requires AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 1, "requires AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

// Variable shift packed integers logically right.
void Assembler::vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 1, "requires AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 1, "requires AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

// Variable shift packed integers arithmetically right.
void Assembler::vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 1, "requires AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

void Assembler::evpsravw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x11, (0xC0 | encode));
}

void Assembler::evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x71, (0xC0 | encode));
}

void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x73, (0xC0 | encode));
}
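
// vpshldvd/vpshrdvd above are the AVX512-VBMI2 "concatenate and variable
// shift" (funnel-shift) operations: each destination lane is concatenated
// with the matching src lane and shifted left/right by the per-lane count
// taken from shift, keeping the half that lands in the destination.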

void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src, 0);
}

void Assembler::evporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  evporq(dst, k0, nds, src, false, vector_len);
}

void Assembler::evporq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  evporq(dst, k0, nds, src, false, vector_len);
}

void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src, 0);
}
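
// For the masked forms, the opmask register is embedded in the EVEX prefix
// via set_embedded_opmask_register_specifier(). merge == true resets the
// clear-context attribute so masked-off destination lanes keep their
// previous contents; with merge == false they are zeroed instead.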
"")); 8066 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8067 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8068 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 8069 } 8070 8071 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 8072 assert(UseAVX > 0, "requires some form of AVX"); 8073 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 8074 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 8075 vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, ""); 8076 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8077 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8078 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 8079 } 8080 8081 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 8082 assert(UseAVX > 0, "requires some form of AVX"); 8083 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 8084 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 8085 vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, ""); 8086 InstructionMark im(this); 8087 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8088 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 8089 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8090 emit_int8((unsigned char)0xEF); 8091 emit_operand(dst, src, 0); 8092 } 8093 8094 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 8095 assert(UseAVX > 2, "requires some form of EVEX"); 8096 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8097 attributes.set_rex_vex_w_reverted(); 8098 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8099 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 8100 } 8101 8102 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8103 // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r 8104 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8105 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 8106 attributes.set_is_evex_instruction(); 8107 attributes.set_embedded_opmask_register_specifier(mask); 8108 if (merge) { 8109 attributes.reset_is_clear_context(); 8110 } 8111 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8112 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 8113 } 8114 8115 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8116 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8117 InstructionMark im(this); 8118 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8119 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8120 

void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}

void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src, 0);
}

void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  // Encoding: EVEX.NDS.XXX.66.0F.W1 EF /r
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}

void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src, 0);
}

void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src, 0);
}

void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}

void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src, 0);
}

void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src, 0);
}

void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}

void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src, 0);
}
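
// Packed rotates are EVEX-only (AVX512F, plus AVX512VL below 512 bits).
// The immediate forms reuse the 66 0F 72 group encoding: /1 selects
// rotate-left and /0 rotate-right, which is why xmm1 and xmm0 are passed
// as the register-field extension below.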

void Assembler::evprold(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (unsigned char)(0xC0 | encode));
}

void Assembler::evprolvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (unsigned char)(0xC0 | encode));
}

void Assembler::evprorvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (unsigned char)(0xC0 | encode));
}

void Assembler::evprorvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (unsigned char)(0xC0 | encode));
}

void Assembler::evplzcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512cd(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}

void Assembler::evplzcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512cd(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}
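
// vpternlogd/vpternlogq below compute an arbitrary three-input boolean
// function of dst, src2 and src3: at every bit position the bits
// (dst, src2, src3) form a 3-bit index, and the result bit is the
// corresponding bit of imm8. For example, imm8 == 0x96 yields a three-way
// XOR and 0xE8 the majority function, e.g.
//   vpternlogd(xmm0, 0x96, xmm1, xmm2, Assembler::AVX_512bit);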

void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3, 1);
  emit_int8(imm8);
}

void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
  assert(VM_Version::supports_evex(), "requires AVX512F");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3, 1);
  emit_int8(imm8);
}
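
// The expand family below implements the AVX512 expand operation with a
// register source: consecutive elements are taken from the low end of src
// and written to the destination lanes whose opmask bit is set (the
// register analogue of an expand-load).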
void Assembler::evexpandps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x88, (0xC0 | encode));
}

void Assembler::evexpandpd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x88, (0xC0 | encode));
}

void Assembler::evpexpandb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x62, (0xC0 | encode));
}

void Assembler::evpexpandw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x62, (0xC0 | encode));
}

void Assembler::evpexpandd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x89, (0xC0 | encode));
}

void Assembler::evpexpandq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x89, (0xC0 | encode));
}
// vinserti forms

void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // last byte:
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int24(0x38, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src, 1);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src, 1);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}
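// A minimal sketch (not used by the assembler): the imm8 for the 32x4
// insert/extract forms is just the 128-bit quadrant number of the target
// lane, i.e. its bit offset divided by 128.
static inline uint8_t quadrant_imm8_sketch(int bit_offset) {
  assert((bit_offset & 127) == 0 && bit_offset < 512, "must name a 128-bit lane");
  return (uint8_t)(bit_offset >> 7); // 0, 128, 256, 384 => q0, q1, q2, q3
}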
void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int24(0x3A, (0xC0 | encode), imm8 & 0x01);
}


// vinsertf forms

void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int24(0x18, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src, 1);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src, 1);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}
assert(VM_Version::supports_evex(), ""); 8610 assert(imm8 <= 0x01, "imm8: %u", imm8); 8611 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8612 attributes.set_is_evex_instruction(); 8613 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8614 // imm8: 8615 // 0x00 - insert into lower 256 bits 8616 // 0x01 - insert into upper 256 bits 8617 emit_int24(0x1A, (0xC0 | encode), imm8 & 0x01); 8618 } 8619 8620 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 8621 assert(VM_Version::supports_evex(), ""); 8622 assert(dst != xnoreg, "sanity"); 8623 assert(imm8 <= 0x01, "imm8: %u", imm8); 8624 InstructionMark im(this); 8625 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8626 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); 8627 attributes.set_is_evex_instruction(); 8628 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8629 emit_int8(0x1A); 8630 emit_operand(dst, src, 1); 8631 // 0x00 - insert into lower 256 bits 8632 // 0x01 - insert into upper 256 bits 8633 emit_int8(imm8 & 0x01); 8634 } 8635 8636 8637 // vextracti forms 8638 8639 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8640 assert(VM_Version::supports_avx2(), ""); 8641 assert(imm8 <= 0x01, "imm8: %u", imm8); 8642 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8643 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8644 // imm8: 8645 // 0x00 - extract from lower 128 bits 8646 // 0x01 - extract from upper 128 bits 8647 emit_int24(0x39, (0xC0 | encode), imm8 & 0x01); 8648 } 8649 8650 void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 8651 assert(VM_Version::supports_avx2(), ""); 8652 assert(src != xnoreg, "sanity"); 8653 assert(imm8 <= 0x01, "imm8: %u", imm8); 8654 InstructionMark im(this); 8655 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8656 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8657 attributes.reset_is_clear_context(); 8658 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8659 emit_int8(0x39); 8660 emit_operand(src, dst, 1); 8661 // 0x00 - extract from lower 128 bits 8662 // 0x01 - extract from upper 128 bits 8663 emit_int8(imm8 & 0x01); 8664 } 8665 8666 void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8667 assert(VM_Version::supports_evex(), ""); 8668 assert(imm8 <= 0x03, "imm8: %u", imm8); 8669 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8670 attributes.set_is_evex_instruction(); 8671 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8672 // imm8: 8673 // 0x00 - extract from bits 127:0 8674 // 0x01 - extract from bits 255:128 8675 // 0x02 - extract from bits 383:256 8676 // 0x03 - extract from bits 511:384 8677 emit_int24(0x39, (0xC0 | encode), imm8 & 0x03); 8678 } 8679 8680 void Assembler::vextracti32x4(Address dst, 
void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst, 1);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}

void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int24(0x3B, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
  emit_operand(src, dst, 1);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
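// For reference (hand-checked): the 64x4 insert/extract forms round-trip,
// e.g. splitting off and restoring the upper half of a 512-bit register:
//   vextracti64x4(xmm1, xmm0, 1);       // ymm1 = upper 256 bits of zmm0
//   vinserti64x4(xmm0, xmm0, xmm1, 1);  // writes them back unchanged
// The Address overloads store or load the same 256-bit chunk via memory.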
// vextractf forms

void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int24(0x19, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst, 1);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst, 1);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}

void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
}
void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int24(0x1B, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst, 1);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x78, (0xC0 | encode));
}

void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_operand(dst, src, 0);
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x79, (0xC0 | encode));
}

void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_operand(dst, src, 0);
}
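// Typical use (a sketch, not taken from real call sites): materializing a
// vector of identical bytes from a scalar already in the low byte of xmm0:
//   vpbroadcastb(xmm0, xmm0, Assembler::AVX_512bit); // zmm0 = 64 copies
// At AVX_512bit this must take the EVEX encoding, which is why the comments
// above call for AVX512BW (and AVX512VL for the 128/256-bit EVEX forms).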
void Assembler::vpsadbw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF6, (0xC0 | encode));
}

void Assembler::vpunpckhwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x69, (0xC0 | encode));
}

void Assembler::vpunpcklwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x61, (0xC0 | encode));
}

void Assembler::vpunpckhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6A, (0xC0 | encode));
}

void Assembler::vpunpckldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x62, (0xC0 | encode));
}

// xmm/mem sourced byte/word/dword/qword replicate
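// All of the masked ev* forms below follow one convention (a hand-checked
// sketch): the KRegister becomes the EVEX opmask, and `merge` selects merge-
// versus zero-masking. InstructionAttr defaults to clear-context (EVEX.z = 1,
// masked-off lanes are zeroed); reset_is_clear_context() switches to merging,
// so masked-off lanes keep the destination's previous contents. For example:
//   evpaddb(xmm0, k1, xmm1, xmm2, /* merge */ true,  Assembler::AVX_512bit);
//     => vpaddb zmm0{k1}, zmm1, zmm2     (masked-off bytes keep zmm0's value)
//   evpaddb(xmm0, k1, xmm1, xmm2, /* merge */ false, Assembler::AVX_512bit);
//     => vpaddb zmm0{k1}{z}, zmm1, zmm2  (masked-off bytes are zeroed)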
void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}

void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src, 0);
}

void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}

void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src, 0);
}

void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (0xC0 | encode));
}

void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src, 0);
}
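// A hand-worked encoding note for the Address forms (a sketch, assuming a
// full-width load with no broadcast): with tuple_type EVEX_FV the
// displacement is compressed against the vector width, so for a 512-bit
// evpaddd the byte displacement must be a multiple of 64 to fit the one-byte
// disp8 form, e.g.
//   evpaddd(xmm0, k1, xmm1, Address(rsi, 256), /* merge */ true, Assembler::AVX_512bit);
// encodes disp8 = 256/64 = 4; a displacement such as 100 falls back to the
// four-byte disp32 form.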
void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src, 0);
}

void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}
void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src, 0);
}
void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src, 0);
}

void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src, 0);
}

void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}
void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src, 0);
}

void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}
void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src, 0);
}

void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src, 0);
}

void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src, 0);
}
void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src, 0);
}

void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}
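// Typical use of the masked forms (a MacroAssembler-style sketch; rtmp and
// remaining are hypothetical names, not from real call sites): a vector loop
// tail where the leftover element count selects an opmask, e.g. for floats
//   __ mov64(rtmp, (1ULL << remaining) - 1);
//   __ kmovql(k1, rtmp);
//   __ evmulps(xmm0, k1, xmm1, xmm2, /* merge */ true, Assembler::AVX_512bit);
// multiplies only the `remaining` low lanes and leaves the rest of zmm0
// untouched. (kmovql from a general register requires AVX512BW.)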
void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

// sqrt is unary: the nds argument is accepted for signature uniformity but is not encoded
void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}
void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}


void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}
void Assembler::evpabsb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1C, (0xC0 | encode));
}

void Assembler::evpabsb(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1C);
  emit_operand(dst, src, 0);
}

void Assembler::evpabsw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1D, (0xC0 | encode));
}

void Assembler::evpabsw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1D);
  emit_operand(dst, src, 0);
}

void Assembler::evpabsd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1E, (0xC0 | encode));
}

void Assembler::evpabsd(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1E);
  emit_operand(dst, src, 0);
}

void Assembler::evpabsq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1F, (0xC0 | encode));
}

void Assembler::evpabsq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1F);
  emit_operand(dst, src, 0);
}

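// The evpfma213* forms below emit VFMADD213PS/PD (0F38 map, opcode 0xA8,
// W0 = ps, W1 = pd). With the "213" operand ordering this computes, per lane,
// dst = nds * dst + src, subject to the same merge/zero masking as the other
// EVEX forms in this file.
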
void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xA8, (0xC0 | encode));
}

void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA8);
  emit_operand(dst, src, 0);
}

void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xA8, (0xC0 | encode));
}

void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA8);
  emit_operand(dst, src, 0);
}

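// The evperm* family below emits the full permutes VPERMB/VPERMW (0x8D,
// W0/W1) and VPERMD/VPERMQ (0x36, W0/W1): each destination element is the
// element of src selected by the index held in the corresponding element of
// nds.
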
void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}

void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}

void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}

void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src, 0);
}

void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}

void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src, 0);
}

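// The shift-by-immediate forms below use the classic opcode-extension
// encoding: the extension (/6 for psll, /2 for psrl, /4 for psra) lives in
// the ModRM reg field, which is why xmm6/xmm2/xmm4 is passed as the first
// argument to vex_prefix_and_encode(); dst travels in EVEX.vvvv and src in
// ModRM.rm, and the shift count follows as an immediate byte.
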
void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

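// After evpsrad/evpsraq below, the nds-taking forms shift every lane of nds
// by a single count held in the low 64 bits of src (opcodes 0xF1-0xF3 for
// psll, 0xD1-0xD3 for psrl and 0xE1/0xE2 for psra, all in the 0F map).
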
void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}

void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}

void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}

void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

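// The evpsllv*/evpsrlv*/evpsrav* forms below are per-element variable shifts:
// each lane of nds is shifted by the count in the matching lane of src
// (VPSLLVW 0x12, VPSLLVD/Q 0x47, VPSRLVW 0x10, VPSRLVD/Q 0x45, VPSRAVW 0x11,
// VPSRAVD/Q 0x46, all in the 0F38 map).
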
void Assembler::evpsllvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x12, (0xC0 | encode));
}

void Assembler::evpsllvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::evpsllvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::evpsrlvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}

void Assembler::evpsrlvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::evpsrlvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::evpsravw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x11, (0xC0 | encode));
}

void Assembler::evpsravd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

void Assembler::evpsravq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

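// The evpmins*/evpmaxs* forms below emit the packed signed min/max
// instructions. Note the split across opcode maps: the byte and dword/qword
// variants sit in the 0F38 map (0x38/0x39 for min, 0x3C/0x3D for max), while
// the word variants keep their legacy 0F map encodings (0xEA for PMINSW,
// 0xEE for PMAXSW).
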
void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src, 0);
}

void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEA, (0xC0 | encode));
}

void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEA);
  emit_operand(dst, src, 0);
}

void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x39);
  emit_operand(dst, src, 0);
}

void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x39);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3C, (0xC0 | encode));
}

void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3C);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEE, (0xC0 | encode));
}

void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEE);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3D);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3D);
  emit_operand(dst, src, 0);
}

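// The evpternlog* forms below emit VPTERNLOGD/Q. imm8 is an eight-entry truth
// table: bit (dst_bit << 2 | src2_bit << 1 | src3_bit) of imm8 gives the
// result for that combination of source bits, so e.g. imm8 == 0x96 computes
// dst ^ src2 ^ src3. In the Address forms, emit_operand() is passed a
// trailing length of 1 so that RIP-relative displacements account for the
// imm8 byte that follows the operand.
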
void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3, 1);
  emit_int8(imm8);
}

void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3, 1);
  emit_int8(imm8);
}

void Assembler::gf2p8affineqb(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_gfni(), "");
  assert(VM_Version::supports_sse(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::vgf2p8affineqb(XMMRegister dst, XMMRegister src2, XMMRegister src3, int imm8, int vector_len) {
  assert(VM_Version::supports_gfni(), "requires GFNI support");
  assert(VM_Version::supports_sse(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
}

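// The two GFNI emitters above encode GF2P8AFFINEQB (0F3A map, 0xCE): for each
// byte of the first source, the byte is multiplied, as a bit vector over
// GF(2), by the 8x8 bit matrix held in the corresponding qword of the second
// source, and the result is XORed with imm8.
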
// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX >= 2, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src, 0);
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src, 0);
}

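// The evbroadcasti32x4/64x2 forms below replicate a whole 128-bit chunk (four
// dwords or two qwords) into every 128-bit lane of the destination; the
// EVEX_T4/EVEX_T2 tuple types scale a compressed disp8 by the 16-byte memory
// operand. The set_rex_vex_w_reverted() calls here and in vpbroadcastq above
// appear to flag that W is wanted only in the EVEX encoding (e.g.
// VPBROADCASTQ is VEX.W0 but EVEX.W1), so the encoder can drop it if a VEX
// encoding is selected instead.
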
void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src, 0);
}

void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src, 0);
}

// scalar single/double precision replicate

// duplicate single precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x18, (0xC0 | encode));
}

void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src, 0);
}

// duplicate double precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x19, (0xC0 | encode));
}

void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_operand(dst, src, 0);
}

void Assembler::vbroadcastf128(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len == AVX_256bit, "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1A);
  emit_operand(dst, src, 0);
}

// gpr source broadcast forms

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7A, (0xC0 | encode));
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7B, (0xC0 | encode));
}

// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7C, (0xC0 | encode));
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7C, (0xC0 | encode));
}

void Assembler::vpgatherdd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src, 0);
}

void Assembler::vpgatherdq(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src, 0);
}

void Assembler::vgatherdpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x92);
  emit_operand(dst, src, 0);
}

void Assembler::vgatherdps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
  vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x92);
  emit_operand(dst, src, 0);
}

void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src, 0);
}

void Assembler::evpgatherdq(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src, 0);
}

void Assembler::evgatherdpd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x92);
  emit_operand(dst, src, 0);
}

void Assembler::evgatherdps(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(src.isxmmindex(), "expected to be xmm index");
  assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x92);
  emit_operand(dst, src, 0);
}

void Assembler::evpscatterdd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA0);
  emit_operand(src, dst, 0);
}

void Assembler::evpscatterdq(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA0);
  emit_operand(src, dst, 0);
}

void Assembler::evscatterdps(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA2);
  emit_operand(src, dst, 0);
}

void Assembler::evscatterdpd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(mask != k0, "instruction will #UD if mask is in k0");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA2);
  emit_operand(src, dst, 0);
}
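
// Note (editorial): a minimal, hypothetical sketch of driving the EVEX gather
// form above from stub code; the register, mask, and base choices are
// illustrative only. Per the gather/scatter semantics, the opmask is consumed
// (bits clear as elements complete), and k0 cannot be used, as asserted above.
//   __ kmovwl(k2, rtmp);  // rtmp: GPR holding the element mask (hypothetical)
//   __ evpgatherdd(xmm0, k2, Address(rbase, xmm1, Address::times_4, 0), Assembler::AVX_512bit);
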
// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
}

// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
}

void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
  assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
}

void Assembler::vzeroupper_uncached() {
  if (VM_Version::supports_vzeroupper()) {
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8(0x77);
  }
}

void Assembler::vfpclassss(KRegister kdst, XMMRegister src, uint8_t imm8) {
  // Encoding: EVEX.LIG.66.0F3A.W0 67 /r ib
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0x67, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::vfpclasssd(KRegister kdst, XMMRegister src, uint8_t imm8) {
  // Encoding: EVEX.LIG.66.0F3A.W1 67 /r ib
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0x67, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr, 0);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr, 0);
}

void Assembler::emit_operand32(Register reg, Address adr, int post_addr_length) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
}
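
// Note (editorial): everything from here to the matching #endif is compiled
// only for 32-bit builds. Most register-form x87 instructions below funnel
// through emit_farith(b1, b2, i), which emits the two bytes b1 and b2+i.
// For illustration: fadd(2) emits 0xD8 0xC2, i.e. FADD ST(0), ST(2).
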
#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int16(0x0F, 0x77);
}

void Assembler::vzeroupper() {
  vzeroupper_uncached();
}

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int16((unsigned char)0x81, (0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1, 4);
  emit_data(imm32, rspec, 0);
}

// The 64-bit cmpxchg (on this 32-bit platform) compares the value at adr with the
// contents of rdx:rax; if they are equal it stores rcx:rbx into adr, otherwise it
// loads the value at adr into rdx:rax. The ZF is set if the compared values were
// equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int16(0x0F, (unsigned char)0xC7);
  emit_operand(rcx, adr, 0);
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}

// 64bit doesn't use the x87

void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int16(b1, b2 + i);
}

void Assembler::fabs() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src, 0);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src, 0);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src, 0);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src, 0);
}

void Assembler::fcompp() {
  emit_int16((unsigned char)0xDE, (unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src, 0);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src, 0);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src, 0);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src, 0);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr, 0);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr, 0);
}

void Assembler::fincstp() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int24((unsigned char)0x9B, (unsigned char)0xDB, (unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr, 0);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr, 0);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr, 0);
}

void Assembler::fld1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr, 0);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr, 0);
}

void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src, 0);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src, 0);
}

void Assembler::fldlg2() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEE);
}

void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src, 0);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src, 0);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst, 0);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int16((unsigned char)0x9B, (unsigned char)0xD9);
  emit_operand32(rdi, src, 0);
}

void Assembler::fnstsw_ax() {
  emit_int16((unsigned char)0xDF, (unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src, 0);
}

void Assembler::fsin() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr, 0);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr, 0);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr, 0);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr, 0);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src, 0);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src, 0);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src, 0);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src, 0);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

void Assembler::ftan() {
  emit_int32((unsigned char)0xD9, (unsigned char)0xF2, (unsigned char)0xDD, (unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEA);
}
#endif // !_LP64

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0, 0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}

void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
  int vector_len = _attributes->get_vector_len();
  bool vex_w = _attributes->is_rex_vex_w();
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;

    emit_int24((unsigned char)VEX_3bytes, byte1, byte2);
  } else {
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0) ? 4 : 0) | pre;
    emit_int16((unsigned char)VEX_2bytes, byte1);
  }
}

// This is a 4 byte encoding
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){
  // EVEX 0x62 prefix
  // byte1 = EVEX_4bytes;

  bool vex_w = _attributes->is_rex_vex_w();
  int evex_encoding = (vex_w ? VEX_W : 0);
  // EVEX.b is not currently used for broadcast of single element or data rounding modes
  _attributes->set_evex_encoding(evex_encoding);

  // P0: byte 2, laid out as RXBR`00mm; the R/X/B/R' bits are collected
  // positive here and inverted below, rather than being built already not'd
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;

  // P2: byte 4 as zL'Lbv'aaa
  // kregs are implemented in the low 3 bits as aaa
  int byte4 = (_attributes->is_no_reg_mask()) ?
              0 :
              _attributes->get_embedded_opmask_register_specifier();
  // EVEX.v` for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0 : EVEX_V);
  // third is EVEX.b for broadcast actions
  byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth is EVEX.L'L for vector length: 0 is 128, 1 is 256, 2 is 512; we do not currently support 1024
  byte4 |= ((_attributes->get_vector_len()) & 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  if (_attributes->is_no_reg_mask() == false &&
      _attributes->get_embedded_opmask_register_specifier() != 0) {
    byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
  }

  emit_int32(EVEX_4bytes, byte2, byte3, byte4);
}
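
// Note (editorial): for illustration of the two VEX forms chosen in
// vex_prefix() above, a 128-bit vaddps xmm0, xmm1, xmm2 (map 0F, no SIMD
// prefix, W/X/B clear) fits the two-byte form and encodes as C5 F0 58 C2,
// while any use of W, X, B or the 0F_38/0F_3A maps forces the three-byte
// C4 form.
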
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = (xreg_enc & 8) == 8;
  bool vex_b = adr.base_needs_rex();
  bool vex_x;
  if (adr.isxmmindex()) {
    vex_x = adr.xmmindex_needs_rex();
  } else {
    vex_x = adr.index_needs_rex();
  }
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For an EVEX instruction that is not marked as a pure EVEX instruction, check
  // whether it is allowed in legacy mode and whether its resources fit there.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
      if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())), "XMM register should be 0-15");
    assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())), "XMM register should be 0-15");
  }

  clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v;
    // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
    if (adr.isxmmindex()) {
      evex_v = ((adr._xmmindex->encoding() > 15) ? true : false);
    } else {
      evex_v = (nds_enc >= 16);
    }
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }
}
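
// Note (editorial): the demotion logic above means that on an AVX-512 machine
// (UseAVX > 2) an instruction not marked is_evex_instruction, using only
// registers 0-15 and a vector length under 512 bits, is quietly emitted with
// a VEX prefix rather than EVEX; only instructions that actually need EVEX
// resources pay for the 4-byte prefix.
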
int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = (dst_enc & 8) == 8;
  bool vex_b = (src_enc & 8) == 8;
  bool vex_x = false;
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For an EVEX instruction that is not marked as a pure EVEX instruction, check
  // whether it is allowed in legacy mode and whether its resources fit there.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
      if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
          (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    // All the scalar fp instructions (with uses_vl as false) can have legacy_mode as false
    // Instructions with uses_vl true are vector instructions
    // All the vector instructions with AVX_512bit length can have legacy_mode as false
    // All the vector instructions with < AVX_512bit length can have legacy_mode as false if AVX512VL is supported
    // All others should have legacy_mode set to true
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())), "XMM register should be 0-15");
    // Instructions with legacy_mode true should have dst, nds and src < 16
    assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())), "XMM register should be 0-15");
  }

  clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (dst_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    // can use vex_x as bank extender on rm encoding
    vex_x = (src_enc >= 16);
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }

  // return modrm byte components for operands
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}

void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                            VexOpcode opc, InstructionAttr *attributes) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                                      VexOpcode opc, InstructionAttr *attributes) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
  }
}
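
// Note (editorial): simd_prefix_and_encode() above is the SSE/AVX pivot for
// the instruction definitions that follow: with UseAVX > 0 it emits a VEX (or
// EVEX) prefix, otherwise a legacy REX-style SSE encoding. For illustration,
// addss xmm0, xmm1 encodes as F3 0F 58 C1 on the SSE path, while the AVX form
// vaddss xmm0, xmm1, xmm2 becomes C5 F2 58 C2.
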
void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
}

void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int src2_enc = src2->encoding();
  emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4));
}

void Assembler::vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int src2_enc = src2->encoding();
  emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4));
}

void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8);
}

void Assembler::vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (0xC0 | encode), (unsigned char)comparison);
}

void Assembler::evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        ComparisonPredicateFP comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.0F.W0 C2 /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
}

void Assembler::evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        ComparisonPredicateFP comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F.W1 C2 /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
}

void Assembler::blendvps(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (0xC0 | encode));
}

void Assembler::blendvpd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (0xC0 | encode));
}

void Assembler::pblendvb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}

void Assembler::vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int src2_enc = src2->encoding();
  emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4));
}

void Assembler::vblendps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0C, (0xC0 | encode), imm8);
}

void Assembler::vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x64, (0xC0 | encode));
}

void Assembler::vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x65, (0xC0 | encode));
}

void Assembler::vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x66, (0xC0 | encode));
}

void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x37, (0xC0 | encode));
}
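
// Note (editorial): the evpcmp* helpers below emit the EVEX compare-into-mask
// forms: the result lands in a k-register, the trailing immediate selects the
// comparison predicate, and is_signed picks the signed (0x1F/0x3F) versus
// unsigned (0x1E/0x3E) opcode. A hypothetical call site:
//   __ evpcmpd(k2, k0, xmm0, xmm1, Assembler::lt, /* is_signed */ true, Assembler::AVX_512bit);
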
0x1F : 0x1E; 11742 emit_int8((unsigned char)opcode); 11743 emit_operand(as_Register(dst_enc), src, 1); 11744 emit_int8((unsigned char)comparison); 11745 } 11746 11747 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11748 int comparison, bool is_signed, int vector_len) { 11749 assert(VM_Version::supports_evex(), ""); 11750 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11751 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib 11752 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11753 attributes.set_is_evex_instruction(); 11754 attributes.set_embedded_opmask_register_specifier(mask); 11755 attributes.reset_is_clear_context(); 11756 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11757 int opcode = is_signed ? 0x1F : 0x1E; 11758 emit_int24(opcode, (0xC0 | encode), comparison); 11759 } 11760 11761 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11762 int comparison, bool is_signed, int vector_len) { 11763 assert(VM_Version::supports_evex(), ""); 11764 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11765 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib 11766 InstructionMark im(this); 11767 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11768 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit); 11769 attributes.set_is_evex_instruction(); 11770 attributes.set_embedded_opmask_register_specifier(mask); 11771 attributes.reset_is_clear_context(); 11772 int dst_enc = kdst->encoding(); 11773 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11774 int opcode = is_signed ? 0x1F : 0x1E; 11775 emit_int8((unsigned char)opcode); 11776 emit_operand(as_Register(dst_enc), src, 1); 11777 emit_int8((unsigned char)comparison); 11778 } 11779 11780 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11781 int comparison, bool is_signed, int vector_len) { 11782 assert(VM_Version::supports_evex(), ""); 11783 assert(VM_Version::supports_avx512bw(), ""); 11784 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11785 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib 11786 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11787 attributes.set_is_evex_instruction(); 11788 attributes.set_embedded_opmask_register_specifier(mask); 11789 attributes.reset_is_clear_context(); 11790 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11791 int opcode = is_signed ? 
0x3F : 0x3E; 11792 emit_int24(opcode, (0xC0 | encode), comparison); 11793 } 11794 11795 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11796 int comparison, bool is_signed, int vector_len) { 11797 assert(VM_Version::supports_evex(), ""); 11798 assert(VM_Version::supports_avx512bw(), ""); 11799 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11800 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib 11801 InstructionMark im(this); 11802 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11803 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 11804 attributes.set_is_evex_instruction(); 11805 attributes.set_embedded_opmask_register_specifier(mask); 11806 attributes.reset_is_clear_context(); 11807 int dst_enc = kdst->encoding(); 11808 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11809 int opcode = is_signed ? 0x3F : 0x3E; 11810 emit_int8((unsigned char)opcode); 11811 emit_operand(as_Register(dst_enc), src, 1); 11812 emit_int8((unsigned char)comparison); 11813 } 11814 11815 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11816 int comparison, bool is_signed, int vector_len) { 11817 assert(VM_Version::supports_evex(), ""); 11818 assert(VM_Version::supports_avx512bw(), ""); 11819 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11820 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib 11821 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11822 attributes.set_is_evex_instruction(); 11823 attributes.set_embedded_opmask_register_specifier(mask); 11824 attributes.reset_is_clear_context(); 11825 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11826 int opcode = is_signed ? 0x3F : 0x3E; 11827 emit_int24(opcode, (0xC0 | encode), comparison); 11828 } 11829 11830 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11831 int comparison, bool is_signed, int vector_len) { 11832 assert(VM_Version::supports_evex(), ""); 11833 assert(VM_Version::supports_avx512bw(), ""); 11834 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11835 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib 11836 InstructionMark im(this); 11837 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11838 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 11839 attributes.set_is_evex_instruction(); 11840 attributes.set_embedded_opmask_register_specifier(mask); 11841 attributes.reset_is_clear_context(); 11842 int dst_enc = kdst->encoding(); 11843 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11844 int opcode = is_signed ? 

void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (0xC0 | encode));
}

void Assembler::evprorvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x14, (0xC0 | encode));
}

void Assembler::evprold(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprolq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evprolvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (0xC0 | encode));
}

void Assembler::evprolvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x15, (0xC0 | encode));
}
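
// The masking pattern above recurs throughout the EVEX forms in this file:
// the opmask register is embedded in the prefix, and reset_is_clear_context()
// clears the EVEX.z bit so masked-off lanes keep their previous contents
// (merge-masking); when 'merge' is false the z bit stays set and masked-off
// lanes are zeroed. Illustrative effect for evprold with 32-bit lanes:
//   merge == true:  dst[i] = mask[i] ? rol(src[i], shift) : dst[i]
//   merge == false: dst[i] = mask[i] ? rol(src[i], shift) : 0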

void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int mask_enc = mask->encoding();
  emit_int24(0x4C, (0xC0 | encode), 0xF0 & mask_enc << 4);
}

void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x65, (0xC0 | encode));
}

void Assembler::evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W0 65 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x65, (0xC0 | encode));
}

void Assembler::evpblendmb (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  // Encoding: EVEX.NDS.512.66.0F38.W0 66 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x66, (0xC0 | encode));
}

void Assembler::evpblendmw (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  // Encoding: EVEX.NDS.512.66.0F38.W1 66 /r
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x66, (0xC0 | encode));
}

void Assembler::evpblendmd (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  //Encoding: EVEX.NDS.512.66.0F38.W0 64 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x64, (0xC0 | encode));
}

void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  //Encoding: EVEX.NDS.512.66.0F38.W1 64 /r
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x64, (0xC0 | encode));
}
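
// Blend semantics for the evblendm*/evpblendm* group, per the Intel SDM:
// for each lane i, dst[i] = mask[i] ? src[i] : nds[i]. Every lane is fully
// defined by the two sources, so the merge/zero distinction above only
// shows when a caller passes merge == false, which zeroes unselected lanes
// instead of taking them from nds.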

void Assembler::bzhiq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::pextl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::pdepl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::pextq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::pdepq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::pextl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF5);
  emit_operand(dst, src2, 0);
}

void Assembler::pdepl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF5);
  emit_operand(dst, src2, 0);
}

void Assembler::pextq(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF5);
  emit_operand(dst, src2, 0);
}

void Assembler::pdepq(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF5);
  emit_operand(dst, src2, 0);
}

void Assembler::sarxl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::sarxl(Register dst, Address src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_operand(dst, src1, 0);
}

void Assembler::sarxq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::sarxq(Register dst, Address src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_operand(dst, src1, 0);
}

void Assembler::shlxl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::shlxl(Register dst, Address src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_operand(dst, src1, 0);
}

void Assembler::shlxq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::shlxq(Register dst, Address src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_operand(dst, src1, 0);
}

void Assembler::shrxl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::shrxl(Register dst, Address src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_operand(dst, src1, 0);
}

void Assembler::shrxq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::shrxq(Register dst, Address src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_operand(dst, src1, 0);
}
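
// BMI2 parallel bit extract/deposit, for reference (values illustrative):
//   pext: src1 = 0b10110100, mask(src2) = 0b00001111 -> dst = 0b00000100
//         (the src1 bits selected by the mask are packed toward bit 0)
//   pdep: src1 = 0b00000101, mask(src2) = 0b11110000 -> dst = 0b01010000
//         (the low bits of src1 are scattered into the mask's set positions)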

void Assembler::evpmovq2m(KRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vldq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::evpmovd2m(KRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vldq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::evpmovw2m(KRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

void Assembler::evpmovm2q(XMMRegister dst, KRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vldq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::evpmovm2d(XMMRegister dst, KRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vldq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x28, (0xC0 | encode));
}

void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x28, (0xC0 | encode));
}

void Assembler::evpcompressb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x63, (0xC0 | encode));
}

void Assembler::evpcompressw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x63, (0xC0 | encode));
}

void Assembler::evpcompressd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}

void Assembler::evpcompressq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}
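
// The compress forms above pack the lanes selected by the opmask contiguously
// into the low end of the destination. Illustrative effect for dword lanes:
//   src = {a, b, c, d}, mask = 0b0101 -> dst = {a, c, ?, ?}
// where the tail lanes are preserved (merge == true) or zeroed (merge == false).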

void Assembler::evcompressps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8A, (0xC0 | encode));
}

void Assembler::evcompresspd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8A, (0xC0 | encode));
}

#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int24(0x0F, (unsigned char)0x95, (0xC0 | dst->encoding()));
}

#else // LP64

// 64bit only pieces of the assembler

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x95, (0xC0 | enc));
}

// This should only be used by 64bit instructions that can use rip-relative
// addressing; it cannot be used by instructions that want an immediate value.

// Determine whether an address is always reachable in rip-relative addressing mode
// when accessed from the code cache.
static bool is_always_reachable(address target, relocInfo::relocType reloc_type) {
  switch (reloc_type) {
    // This should be rip-relative and easily reachable.
    case relocInfo::internal_word_type: {
      return true;
    }
    // This should be rip-relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // IC code is going to have issues).
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type:
    case relocInfo::static_call_type:
    case relocInfo::static_stub_type: {
      return true;
    }
    case relocInfo::runtime_call_type:
    case relocInfo::external_word_type:
    case relocInfo::poll_return_type: // these are really external_word but need special
    case relocInfo::poll_type: {      // relocs to identify them
      return CodeCache::contains(target);
    }
    default: {
      return false;
    }
  }
}

// Determine whether an address is reachable in rip-relative addressing mode from the code cache.
static bool is_reachable(address target, relocInfo::relocType reloc_type) {
  if (is_always_reachable(target, reloc_type)) {
    return true;
  }
  switch (reloc_type) {
    // None will force a 64bit literal to the code stream. Likely a placeholder
    // for something that will be patched later, and we need to be certain it
    // will always be reachable.
    case relocInfo::none: {
      return false;
    }
    case relocInfo::runtime_call_type:
    case relocInfo::external_word_type:
    case relocInfo::poll_return_type: // these are really external_word but need special
    case relocInfo::poll_type: {      // relocs to identify them
      assert(!CodeCache::contains(target), "always reachable");
      if (ForceUnreachable) {
        return false; // stress the correction code
      }
      // For external_word_type/runtime_call_type, if the target is reachable both
      // from where we are now (possibly a temp buffer) and from anywhere we might
      // end up in the code cache, then it is always reachable.
      // This would have to change if we ever save/restore shared code to be more pessimistic.
      // The code buffer has to be allocated in the code cache, so checking against
      // the code cache boundaries covers that case.
      //
      // In rip-relative addressing mode, an effective address is formed by adding displacement
      // to the 64-bit RIP of the next instruction, which is not known yet. Since the target
      // address is guaranteed to be outside of the code cache, checking against the code
      // cache boundaries is enough to account for that.
      return Assembler::is_simm32(target - CodeCache::low_bound()) &&
             Assembler::is_simm32(target - CodeCache::high_bound());
    }
    default: {
      return false;
    }
  }
}
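
// Worked example (illustrative addresses): with the code cache spanning
// [0x7f0000000000, 0x7f0040000000) and an external target at 0x7f0050000000,
// both (target - low_bound) = 0x50000000 and (target - high_bound) =
// 0x10000000 fit in a signed 32-bit displacement, so the target is reachable
// rip-relative from any instruction in the cache and the is_simm32 checks
// above succeed. A target several GB away would fail them and force a
// 64-bit literal instead.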

bool Assembler::reachable(AddressLiteral adr) {
  assert(CodeCache::contains(pc()), "required");
  if (adr.is_lval()) {
    return false;
  }
  return is_reachable(adr.target(), adr.reloc());
}

bool Assembler::always_reachable(AddressLiteral adr) {
  assert(CodeCache::contains(pc()), "required");
  if (adr.is_lval()) {
    return false;
  }
  return is_always_reachable(adr.target(), adr.reloc());
}

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Register dst, Register src, Prefix p) {
  if (src->encoding() >= 8) {
    p = (Prefix)(p | REX_B);
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    }
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int8_t Assembler::get_prefixq(Address adr) {
  int8_t prfx = get_prefixq(adr, rax);
  assert(REX_W <= prfx && prfx <= REX_WXB, "must be");
  return prfx;
}

int8_t Assembler::get_prefixq(Address adr, Register src) {
  int8_t prfx = (int8_t)(REX_W +
                         ((int)adr.base_needs_rex()) +
                         ((int)adr.index_needs_rex() << 1) +
                         ((int)(src->encoding() >= 8) << 2));
#ifdef ASSERT
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WXB, "must be");
      } else {
        assert(prfx == REX_WB, "must be");
      }
    } else {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WX, "must be");
      } else {
        assert(prfx == REX_W, "must be");
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WRXB, "must be");
      } else {
        assert(prfx == REX_WRB, "must be");
      }
    } else {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WRX, "must be");
      } else {
        assert(prfx == REX_WR, "must be");
      }
    }
  }
#endif
  return prfx;
}

void Assembler::prefixq(Address adr) {
  emit_int8(get_prefixq(adr));
}

void Assembler::prefixq(Address adr, Register src) {
  emit_int8(get_prefixq(adr, src));
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
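
// For reference, a REX prefix has the form 0b0100WRXB (0x40..0x4F): W selects
// 64-bit operand size, R extends the ModRM reg field, X the SIB index, and B
// the ModRM rm/base (or opcode register) field. get_prefixq() above builds
// the byte arithmetically as REX_W + B + (X << 1) + (R << 2), and the ASSERT
// block cross-checks the result against the named REX_W* constants.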

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x13);
  emit_operand(dst, src, 0);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x01);
  emit_operand(src, dst, 0);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x03);
  emit_operand(dst, src, 0);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::adcxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int32(0x0F,
             0x38,
             (unsigned char)0xF6,
             (0xC0 | encode));
}

void Assembler::adoxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int32(0x0F,
             0x38,
             (unsigned char)0xF6,
             (0xC0 | encode));
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, as_Register(4), dst, imm32);
}

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x23);
  emit_operand(dst, src, 0);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x21);
  emit_operand(src, dst, 0);
}

void Assembler::andnq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::andnq(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2, 0);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int16(0x0F, (0xC8 | encode));
}

void Assembler::blsiq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsiq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src, 0);
}

void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsmskq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src, 0);
}

void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsrq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src, 0);
}
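
// In the BLSI/BLSMSK/BLSR forms above the register passed first to
// vex_prefix[_and_encode] is not a real operand: VEX group 17 (0F38 F3 /r)
// distinguishes these instructions by the ModRM reg field, so rbx (/3, BLSI),
// rdx (/2, BLSMSK) and rcx (/1, BLSR) serve purely as opcode extensions,
// mirroring the /digit notation of the Intel SDM.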

void Assembler::cdqq() {
  emit_int16(REX_W, (unsigned char)0x99);
}

void Assembler::clflush(Address adr) {
  assert(VM_Version::supports_clflush(), "should do");
  prefix(adr);
  emit_int16(0x0F, (unsigned char)0xAE);
  emit_operand(rdi, adr, 0);
}

void Assembler::clflushopt(Address adr) {
  assert(VM_Version::supports_clflushopt(), "should do!");
  // adr should be base reg only with no index or offset
  assert(adr.index() == noreg, "index should be noreg");
  assert(adr.scale() == Address::no_scale, "scale should be no_scale");
  assert(adr.disp() == 0, "displacement should be 0");
  // instruction prefix is 0x66
  emit_int8(0x66);
  prefix(adr);
  // opcode family is 0x0F 0xAE
  emit_int16(0x0F, (unsigned char)0xAE);
  // extended opcode byte is 7 == rdi
  emit_operand(rdi, adr, 0);
}

void Assembler::clwb(Address adr) {
  assert(VM_Version::supports_clwb(), "should do!");
  // adr should be base reg only with no index or offset
  assert(adr.index() == noreg, "index should be noreg");
  assert(adr.scale() == Address::no_scale, "scale should be no_scale");
  assert(adr.disp() == 0, "displacement should be 0");
  // instruction prefix is 0x66
  emit_int8(0x66);
  prefix(adr);
  // opcode family is 0x0f 0xAE
  emit_int16(0x0F, (unsigned char)0xAE);
  // extended opcode byte is 6 == rsi
  emit_operand(rsi, adr, 0);
}

void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (0x40 | cc), (0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (0x40 | cc));
  emit_operand(dst, src, 0);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, as_Register(7), dst, imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x39);
  emit_operand(src, dst, 0);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x3B);
  emit_operand(dst, src, 0);
}

void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  emit_int24(get_prefixq(adr, reg), 0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr, 0);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src, 0);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src, 0);
}

void Assembler::cvttsd2siq(Register dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // F2 REX.W 0F 2C /r
  // CVTTSD2SI r64, xmm1/m64
  InstructionMark im(this);
  emit_int32((unsigned char)0xF2, REX_W, 0x0F, 0x2C);
  emit_operand(dst, src, 0);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (0xC0 | encode));
}

void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2D, (0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (0xC0 | encode));
}
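
// Note the opcode split in the conversions above: 0x2C (cvttsd2siq/cvttss2siq)
// truncates toward zero, while 0x2D (cvtsd2siq) rounds according to the
// current MXCSR rounding mode. Java's (long) casts require truncation, which
// is why the cvtt* forms are typically the ones emitted for d2l/f2l.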
simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 13080 emit_int16(0x2C, (0xC0 | encode)); 13081 } 13082 13083 void Assembler::decl(Register dst) { 13084 // Don't use it directly. Use MacroAssembler::decrementl() instead. 13085 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 13086 int encode = prefix_and_encode(dst->encoding()); 13087 emit_int16((unsigned char)0xFF, (0xC8 | encode)); 13088 } 13089 13090 void Assembler::decq(Register dst) { 13091 // Don't use it directly. Use MacroAssembler::decrementq() instead. 13092 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) 13093 int encode = prefixq_and_encode(dst->encoding()); 13094 emit_int16((unsigned char)0xFF, 0xC8 | encode); 13095 } 13096 13097 void Assembler::decq(Address dst) { 13098 // Don't use it directly. Use MacroAssembler::decrementq() instead. 13099 InstructionMark im(this); 13100 emit_int16(get_prefixq(dst), (unsigned char)0xFF); 13101 emit_operand(rcx, dst, 0); 13102 } 13103 13104 void Assembler::fxrstor(Address src) { 13105 emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE); 13106 emit_operand(as_Register(1), src, 0); 13107 } 13108 13109 void Assembler::xrstor(Address src) { 13110 emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE); 13111 emit_operand(as_Register(5), src, 0); 13112 } 13113 13114 void Assembler::fxsave(Address dst) { 13115 emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE); 13116 emit_operand(as_Register(0), dst, 0); 13117 } 13118 13119 void Assembler::xsave(Address dst) { 13120 emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE); 13121 emit_operand(as_Register(4), dst, 0); 13122 } 13123 13124 void Assembler::idivq(Register src) { 13125 int encode = prefixq_and_encode(src->encoding()); 13126 emit_int16((unsigned char)0xF7, (0xF8 | encode)); 13127 } 13128 13129 void Assembler::divq(Register src) { 13130 int encode = prefixq_and_encode(src->encoding()); 13131 emit_int16((unsigned char)0xF7, (0xF0 | encode)); 13132 } 13133 13134 void Assembler::imulq(Register dst, Register src) { 13135 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 13136 emit_int24(0x0F, (unsigned char)0xAF, (0xC0 | encode)); 13137 } 13138 13139 void Assembler::imulq(Register src) { 13140 int encode = prefixq_and_encode(src->encoding()); 13141 emit_int16((unsigned char)0xF7, (0xE8 | encode)); 13142 } 13143 13144 void Assembler::imulq(Register dst, Address src, int32_t value) { 13145 InstructionMark im(this); 13146 prefixq(src, dst); 13147 if (is8bit(value)) { 13148 emit_int8((unsigned char)0x6B); 13149 emit_operand(dst, src, 1); 13150 emit_int8(value); 13151 } else { 13152 emit_int8((unsigned char)0x69); 13153 emit_operand(dst, src, 4); 13154 emit_int32(value); 13155 } 13156 } 13157 13158 void Assembler::imulq(Register dst, Register src, int value) { 13159 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 13160 if (is8bit(value)) { 13161 emit_int24(0x6B, (0xC0 | encode), (value & 0xFF)); 13162 } else { 13163 emit_int16(0x69, (0xC0 | encode)); 13164 emit_int32(value); 13165 } 13166 } 13167 13168 void Assembler::imulq(Register dst, Address src) { 13169 InstructionMark im(this); 13170 emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xAF); 13171 emit_operand(dst, src, 0); 13172 } 13173 13174 void Assembler::incl(Register dst) { 13175 // Don't use it directly. Use MacroAssembler::incrementl() instead. 
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xFF);
  emit_operand(rax, dst, 0);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_int64(imm64);
}

void Assembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rtype, format);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rspec);
}
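// mov64 and mov_literal64 use the B8+r form with a full 8-byte immediate,
// the only x86-64 encoding that can materialize an arbitrary 64-bit constant
// in a single instruction. An illustrative sketch of the emitted bytes
// (worked out by hand, not taken from this file):
//
//   mov64(rax, 0x123456789LL);  // 48 B8 89 67 45 23 01 00 00 00
//
// For values that fit in a sign-extended imm32, movq(Register, int32_t)
// below produces a shorter C7 /0 encoding.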
void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int16((unsigned char)0x81, (0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode));
}

void Assembler::lzcntq(Register dst, Address src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int16(0x0F, (unsigned char)0xBD);
  emit_operand(dst, src, 0);
}

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6E, (0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7E, (0xC0 | encode));
}

void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x8B);
  emit_operand(dst, src, 0);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), (unsigned char)0x89);
  emit_operand(src, dst, 0);
}

void Assembler::movq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xC7);
  emit_operand(as_Register(0), dst, 4);
  emit_int32(imm32);
}

void Assembler::movq(Register dst, int32_t imm32) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xC7, (0xC0 | encode));
  emit_int32(imm32);
}
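// Both movq forms taking an int32_t use the C7 /0 encoding, whose 32-bit
// immediate is sign-extended to 64 bits by the CPU. A sketch of the
// consequence (illustrative, not from this file):
//
//   movq(rax, -1);             // rax = 0xFFFFFFFFFFFFFFFF, 7 bytes
//   mov64(rax, 0xFFFFFFFFLL);  // needed for an unsigned 0xFFFFFFFF,
//                              // since movq would sign-extend it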
void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst),
             0x0F,
             (unsigned char)0xBE);
  emit_operand(dst, src, 0);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3) as movl $0x0000000048000000,(%rbx).
  // As a result we shouldn't use this form until it has been tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xC7 | encode);
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x63);
  emit_operand(dst, src, 0);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16(0x63, (0xC0 | encode));
}

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst),
             0x0F,
             (unsigned char)0xBF);
  emit_operand(dst, src, 0);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst),
             0x0F,
             (unsigned char)0xB6);
  emit_operand(dst, src, 0);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB6, (0xC0 | encode));
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst),
             0x0F,
             (unsigned char)0xB7);
  emit_operand(dst, src, 0);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB7, (0xC0 | encode));
}
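// The movs*q/movz*q family above differs only in how the upper bits of the
// destination are filled: movs* sign-extends (opcodes 0F BE / 0F BF / 63),
// movz* zero-extends (0F B6 / 0F B7). A sketch with an assumed byte value:
//
//   // with the byte at [rbx] = 0x80:
//   movsbq(rax, Address(rbx, 0));  // rax = 0xFFFFFFFFFFFFFF80
//   movzbq(rax, Address(rbx, 0));  // rax = 0x0000000000000080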
void Assembler::mulq(Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src), (unsigned char)0xF7);
  emit_operand(rsp, src, 0);
}

void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xE0 | encode));
}

void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF6, (0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD8 | encode));
}

void Assembler::negq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xF7);
  emit_operand(as_Register(3), dst, 0);
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD0 | encode));
}

void Assembler::btsq(Address dst, int imm8) {
  assert(isByte(imm8), "not a byte");
  InstructionMark im(this);
  emit_int24(get_prefixq(dst),
             0x0F,
             (unsigned char)0xBA);
  emit_operand(rbp /* 5 */, dst, 1);
  emit_int8(imm8);
}

void Assembler::btrq(Address dst, int imm8) {
  assert(isByte(imm8), "not a byte");
  InstructionMark im(this);
  emit_int24(get_prefixq(dst),
             0x0F,
             (unsigned char)0xBA);
  emit_operand(rsi /* 6 */, dst, 1);
  emit_int8(imm8);
}
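// btsq and btrq share opcode 0F BA and are distinguished only by the reg
// field of the ModRM byte (/5 = bts, /6 = btr), which is why a dummy
// register is passed to emit_operand above. An illustrative sketch
// (the address is assumed for the example):
//
//   btsq(Address(rbx, 0), 3);  // set bit 3 of the qword at [rbx]
//   btrq(Address(rbx, 0), 3);  // clear it again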
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, as_Register(1), dst, imm32);
}

void Assembler::orq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), (unsigned char)0x09);
  emit_operand(src, dst, 0);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x0B);
  emit_operand(dst, src, 0);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int32((unsigned char)0xF3,
             get_prefixq(src, dst),
             0x0F,
             (unsigned char)0xB8);
  emit_operand(dst, src, 0);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x8F);
  emit_operand(rax, dst, 0);
}

void Assembler::popq(Register dst) {
  emit_int8((unsigned char)0x58 | dst->encoding());
}

// Precomputable: popa, pusha, vzeroupper

// The result of these routines is invariant from one invocation to another
// for the duration of a run. Caching the result on bootstrap and copying it
// out on subsequent invocations can thus be beneficial.
static bool precomputed = false;

static u_char* popa_code = NULL;
static int popa_len = 0;

static u_char* pusha_code = NULL;
static int pusha_len = 0;

static u_char* vzup_code = NULL;
static int vzup_len = 0;

void Assembler::precompute_instructions() {
  assert(!Universe::is_fully_initialized(), "must still be single threaded");
  guarantee(!precomputed, "only once");
  precomputed = true;
  ResourceMark rm;

  // Make a temporary buffer big enough for the routines we're capturing
  int size = 256;
  char* tmp_code = NEW_RESOURCE_ARRAY(char, size);
  CodeBuffer buffer((address)tmp_code, size);
  MacroAssembler masm(&buffer);

  address begin_popa  = masm.code_section()->end();
  masm.popa_uncached();
  address end_popa    = masm.code_section()->end();
  masm.pusha_uncached();
  address end_pusha   = masm.code_section()->end();
  masm.vzeroupper_uncached();
  address end_vzup    = masm.code_section()->end();

  // Save the instructions to permanent buffers.
  popa_len = (int)(end_popa - begin_popa);
  popa_code = NEW_C_HEAP_ARRAY(u_char, popa_len, mtInternal);
  memcpy(popa_code, begin_popa, popa_len);

  pusha_len = (int)(end_pusha - end_popa);
  pusha_code = NEW_C_HEAP_ARRAY(u_char, pusha_len, mtInternal);
  memcpy(pusha_code, end_popa, pusha_len);

  vzup_len = (int)(end_vzup - end_pusha);
  if (vzup_len > 0) {
    vzup_code = NEW_C_HEAP_ARRAY(u_char, vzup_len, mtInternal);
    memcpy(vzup_code, end_pusha, vzup_len);
  } else {
    vzup_code = pusha_code; // dummy
  }

  assert(masm.code()->total_oop_size() == 0 &&
         masm.code()->total_metadata_size() == 0 &&
         masm.code()->total_relocation_size() == 0,
         "pre-computed code can't reference oops, metadata or contain relocations");
}

static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
  assert(src != NULL, "code to copy must have been pre-computed");
  assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
  address end = code_section->end();
  memcpy(end, src, src_len);
  code_section->set_end(end + src_len);
}

void Assembler::popa() { // 64bit
  emit_copy(code_section(), popa_code, popa_len);
}

void Assembler::popa_uncached() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // Skip rsp as it is restored automatically to the value
  // before the corresponding pusha when popa is done.
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}
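// For reference, the 16-slot save area shared by pusha_uncached()/popa_uncached(),
// as derived from the offsets above (slot 11 is the skipped rsp slot):
//
//   [rsp +  0..7  * wordSize]  r15 r14 r13 r12 r11 r10 r9 r8
//   [rsp +  8..15 * wordSize]  rdi rsi rbp (rsp) rbx rdx rcx rax
//
// The original rsp can always be recomputed as rsp + 16 * wordSize.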
// Does not actually store the value of rsp on the stack.
// The slot for rsp just contains an arbitrary value.
void Assembler::pusha() { // 64bit
  emit_copy(code_section(), pusha_code, pusha_len);
}

// Does not actually store the value of rsp on the stack.
// The slot for rsp just contains an arbitrary value.
void Assembler::pusha_uncached() { // 64bit
  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // Skip rsp as the value is normally not used. There are a few places where
  // the original value of rsp needs to be known but that can be computed
  // from the value of rsp immediately after pusha (rsp + 16 * wordSize).
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::vzeroupper() {
  emit_copy(code_section(), vzup_code, vzup_len);
}

void Assembler::vzeroall() {
  assert(VM_Version::supports_avx(), "requires AVX");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x77);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src), (unsigned char)0xFF);
  emit_operand(rsi, src, 0);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xD0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
  }
}

void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xD8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
  }
}
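// A note on the recurring assert(isShiftCount(imm8 >> 1), ...) idiom in the
// 64-bit shift/rotate emitters: assuming isShiftCount() accepts the 0..31
// range legal for 32-bit shifts, checking the count shifted right by one
// admits the 0..63 range that 64-bit forms allow. For example:
//
//   rclq(rax, 63);  // imm8 >> 1 == 31, accepted: 63 is legal for a q-form
//   rclq(rax, 64);  // imm8 >> 1 == 32, would trip the assert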
void Assembler::rorxl(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
}

void Assembler::rorxl(Register dst, Address src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0xF0);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
}

void Assembler::rorxq(Register dst, Address src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0xF0);
  emit_operand(dst, src, 1);
  emit_int8(imm8);
}
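// Unlike the rcl/rcr family above, the BMI2 rorx* emitters produce a
// VEX-encoded rotate that neither reads nor writes any flags and writes a
// separate destination. A sketch of where that matters (illustrative only,
// with an assumed Label L):
//
//   cmpq(rax, rbx);                // sets flags
//   rorxq(rcx, rdx, 17);           // rcx = rdx rotated right 17; flags untouched
//   jcc(Assembler::equal, L);      // still tests the cmpq result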
#ifdef _LP64
void Assembler::salq(Address dst, int imm8) {
  InstructionMark im(this);
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  if (imm8 == 1) {
    emit_int16(get_prefixq(dst), (unsigned char)0xD1);
    emit_operand(as_Register(4), dst, 0);
  } else {
    emit_int16(get_prefixq(dst), (unsigned char)0xC1);
    emit_operand(as_Register(4), dst, 1);
    emit_int8(imm8);
  }
}

void Assembler::salq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xD3);
  emit_operand(as_Register(4), dst, 0);
}

void Assembler::salq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
  }
}

void Assembler::salq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE0 | encode));
}

void Assembler::sarq(Address dst, int imm8) {
  InstructionMark im(this);
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  if (imm8 == 1) {
    emit_int16(get_prefixq(dst), (unsigned char)0xD1);
    emit_operand(as_Register(7), dst, 0);
  } else {
    emit_int16(get_prefixq(dst), (unsigned char)0xC1);
    emit_operand(as_Register(7), dst, 1);
    emit_int8(imm8);
  }
}

void Assembler::sarq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xD3);
  emit_operand(as_Register(7), dst, 0);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xF8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xF8 | encode));
}
#endif

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x1B);
  emit_operand(dst, src, 0);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
  }
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE8 | encode));
}

void Assembler::shrq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xD3);
  emit_operand(as_Register(5), dst, 0);
}

void Assembler::shrq(Address dst, int imm8) {
  InstructionMark im(this);
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  if (imm8 == 1) {
    emit_int16(get_prefixq(dst), (unsigned char)0xD1);
    emit_operand(as_Register(5), dst, 0);
  } else {
    emit_int16(get_prefixq(dst), (unsigned char)0xC1);
    emit_operand(as_Register(5), dst, 1);
    emit_int8(imm8);
  }
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x29);
  emit_operand(src, dst, 0);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x2B);
  emit_operand(dst, src, 0);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
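// Why force a 4-byte immediate in subq_imm32? A fixed-size immediate field
// keeps the instruction length constant, which matters at sites whose
// immediate may be patched after emission. A byte-level sketch (worked out
// by hand for illustration; the short form choice is made by emit_arith):
//
//   subq(rsp, 16);        // 48 83 EC 10           (sign-extended imm8 form)
//   subq_imm32(rsp, 16);  // 48 81 EC 10 00 00 00  (imm32 form forced)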
void Assembler::testq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xF7);
  emit_operand(as_Register(0), dst, 4);
  emit_int32(imm32);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // Not using emit_arith because test does not support
  // sign-extension of 8-bit operands.
  if (dst == rax) {
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
    emit_int32(imm32);
  } else {
    int encode = dst->encoding();
    encode = prefixq_and_encode(encode);
    emit_int16((unsigned char)0xF7, (0xC0 | encode));
    emit_int32(imm32);
  }
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x85);
  emit_operand(dst, src, 0);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(dst, src), 0x0F, (unsigned char)0xC1);
  emit_operand(src, dst, 0);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x87);
  emit_operand(dst, src, 0);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x87, (0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x33);
  emit_operand(dst, src, 0);
}

void Assembler::xorq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, as_Register(6), dst, imm32);
}

void Assembler::xorq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x31);
  emit_operand(src, dst, 0);
}

#endif // !LP64

void InstructionAttr::set_address_attributes(int tuple_type, int input_size_in_bits) {
  if (VM_Version::supports_evex()) {
    _tuple_type = tuple_type;
    _input_size_in_bits = input_size_in_bits;
  }
}
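// set_address_attributes only has an effect on EVEX-capable CPUs: the tuple
// type and input size feed the EVEX disp8*N compressed-displacement
// calculation, letting a scaled 8-bit displacement stand in for a full
// 32-bit one. A sketch of the usage pattern seen throughout this file
// (assuming the usual disp8*N rule, T1S with a 64-bit element gives N = 8,
// so a displacement of 64 can be encoded as disp8 = 8):
//
//   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
//   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);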