1 /* 2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "asm/assembler.inline.hpp" 28 #include "gc/shared/cardTableBarrierSet.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "interpreter/interpreter.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "prims/methodHandles.hpp" 33 #include "runtime/objectMonitor.hpp" 34 #include "runtime/os.hpp" 35 #include "runtime/sharedRuntime.hpp" 36 #include "runtime/stubRoutines.hpp" 37 #include "runtime/vm_version.hpp" 38 #include "utilities/macros.hpp" 39 40 #ifdef PRODUCT 41 #define BLOCK_COMMENT(str) /* nothing */ 42 #define STOP(error) stop(error) 43 #else 44 #define BLOCK_COMMENT(str) block_comment(str) 45 #define STOP(error) block_comment(error); stop(error) 46 #endif 47 48 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 49 // Implementation of AddressLiteral 50 51 // A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms. 52 unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = { 53 // -----------------Table 4.5 -------------------- // 54 16, 32, 64, // EVEX_FV(0) 55 4, 4, 4, // EVEX_FV(1) - with Evex.b 56 16, 32, 64, // EVEX_FV(2) - with Evex.w 57 8, 8, 8, // EVEX_FV(3) - with Evex.w and Evex.b 58 8, 16, 32, // EVEX_HV(0) 59 4, 4, 4, // EVEX_HV(1) - with Evex.b 60 // -----------------Table 4.6 -------------------- // 61 16, 32, 64, // EVEX_FVM(0) 62 1, 1, 1, // EVEX_T1S(0) 63 2, 2, 2, // EVEX_T1S(1) 64 4, 4, 4, // EVEX_T1S(2) 65 8, 8, 8, // EVEX_T1S(3) 66 4, 4, 4, // EVEX_T1F(0) 67 8, 8, 8, // EVEX_T1F(1) 68 8, 8, 8, // EVEX_T2(0) 69 0, 16, 16, // EVEX_T2(1) 70 0, 16, 16, // EVEX_T4(0) 71 0, 0, 32, // EVEX_T4(1) 72 0, 0, 32, // EVEX_T8(0) 73 8, 16, 32, // EVEX_HVM(0) 74 4, 8, 16, // EVEX_QVM(0) 75 2, 4, 8, // EVEX_OVM(0) 76 16, 16, 16, // EVEX_M128(0) 77 8, 32, 64, // EVEX_DUP(0) 78 0, 0, 0 // EVEX_NTUP 79 }; 80 81 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { 82 _is_lval = false; 83 _target = target; 84 switch (rtype) { 85 case relocInfo::oop_type: 86 case relocInfo::metadata_type: 87 // Oops are a special case. Normally they would be their own section 88 // but in cases like icBuffer they are literals in the code stream that 89 // we don't have a section for. We use none so that we get a literal address 90 // which is always patchable. 91 break; 92 case relocInfo::external_word_type: 93 _rspec = external_word_Relocation::spec(target); 94 break; 95 case relocInfo::internal_word_type: 96 _rspec = internal_word_Relocation::spec(target); 97 break; 98 case relocInfo::opt_virtual_call_type: 99 _rspec = opt_virtual_call_Relocation::spec(); 100 break; 101 case relocInfo::static_call_type: 102 _rspec = static_call_Relocation::spec(); 103 break; 104 case relocInfo::runtime_call_type: 105 _rspec = runtime_call_Relocation::spec(); 106 break; 107 case relocInfo::poll_type: 108 case relocInfo::poll_return_type: 109 _rspec = Relocation::spec_simple(rtype); 110 break; 111 case relocInfo::none: 112 break; 113 default: 114 ShouldNotReachHere(); 115 break; 116 } 117 } 118 119 // Implementation of Address 120 121 #ifdef _LP64 122 123 Address Address::make_array(ArrayAddress adr) { 124 // Not implementable on 64bit machines 125 // Should have been handled higher up the call chain. 126 ShouldNotReachHere(); 127 return Address(); 128 } 129 130 // exceedingly dangerous constructor 131 Address::Address(int disp, address loc, relocInfo::relocType rtype) { 132 _base = noreg; 133 _index = noreg; 134 _scale = no_scale; 135 _disp = disp; 136 _xmmindex = xnoreg; 137 _isxmmindex = false; 138 switch (rtype) { 139 case relocInfo::external_word_type: 140 _rspec = external_word_Relocation::spec(loc); 141 break; 142 case relocInfo::internal_word_type: 143 _rspec = internal_word_Relocation::spec(loc); 144 break; 145 case relocInfo::runtime_call_type: 146 // HMM 147 _rspec = runtime_call_Relocation::spec(); 148 break; 149 case relocInfo::poll_type: 150 case relocInfo::poll_return_type: 151 _rspec = Relocation::spec_simple(rtype); 152 break; 153 case relocInfo::none: 154 break; 155 default: 156 ShouldNotReachHere(); 157 } 158 } 159 #else // LP64 160 161 Address Address::make_array(ArrayAddress adr) { 162 AddressLiteral base = adr.base(); 163 Address index = adr.index(); 164 assert(index._disp == 0, "must not have disp"); // maybe it can? 165 Address array(index._base, index._index, index._scale, (intptr_t) base.target()); 166 array._rspec = base._rspec; 167 return array; 168 } 169 170 // exceedingly dangerous constructor 171 Address::Address(address loc, RelocationHolder spec) { 172 _base = noreg; 173 _index = noreg; 174 _scale = no_scale; 175 _disp = (intptr_t) loc; 176 _rspec = spec; 177 _xmmindex = xnoreg; 178 _isxmmindex = false; 179 } 180 181 #endif // _LP64 182 183 184 185 // Convert the raw encoding form into the form expected by the constructor for 186 // Address. An index of 4 (rsp) corresponds to having no index, so convert 187 // that to noreg for the Address constructor. 188 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) { 189 RelocationHolder rspec = RelocationHolder::none; 190 if (disp_reloc != relocInfo::none) { 191 rspec = Relocation::spec_simple(disp_reloc); 192 } 193 bool valid_index = index != rsp->encoding(); 194 if (valid_index) { 195 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); 196 madr._rspec = rspec; 197 return madr; 198 } else { 199 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); 200 madr._rspec = rspec; 201 return madr; 202 } 203 } 204 205 // Implementation of Assembler 206 207 int AbstractAssembler::code_fill_byte() { 208 return (u_char)'\xF4'; // hlt 209 } 210 211 void Assembler::init_attributes(void) { 212 _legacy_mode_bw = (VM_Version::supports_avx512bw() == false); 213 _legacy_mode_dq = (VM_Version::supports_avx512dq() == false); 214 _legacy_mode_vl = (VM_Version::supports_avx512vl() == false); 215 _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false); 216 NOT_LP64(_is_managed = false;) 217 _attributes = NULL; 218 } 219 220 221 void Assembler::membar(Membar_mask_bits order_constraint) { 222 // We only have to handle StoreLoad 223 if (order_constraint & StoreLoad) { 224 // All usable chips support "locked" instructions which suffice 225 // as barriers, and are much faster than the alternative of 226 // using cpuid instruction. We use here a locked add [esp-C],0. 227 // This is conveniently otherwise a no-op except for blowing 228 // flags, and introducing a false dependency on target memory 229 // location. We can't do anything with flags, but we can avoid 230 // memory dependencies in the current method by locked-adding 231 // somewhere else on the stack. Doing [esp+C] will collide with 232 // something on stack in current method, hence we go for [esp-C]. 233 // It is convenient since it is almost always in data cache, for 234 // any small C. We need to step back from SP to avoid data 235 // dependencies with other things on below SP (callee-saves, for 236 // example). Without a clear way to figure out the minimal safe 237 // distance from SP, it makes sense to step back the complete 238 // cache line, as this will also avoid possible second-order effects 239 // with locked ops against the cache line. Our choice of offset 240 // is bounded by x86 operand encoding, which should stay within 241 // [-128; +127] to have the 8-byte displacement encoding. 242 // 243 // Any change to this code may need to revisit other places in 244 // the code where this idiom is used, in particular the 245 // orderAccess code. 246 247 int offset = -VM_Version::L1_line_size(); 248 if (offset < -128) { 249 offset = -128; 250 } 251 252 lock(); 253 addl(Address(rsp, offset), 0);// Assert the lock# signal here 254 } 255 } 256 257 // make this go away someday 258 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { 259 if (rtype == relocInfo::none) 260 emit_int32(data); 261 else 262 emit_data(data, Relocation::spec_simple(rtype), format); 263 } 264 265 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) { 266 assert(imm_operand == 0, "default format must be immediate in this file"); 267 assert(inst_mark() != NULL, "must be inside InstructionMark"); 268 if (rspec.type() != relocInfo::none) { 269 #ifdef ASSERT 270 check_relocation(rspec, format); 271 #endif 272 // Do not use AbstractAssembler::relocate, which is not intended for 273 // embedded words. Instead, relocate to the enclosing instruction. 274 275 // hack. call32 is too wide for mask so use disp32 276 if (format == call32_operand) 277 code_section()->relocate(inst_mark(), rspec, disp32_operand); 278 else 279 code_section()->relocate(inst_mark(), rspec, format); 280 } 281 emit_int32(data); 282 } 283 284 static int encode(Register r) { 285 int enc = r->encoding(); 286 if (enc >= 8) { 287 enc -= 8; 288 } 289 return enc; 290 } 291 292 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { 293 assert(dst->has_byte_register(), "must have byte register"); 294 assert(isByte(op1) && isByte(op2), "wrong opcode"); 295 assert(isByte(imm8), "not a byte"); 296 assert((op1 & 0x01) == 0, "should be 8bit operation"); 297 emit_int24(op1, (op2 | encode(dst)), imm8); 298 } 299 300 301 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { 302 assert(isByte(op1) && isByte(op2), "wrong opcode"); 303 assert(op1 == 0x81, "Unexpected opcode"); 304 if (is8bit(imm32)) { 305 emit_int24(op1 | 0x02, // set sign bit 306 op2 | encode(dst), 307 imm32 & 0xFF); 308 } else if (dst == rax) { 309 switch (op2) { 310 case 0xD0: emit_int8(0x15); break; // adc 311 case 0xC0: emit_int8(0x05); break; // add 312 case 0xE0: emit_int8(0x25); break; // and 313 case 0xF8: emit_int8(0x3D); break; // cmp 314 case 0xC8: emit_int8(0x0D); break; // or 315 case 0xD8: emit_int8(0x1D); break; // sbb 316 case 0xE8: emit_int8(0x2D); break; // sub 317 case 0xF0: emit_int8(0x35); break; // xor 318 default: ShouldNotReachHere(); 319 } 320 emit_int32(imm32); 321 } else { 322 emit_int16(op1, (op2 | encode(dst))); 323 emit_int32(imm32); 324 } 325 } 326 327 // Force generation of a 4 byte immediate value even if it fits into 8bit 328 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) { 329 assert(isByte(op1) && isByte(op2), "wrong opcode"); 330 assert((op1 & 0x01) == 1, "should be 32bit operation"); 331 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 332 emit_int16(op1, (op2 | encode(dst))); 333 emit_int32(imm32); 334 } 335 336 // immediate-to-memory forms 337 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) { 338 assert((op1 & 0x01) == 1, "should be 32bit operation"); 339 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 340 if (is8bit(imm32)) { 341 emit_int8(op1 | 0x02); // set sign bit 342 emit_operand(rm, adr, 1); 343 emit_int8(imm32 & 0xFF); 344 } else { 345 emit_int8(op1); 346 emit_operand(rm, adr, 4); 347 emit_int32(imm32); 348 } 349 } 350 351 352 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { 353 assert(isByte(op1) && isByte(op2), "wrong opcode"); 354 emit_int16(op1, (op2 | encode(dst) << 3 | encode(src))); 355 } 356 357 358 bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, 359 int cur_tuple_type, int in_size_in_bits, int cur_encoding) { 360 int mod_idx = 0; 361 // We will test if the displacement fits the compressed format and if so 362 // apply the compression to the displacement iff the result is8bit. 363 if (VM_Version::supports_evex() && is_evex_inst) { 364 switch (cur_tuple_type) { 365 case EVEX_FV: 366 if ((cur_encoding & VEX_W) == VEX_W) { 367 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2; 368 } else { 369 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 370 } 371 break; 372 373 case EVEX_HV: 374 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 375 break; 376 377 case EVEX_FVM: 378 break; 379 380 case EVEX_T1S: 381 switch (in_size_in_bits) { 382 case EVEX_8bit: 383 break; 384 385 case EVEX_16bit: 386 mod_idx = 1; 387 break; 388 389 case EVEX_32bit: 390 mod_idx = 2; 391 break; 392 393 case EVEX_64bit: 394 mod_idx = 3; 395 break; 396 } 397 break; 398 399 case EVEX_T1F: 400 case EVEX_T2: 401 case EVEX_T4: 402 mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0; 403 break; 404 405 case EVEX_T8: 406 break; 407 408 case EVEX_HVM: 409 break; 410 411 case EVEX_QVM: 412 break; 413 414 case EVEX_OVM: 415 break; 416 417 case EVEX_M128: 418 break; 419 420 case EVEX_DUP: 421 break; 422 423 default: 424 assert(0, "no valid evex tuple_table entry"); 425 break; 426 } 427 428 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) { 429 int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len]; 430 if ((disp % disp_factor) == 0) { 431 int new_disp = disp / disp_factor; 432 if ((-0x80 <= new_disp && new_disp < 0x80)) { 433 disp = new_disp; 434 } 435 } else { 436 return false; 437 } 438 } 439 } 440 return (-0x80 <= disp && disp < 0x80); 441 } 442 443 444 bool Assembler::emit_compressed_disp_byte(int &disp) { 445 int mod_idx = 0; 446 // We will test if the displacement fits the compressed format and if so 447 // apply the compression to the displacement iff the result is8bit. 448 if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) { 449 int evex_encoding = _attributes->get_evex_encoding(); 450 int tuple_type = _attributes->get_tuple_type(); 451 switch (tuple_type) { 452 case EVEX_FV: 453 if ((evex_encoding & VEX_W) == VEX_W) { 454 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2; 455 } else { 456 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 457 } 458 break; 459 460 case EVEX_HV: 461 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 462 break; 463 464 case EVEX_FVM: 465 break; 466 467 case EVEX_T1S: 468 switch (_attributes->get_input_size()) { 469 case EVEX_8bit: 470 break; 471 472 case EVEX_16bit: 473 mod_idx = 1; 474 break; 475 476 case EVEX_32bit: 477 mod_idx = 2; 478 break; 479 480 case EVEX_64bit: 481 mod_idx = 3; 482 break; 483 } 484 break; 485 486 case EVEX_T1F: 487 case EVEX_T2: 488 case EVEX_T4: 489 mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0; 490 break; 491 492 case EVEX_T8: 493 break; 494 495 case EVEX_HVM: 496 break; 497 498 case EVEX_QVM: 499 break; 500 501 case EVEX_OVM: 502 break; 503 504 case EVEX_M128: 505 break; 506 507 case EVEX_DUP: 508 break; 509 510 default: 511 assert(0, "no valid evex tuple_table entry"); 512 break; 513 } 514 515 int vector_len = _attributes->get_vector_len(); 516 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) { 517 int disp_factor = tuple_table[tuple_type + mod_idx][vector_len]; 518 if ((disp % disp_factor) == 0) { 519 int new_disp = disp / disp_factor; 520 if (is8bit(new_disp)) { 521 disp = new_disp; 522 } 523 } else { 524 return false; 525 } 526 } 527 } 528 return is8bit(disp); 529 } 530 531 static bool is_valid_encoding(int reg_enc) { 532 return reg_enc >= 0; 533 } 534 535 static int raw_encode(Register reg) { 536 assert(reg == noreg || reg->is_valid(), "sanity"); 537 int reg_enc = (intptr_t)reg; 538 assert(reg_enc == -1 || is_valid_encoding(reg_enc), "sanity"); 539 return reg_enc; 540 } 541 542 static int raw_encode(XMMRegister xmmreg) { 543 assert(xmmreg == xnoreg || xmmreg->is_valid(), "sanity"); 544 int xmmreg_enc = (intptr_t)xmmreg; 545 assert(xmmreg_enc == -1 || is_valid_encoding(xmmreg_enc), "sanity"); 546 return xmmreg_enc; 547 } 548 549 static int modrm_encoding(int mod, int dst_enc, int src_enc) { 550 return (mod & 3) << 6 | (dst_enc & 7) << 3 | (src_enc & 7); 551 } 552 553 static int sib_encoding(Address::ScaleFactor scale, int index_enc, int base_enc) { 554 return (scale & 3) << 6 | (index_enc & 7) << 3 | (base_enc & 7); 555 } 556 557 inline void Assembler::emit_modrm(int mod, int dst_enc, int src_enc) { 558 assert((mod & 3) != 0b11, "forbidden"); 559 int modrm = modrm_encoding(mod, dst_enc, src_enc); 560 emit_int8(modrm); 561 } 562 563 inline void Assembler::emit_modrm_disp8(int mod, int dst_enc, int src_enc, 564 int disp) { 565 int modrm = modrm_encoding(mod, dst_enc, src_enc); 566 emit_int16(modrm, disp & 0xFF); 567 } 568 569 inline void Assembler::emit_modrm_sib(int mod, int dst_enc, int src_enc, 570 Address::ScaleFactor scale, int index_enc, int base_enc) { 571 int modrm = modrm_encoding(mod, dst_enc, src_enc); 572 int sib = sib_encoding(scale, index_enc, base_enc); 573 emit_int16(modrm, sib); 574 } 575 576 inline void Assembler::emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc, 577 Address::ScaleFactor scale, int index_enc, int base_enc, 578 int disp) { 579 int modrm = modrm_encoding(mod, dst_enc, src_enc); 580 int sib = sib_encoding(scale, index_enc, base_enc); 581 emit_int24(modrm, sib, disp & 0xFF); 582 } 583 584 void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc, 585 Address::ScaleFactor scale, int disp, 586 RelocationHolder const& rspec, 587 int rip_relative_correction) { 588 bool no_relocation = (rspec.type() == relocInfo::none); 589 590 if (is_valid_encoding(base_enc)) { 591 if (is_valid_encoding(index_enc)) { 592 assert(scale != Address::no_scale, "inconsistent address"); 593 // [base + index*scale + disp] 594 if (disp == 0 && no_relocation && 595 base_enc != rbp->encoding() LP64_ONLY(&& base_enc != r13->encoding())) { 596 // [base + index*scale] 597 // [00 reg 100][ss index base] 598 emit_modrm_sib(0b00, reg_enc, 0b100, 599 scale, index_enc, base_enc); 600 } else if (emit_compressed_disp_byte(disp) && no_relocation) { 601 // [base + index*scale + imm8] 602 // [01 reg 100][ss index base] imm8 603 emit_modrm_sib_disp8(0b01, reg_enc, 0b100, 604 scale, index_enc, base_enc, 605 disp); 606 } else { 607 // [base + index*scale + disp32] 608 // [10 reg 100][ss index base] disp32 609 emit_modrm_sib(0b10, reg_enc, 0b100, 610 scale, index_enc, base_enc); 611 emit_data(disp, rspec, disp32_operand); 612 } 613 } else if (base_enc == rsp->encoding() LP64_ONLY(|| base_enc == r12->encoding())) { 614 // [rsp + disp] 615 if (disp == 0 && no_relocation) { 616 // [rsp] 617 // [00 reg 100][00 100 100] 618 emit_modrm_sib(0b00, reg_enc, 0b100, 619 Address::times_1, 0b100, 0b100); 620 } else if (emit_compressed_disp_byte(disp) && no_relocation) { 621 // [rsp + imm8] 622 // [01 reg 100][00 100 100] disp8 623 emit_modrm_sib_disp8(0b01, reg_enc, 0b100, 624 Address::times_1, 0b100, 0b100, 625 disp); 626 } else { 627 // [rsp + imm32] 628 // [10 reg 100][00 100 100] disp32 629 emit_modrm_sib(0b10, reg_enc, 0b100, 630 Address::times_1, 0b100, 0b100); 631 emit_data(disp, rspec, disp32_operand); 632 } 633 } else { 634 // [base + disp] 635 assert(base_enc != rsp->encoding() LP64_ONLY(&& base_enc != r12->encoding()), "illegal addressing mode"); 636 if (disp == 0 && no_relocation && 637 base_enc != rbp->encoding() LP64_ONLY(&& base_enc != r13->encoding())) { 638 // [base] 639 // [00 reg base] 640 emit_modrm(0, reg_enc, base_enc); 641 } else if (emit_compressed_disp_byte(disp) && no_relocation) { 642 // [base + disp8] 643 // [01 reg base] disp8 644 emit_modrm_disp8(0b01, reg_enc, base_enc, 645 disp); 646 } else { 647 // [base + disp32] 648 // [10 reg base] disp32 649 emit_modrm(0b10, reg_enc, base_enc); 650 emit_data(disp, rspec, disp32_operand); 651 } 652 } 653 } else { 654 if (is_valid_encoding(index_enc)) { 655 assert(scale != Address::no_scale, "inconsistent address"); 656 // base == noreg 657 // [index*scale + disp] 658 // [00 reg 100][ss index 101] disp32 659 emit_modrm_sib(0b00, reg_enc, 0b100, 660 scale, index_enc, 0b101 /* no base */); 661 emit_data(disp, rspec, disp32_operand); 662 } else if (!no_relocation) { 663 // base == noreg, index == noreg 664 // [disp] (64bit) RIP-RELATIVE (32bit) abs 665 // [00 reg 101] disp32 666 667 emit_modrm(0b00, reg_enc, 0b101 /* no base */); 668 // Note that the RIP-rel. correction applies to the generated 669 // disp field, but _not_ to the target address in the rspec. 670 671 // disp was created by converting the target address minus the pc 672 // at the start of the instruction. That needs more correction here. 673 // intptr_t disp = target - next_ip; 674 assert(inst_mark() != NULL, "must be inside InstructionMark"); 675 address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; 676 int64_t adjusted = disp; 677 // Do rip-rel adjustment for 64bit 678 LP64_ONLY(adjusted -= (next_ip - inst_mark())); 679 assert(is_simm32(adjusted), 680 "must be 32bit offset (RIP relative address)"); 681 emit_data((int32_t) adjusted, rspec, disp32_operand); 682 683 } else { 684 // base == noreg, index == noreg, no_relocation == true 685 // 32bit never did this, did everything as the rip-rel/disp code above 686 // [disp] ABSOLUTE 687 // [00 reg 100][00 100 101] disp32 688 emit_modrm_sib(0b00, reg_enc, 0b100 /* no base */, 689 Address::times_1, 0b100, 0b101); 690 emit_data(disp, rspec, disp32_operand); 691 } 692 } 693 } 694 695 void Assembler::emit_operand(Register reg, Register base, Register index, 696 Address::ScaleFactor scale, int disp, 697 RelocationHolder const& rspec, 698 int rip_relative_correction) { 699 assert(!index->is_valid() || index != rsp, "illegal addressing mode"); 700 emit_operand_helper(raw_encode(reg), raw_encode(base), raw_encode(index), 701 scale, disp, rspec, rip_relative_correction); 702 703 } 704 void Assembler::emit_operand(XMMRegister xmmreg, Register base, Register index, 705 Address::ScaleFactor scale, int disp, 706 RelocationHolder const& rspec) { 707 assert(!index->is_valid() || index != rsp, "illegal addressing mode"); 708 assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported"); 709 emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(index), 710 scale, disp, rspec); 711 } 712 713 void Assembler::emit_operand(XMMRegister xmmreg, Register base, XMMRegister xmmindex, 714 Address::ScaleFactor scale, int disp, 715 RelocationHolder const& rspec) { 716 assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported"); 717 assert(xmmindex->encoding() < 16 || UseAVX > 2, "not supported"); 718 emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(xmmindex), 719 scale, disp, rspec, /* rip_relative_correction */ 0); 720 } 721 722 // Secret local extension to Assembler::WhichOperand: 723 #define end_pc_operand (_WhichOperand_limit) 724 725 address Assembler::locate_operand(address inst, WhichOperand which) { 726 // Decode the given instruction, and return the address of 727 // an embedded 32-bit operand word. 728 729 // If "which" is disp32_operand, selects the displacement portion 730 // of an effective address specifier. 731 // If "which" is imm64_operand, selects the trailing immediate constant. 732 // If "which" is call32_operand, selects the displacement of a call or jump. 733 // Caller is responsible for ensuring that there is such an operand, 734 // and that it is 32/64 bits wide. 735 736 // If "which" is end_pc_operand, find the end of the instruction. 737 738 address ip = inst; 739 bool is_64bit = false; 740 741 debug_only(bool has_disp32 = false); 742 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn 743 744 again_after_prefix: 745 switch (0xFF & *ip++) { 746 747 // These convenience macros generate groups of "case" labels for the switch. 748 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 749 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ 750 case (x)+4: case (x)+5: case (x)+6: case (x)+7 751 #define REP16(x) REP8((x)+0): \ 752 case REP8((x)+8) 753 754 case CS_segment: 755 case SS_segment: 756 case DS_segment: 757 case ES_segment: 758 case FS_segment: 759 case GS_segment: 760 // Seems dubious 761 LP64_ONLY(assert(false, "shouldn't have that prefix")); 762 assert(ip == inst+1, "only one prefix allowed"); 763 goto again_after_prefix; 764 765 case 0x67: 766 case REX: 767 case REX_B: 768 case REX_X: 769 case REX_XB: 770 case REX_R: 771 case REX_RB: 772 case REX_RX: 773 case REX_RXB: 774 NOT_LP64(assert(false, "64bit prefixes")); 775 goto again_after_prefix; 776 777 case REX_W: 778 case REX_WB: 779 case REX_WX: 780 case REX_WXB: 781 case REX_WR: 782 case REX_WRB: 783 case REX_WRX: 784 case REX_WRXB: 785 NOT_LP64(assert(false, "64bit prefixes")); 786 is_64bit = true; 787 goto again_after_prefix; 788 789 case 0xFF: // pushq a; decl a; incl a; call a; jmp a 790 case 0x88: // movb a, r 791 case 0x89: // movl a, r 792 case 0x8A: // movb r, a 793 case 0x8B: // movl r, a 794 case 0x8F: // popl a 795 debug_only(has_disp32 = true); 796 break; 797 798 case 0x68: // pushq #32 799 if (which == end_pc_operand) { 800 return ip + 4; 801 } 802 assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate"); 803 return ip; // not produced by emit_operand 804 805 case 0x66: // movw ... (size prefix) 806 again_after_size_prefix2: 807 switch (0xFF & *ip++) { 808 case REX: 809 case REX_B: 810 case REX_X: 811 case REX_XB: 812 case REX_R: 813 case REX_RB: 814 case REX_RX: 815 case REX_RXB: 816 case REX_W: 817 case REX_WB: 818 case REX_WX: 819 case REX_WXB: 820 case REX_WR: 821 case REX_WRB: 822 case REX_WRX: 823 case REX_WRXB: 824 NOT_LP64(assert(false, "64bit prefix found")); 825 goto again_after_size_prefix2; 826 case 0x8B: // movw r, a 827 case 0x89: // movw a, r 828 debug_only(has_disp32 = true); 829 break; 830 case 0xC7: // movw a, #16 831 debug_only(has_disp32 = true); 832 tail_size = 2; // the imm16 833 break; 834 case 0x0F: // several SSE/SSE2 variants 835 ip--; // reparse the 0x0F 836 goto again_after_prefix; 837 default: 838 ShouldNotReachHere(); 839 } 840 break; 841 842 case REP8(0xB8): // movl/q r, #32/#64(oop?) 843 if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4); 844 // these asserts are somewhat nonsensical 845 #ifndef _LP64 846 assert(which == imm_operand || which == disp32_operand, 847 "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)); 848 #else 849 assert((which == call32_operand || which == imm_operand) && is_64bit || 850 which == narrow_oop_operand && !is_64bit, 851 "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)); 852 #endif // _LP64 853 return ip; 854 855 case 0x69: // imul r, a, #32 856 case 0xC7: // movl a, #32(oop?) 857 tail_size = 4; 858 debug_only(has_disp32 = true); // has both kinds of operands! 859 break; 860 861 case 0x0F: // movx..., etc. 862 switch (0xFF & *ip++) { 863 case 0x3A: // pcmpestri 864 tail_size = 1; 865 case 0x38: // ptest, pmovzxbw 866 ip++; // skip opcode 867 debug_only(has_disp32 = true); // has both kinds of operands! 868 break; 869 870 case 0x70: // pshufd r, r/a, #8 871 debug_only(has_disp32 = true); // has both kinds of operands! 872 case 0x73: // psrldq r, #8 873 tail_size = 1; 874 break; 875 876 case 0x12: // movlps 877 case 0x28: // movaps 878 case 0x2E: // ucomiss 879 case 0x2F: // comiss 880 case 0x54: // andps 881 case 0x55: // andnps 882 case 0x56: // orps 883 case 0x57: // xorps 884 case 0x58: // addpd 885 case 0x59: // mulpd 886 case 0x6E: // movd 887 case 0x7E: // movd 888 case 0x6F: // movdq 889 case 0x7F: // movdq 890 case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush 891 case 0xFE: // paddd 892 debug_only(has_disp32 = true); 893 break; 894 895 case 0xAD: // shrd r, a, %cl 896 case 0xAF: // imul r, a 897 case 0xBE: // movsbl r, a (movsxb) 898 case 0xBF: // movswl r, a (movsxw) 899 case 0xB6: // movzbl r, a (movzxb) 900 case 0xB7: // movzwl r, a (movzxw) 901 case REP16(0x40): // cmovl cc, r, a 902 case 0xB0: // cmpxchgb 903 case 0xB1: // cmpxchg 904 case 0xC1: // xaddl 905 case 0xC7: // cmpxchg8 906 case REP16(0x90): // setcc a 907 debug_only(has_disp32 = true); 908 // fall out of the switch to decode the address 909 break; 910 911 case 0xC4: // pinsrw r, a, #8 912 debug_only(has_disp32 = true); 913 case 0xC5: // pextrw r, r, #8 914 tail_size = 1; // the imm8 915 break; 916 917 case 0xAC: // shrd r, a, #8 918 debug_only(has_disp32 = true); 919 tail_size = 1; // the imm8 920 break; 921 922 case REP16(0x80): // jcc rdisp32 923 if (which == end_pc_operand) return ip + 4; 924 assert(which == call32_operand, "jcc has no disp32 or imm"); 925 return ip; 926 default: 927 ShouldNotReachHere(); 928 } 929 break; 930 931 case 0x81: // addl a, #32; addl r, #32 932 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 933 // on 32bit in the case of cmpl, the imm might be an oop 934 tail_size = 4; 935 debug_only(has_disp32 = true); // has both kinds of operands! 936 break; 937 938 case 0x83: // addl a, #8; addl r, #8 939 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 940 debug_only(has_disp32 = true); // has both kinds of operands! 941 tail_size = 1; 942 break; 943 944 case 0x15: // adc rax, #32 945 case 0x05: // add rax, #32 946 case 0x25: // and rax, #32 947 case 0x3D: // cmp rax, #32 948 case 0x0D: // or rax, #32 949 case 0x1D: // sbb rax, #32 950 case 0x2D: // sub rax, #32 951 case 0x35: // xor rax, #32 952 return which == end_pc_operand ? ip + 4 : ip; 953 954 case 0x9B: 955 switch (0xFF & *ip++) { 956 case 0xD9: // fnstcw a 957 debug_only(has_disp32 = true); 958 break; 959 default: 960 ShouldNotReachHere(); 961 } 962 break; 963 964 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a 965 case REP4(0x10): // adc... 966 case REP4(0x20): // and... 967 case REP4(0x30): // xor... 968 case REP4(0x08): // or... 969 case REP4(0x18): // sbb... 970 case REP4(0x28): // sub... 971 case 0xF7: // mull a 972 case 0x8D: // lea r, a 973 case 0x87: // xchg r, a 974 case REP4(0x38): // cmp... 975 case 0x85: // test r, a 976 debug_only(has_disp32 = true); // has both kinds of operands! 977 break; 978 979 case 0xA8: // testb rax, #8 980 return which == end_pc_operand ? ip + 1 : ip; 981 case 0xA9: // testl/testq rax, #32 982 return which == end_pc_operand ? ip + 4 : ip; 983 984 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 985 case 0xC6: // movb a, #8 986 case 0x80: // cmpb a, #8 987 case 0x6B: // imul r, a, #8 988 debug_only(has_disp32 = true); // has both kinds of operands! 989 tail_size = 1; // the imm8 990 break; 991 992 case 0xC4: // VEX_3bytes 993 case 0xC5: // VEX_2bytes 994 assert((UseAVX > 0), "shouldn't have VEX prefix"); 995 assert(ip == inst+1, "no prefixes allowed"); 996 // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions 997 // but they have prefix 0x0F and processed when 0x0F processed above. 998 // 999 // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES 1000 // instructions (these instructions are not supported in 64-bit mode). 1001 // To distinguish them bits [7:6] are set in the VEX second byte since 1002 // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set 1003 // those VEX bits REX and vvvv bits are inverted. 1004 // 1005 // Fortunately C2 doesn't generate these instructions so we don't need 1006 // to check for them in product version. 1007 1008 // Check second byte 1009 NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions")); 1010 1011 int vex_opcode; 1012 // First byte 1013 if ((0xFF & *inst) == VEX_3bytes) { 1014 vex_opcode = VEX_OPCODE_MASK & *ip; 1015 ip++; // third byte 1016 is_64bit = ((VEX_W & *ip) == VEX_W); 1017 } else { 1018 vex_opcode = VEX_OPCODE_0F; 1019 } 1020 ip++; // opcode 1021 // To find the end of instruction (which == end_pc_operand). 1022 switch (vex_opcode) { 1023 case VEX_OPCODE_0F: 1024 switch (0xFF & *ip) { 1025 case 0x70: // pshufd r, r/a, #8 1026 case 0x71: // ps[rl|ra|ll]w r, #8 1027 case 0x72: // ps[rl|ra|ll]d r, #8 1028 case 0x73: // ps[rl|ra|ll]q r, #8 1029 case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8 1030 case 0xC4: // pinsrw r, r, r/a, #8 1031 case 0xC5: // pextrw r/a, r, #8 1032 case 0xC6: // shufp[s|d] r, r, r/a, #8 1033 tail_size = 1; // the imm8 1034 break; 1035 } 1036 break; 1037 case VEX_OPCODE_0F_3A: 1038 tail_size = 1; 1039 break; 1040 } 1041 ip++; // skip opcode 1042 debug_only(has_disp32 = true); // has both kinds of operands! 1043 break; 1044 1045 case 0x62: // EVEX_4bytes 1046 assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix"); 1047 assert(ip == inst+1, "no prefixes allowed"); 1048 // no EVEX collisions, all instructions that have 0x62 opcodes 1049 // have EVEX versions and are subopcodes of 0x66 1050 ip++; // skip P0 and examine W in P1 1051 is_64bit = ((VEX_W & *ip) == VEX_W); 1052 ip++; // move to P2 1053 ip++; // skip P2, move to opcode 1054 // To find the end of instruction (which == end_pc_operand). 1055 switch (0xFF & *ip) { 1056 case 0x22: // pinsrd r, r/a, #8 1057 case 0x61: // pcmpestri r, r/a, #8 1058 case 0x70: // pshufd r, r/a, #8 1059 case 0x73: // psrldq r, #8 1060 case 0x1f: // evpcmpd/evpcmpq 1061 case 0x3f: // evpcmpb/evpcmpw 1062 tail_size = 1; // the imm8 1063 break; 1064 default: 1065 break; 1066 } 1067 ip++; // skip opcode 1068 debug_only(has_disp32 = true); // has both kinds of operands! 1069 break; 1070 1071 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 1072 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl 1073 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a 1074 case 0xDD: // fld_d a; fst_d a; fstp_d a 1075 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a 1076 case 0xDF: // fild_d a; fistp_d a 1077 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a 1078 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a 1079 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a 1080 debug_only(has_disp32 = true); 1081 break; 1082 1083 case 0xE8: // call rdisp32 1084 case 0xE9: // jmp rdisp32 1085 if (which == end_pc_operand) return ip + 4; 1086 assert(which == call32_operand, "call has no disp32 or imm"); 1087 return ip; 1088 1089 case 0xF0: // Lock 1090 goto again_after_prefix; 1091 1092 case 0xF3: // For SSE 1093 case 0xF2: // For SSE2 1094 switch (0xFF & *ip++) { 1095 case REX: 1096 case REX_B: 1097 case REX_X: 1098 case REX_XB: 1099 case REX_R: 1100 case REX_RB: 1101 case REX_RX: 1102 case REX_RXB: 1103 case REX_W: 1104 case REX_WB: 1105 case REX_WX: 1106 case REX_WXB: 1107 case REX_WR: 1108 case REX_WRB: 1109 case REX_WRX: 1110 case REX_WRXB: 1111 NOT_LP64(assert(false, "found 64bit prefix")); 1112 ip++; 1113 default: 1114 ip++; 1115 } 1116 debug_only(has_disp32 = true); // has both kinds of operands! 1117 break; 1118 1119 default: 1120 ShouldNotReachHere(); 1121 1122 #undef REP8 1123 #undef REP16 1124 } 1125 1126 assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); 1127 #ifdef _LP64 1128 assert(which != imm_operand, "instruction is not a movq reg, imm64"); 1129 #else 1130 // assert(which != imm_operand || has_imm32, "instruction has no imm32 field"); 1131 assert(which != imm_operand || has_disp32, "instruction has no imm32 field"); 1132 #endif // LP64 1133 assert(which != disp32_operand || has_disp32, "instruction has no disp32 field"); 1134 1135 // parse the output of emit_operand 1136 int op2 = 0xFF & *ip++; 1137 int base = op2 & 0x07; 1138 int op3 = -1; 1139 const int b100 = 4; 1140 const int b101 = 5; 1141 if (base == b100 && (op2 >> 6) != 3) { 1142 op3 = 0xFF & *ip++; 1143 base = op3 & 0x07; // refetch the base 1144 } 1145 // now ip points at the disp (if any) 1146 1147 switch (op2 >> 6) { 1148 case 0: 1149 // [00 reg 100][ss index base] 1150 // [00 reg 100][00 100 esp] 1151 // [00 reg base] 1152 // [00 reg 100][ss index 101][disp32] 1153 // [00 reg 101] [disp32] 1154 1155 if (base == b101) { 1156 if (which == disp32_operand) 1157 return ip; // caller wants the disp32 1158 ip += 4; // skip the disp32 1159 } 1160 break; 1161 1162 case 1: 1163 // [01 reg 100][ss index base][disp8] 1164 // [01 reg 100][00 100 esp][disp8] 1165 // [01 reg base] [disp8] 1166 ip += 1; // skip the disp8 1167 break; 1168 1169 case 2: 1170 // [10 reg 100][ss index base][disp32] 1171 // [10 reg 100][00 100 esp][disp32] 1172 // [10 reg base] [disp32] 1173 if (which == disp32_operand) 1174 return ip; // caller wants the disp32 1175 ip += 4; // skip the disp32 1176 break; 1177 1178 case 3: 1179 // [11 reg base] (not a memory addressing mode) 1180 break; 1181 } 1182 1183 if (which == end_pc_operand) { 1184 return ip + tail_size; 1185 } 1186 1187 #ifdef _LP64 1188 assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32"); 1189 #else 1190 assert(which == imm_operand, "instruction has only an imm field"); 1191 #endif // LP64 1192 return ip; 1193 } 1194 1195 address Assembler::locate_next_instruction(address inst) { 1196 // Secretly share code with locate_operand: 1197 return locate_operand(inst, end_pc_operand); 1198 } 1199 1200 1201 #ifdef ASSERT 1202 void Assembler::check_relocation(RelocationHolder const& rspec, int format) { 1203 address inst = inst_mark(); 1204 assert(inst != NULL && inst < pc(), "must point to beginning of instruction"); 1205 address opnd; 1206 1207 Relocation* r = rspec.reloc(); 1208 if (r->type() == relocInfo::none) { 1209 return; 1210 } else if (r->is_call() || format == call32_operand) { 1211 // assert(format == imm32_operand, "cannot specify a nonzero format"); 1212 opnd = locate_operand(inst, call32_operand); 1213 } else if (r->is_data()) { 1214 assert(format == imm_operand || format == disp32_operand 1215 LP64_ONLY(|| format == narrow_oop_operand), "format ok"); 1216 opnd = locate_operand(inst, (WhichOperand)format); 1217 } else { 1218 assert(format == imm_operand, "cannot specify a format"); 1219 return; 1220 } 1221 assert(opnd == pc(), "must put operand where relocs can find it"); 1222 } 1223 #endif // ASSERT 1224 1225 void Assembler::emit_operand(Register reg, Address adr, 1226 int rip_relative_correction) { 1227 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1228 adr._rspec, 1229 rip_relative_correction); 1230 } 1231 1232 void Assembler::emit_operand(XMMRegister reg, Address adr) { 1233 if (adr.isxmmindex()) { 1234 emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec); 1235 } else { 1236 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1237 adr._rspec); 1238 } 1239 } 1240 1241 // Now the Assembler instructions (identical for 32/64 bits) 1242 1243 void Assembler::adcl(Address dst, int32_t imm32) { 1244 InstructionMark im(this); 1245 prefix(dst); 1246 emit_arith_operand(0x81, rdx, dst, imm32); 1247 } 1248 1249 void Assembler::adcl(Address dst, Register src) { 1250 InstructionMark im(this); 1251 prefix(dst, src); 1252 emit_int8(0x11); 1253 emit_operand(src, dst); 1254 } 1255 1256 void Assembler::adcl(Register dst, int32_t imm32) { 1257 prefix(dst); 1258 emit_arith(0x81, 0xD0, dst, imm32); 1259 } 1260 1261 void Assembler::adcl(Register dst, Address src) { 1262 InstructionMark im(this); 1263 prefix(src, dst); 1264 emit_int8(0x13); 1265 emit_operand(dst, src); 1266 } 1267 1268 void Assembler::adcl(Register dst, Register src) { 1269 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1270 emit_arith(0x13, 0xC0, dst, src); 1271 } 1272 1273 void Assembler::addl(Address dst, int32_t imm32) { 1274 InstructionMark im(this); 1275 prefix(dst); 1276 emit_arith_operand(0x81, rax, dst, imm32); 1277 } 1278 1279 void Assembler::addb(Address dst, int imm8) { 1280 InstructionMark im(this); 1281 prefix(dst); 1282 emit_int8((unsigned char)0x80); 1283 emit_operand(rax, dst, 1); 1284 emit_int8(imm8); 1285 } 1286 1287 void Assembler::addw(Register dst, Register src) { 1288 (void)prefix_and_encode(dst->encoding(), src->encoding()); 1289 emit_arith(0x03, 0xC0, dst, src); 1290 } 1291 1292 void Assembler::addw(Address dst, int imm16) { 1293 InstructionMark im(this); 1294 emit_int8(0x66); 1295 prefix(dst); 1296 emit_int8((unsigned char)0x81); 1297 emit_operand(rax, dst, 2); 1298 emit_int16(imm16); 1299 } 1300 1301 void Assembler::addl(Address dst, Register src) { 1302 InstructionMark im(this); 1303 prefix(dst, src); 1304 emit_int8(0x01); 1305 emit_operand(src, dst); 1306 } 1307 1308 void Assembler::addl(Register dst, int32_t imm32) { 1309 prefix(dst); 1310 emit_arith(0x81, 0xC0, dst, imm32); 1311 } 1312 1313 void Assembler::addl(Register dst, Address src) { 1314 InstructionMark im(this); 1315 prefix(src, dst); 1316 emit_int8(0x03); 1317 emit_operand(dst, src); 1318 } 1319 1320 void Assembler::addl(Register dst, Register src) { 1321 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1322 emit_arith(0x03, 0xC0, dst, src); 1323 } 1324 1325 void Assembler::addr_nop_4() { 1326 assert(UseAddressNop, "no CPU support"); 1327 // 4 bytes: NOP DWORD PTR [EAX+0] 1328 emit_int32(0x0F, 1329 0x1F, 1330 0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); 1331 0); // 8-bits offset (1 byte) 1332 } 1333 1334 void Assembler::addr_nop_5() { 1335 assert(UseAddressNop, "no CPU support"); 1336 // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset 1337 emit_int32(0x0F, 1338 0x1F, 1339 0x44, // emit_rm(cbuf, 0x1, EAX_enc, 0x4); 1340 0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1341 emit_int8(0); // 8-bits offset (1 byte) 1342 } 1343 1344 void Assembler::addr_nop_7() { 1345 assert(UseAddressNop, "no CPU support"); 1346 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset 1347 emit_int24(0x0F, 1348 0x1F, 1349 (unsigned char)0x80); 1350 // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); 1351 emit_int32(0); // 32-bits offset (4 bytes) 1352 } 1353 1354 void Assembler::addr_nop_8() { 1355 assert(UseAddressNop, "no CPU support"); 1356 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset 1357 emit_int32(0x0F, 1358 0x1F, 1359 (unsigned char)0x84, 1360 // emit_rm(cbuf, 0x2, EAX_enc, 0x4); 1361 0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1362 emit_int32(0); // 32-bits offset (4 bytes) 1363 } 1364 1365 void Assembler::addsd(XMMRegister dst, XMMRegister src) { 1366 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1367 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1368 attributes.set_rex_vex_w_reverted(); 1369 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1370 emit_int16(0x58, (0xC0 | encode)); 1371 } 1372 1373 void Assembler::addsd(XMMRegister dst, Address src) { 1374 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1375 InstructionMark im(this); 1376 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1377 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1378 attributes.set_rex_vex_w_reverted(); 1379 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1380 emit_int8(0x58); 1381 emit_operand(dst, src); 1382 } 1383 1384 void Assembler::addss(XMMRegister dst, XMMRegister src) { 1385 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1386 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1387 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1388 emit_int16(0x58, (0xC0 | encode)); 1389 } 1390 1391 void Assembler::addss(XMMRegister dst, Address src) { 1392 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1393 InstructionMark im(this); 1394 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1395 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1396 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1397 emit_int8(0x58); 1398 emit_operand(dst, src); 1399 } 1400 1401 void Assembler::aesdec(XMMRegister dst, Address src) { 1402 assert(VM_Version::supports_aes(), ""); 1403 InstructionMark im(this); 1404 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1405 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1406 emit_int8((unsigned char)0xDE); 1407 emit_operand(dst, src); 1408 } 1409 1410 void Assembler::aesdec(XMMRegister dst, XMMRegister src) { 1411 assert(VM_Version::supports_aes(), ""); 1412 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1413 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1414 emit_int16((unsigned char)0xDE, (0xC0 | encode)); 1415 } 1416 1417 void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1418 assert(VM_Version::supports_avx512_vaes(), ""); 1419 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1420 attributes.set_is_evex_instruction(); 1421 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1422 emit_int16((unsigned char)0xDE, (0xC0 | encode)); 1423 } 1424 1425 1426 void Assembler::aesdeclast(XMMRegister dst, Address src) { 1427 assert(VM_Version::supports_aes(), ""); 1428 InstructionMark im(this); 1429 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1430 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1431 emit_int8((unsigned char)0xDF); 1432 emit_operand(dst, src); 1433 } 1434 1435 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) { 1436 assert(VM_Version::supports_aes(), ""); 1437 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1438 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1439 emit_int16((unsigned char)0xDF, (0xC0 | encode)); 1440 } 1441 1442 void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1443 assert(VM_Version::supports_avx512_vaes(), ""); 1444 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1445 attributes.set_is_evex_instruction(); 1446 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1447 emit_int16((unsigned char)0xDF, (0xC0 | encode)); 1448 } 1449 1450 void Assembler::aesenc(XMMRegister dst, Address src) { 1451 assert(VM_Version::supports_aes(), ""); 1452 InstructionMark im(this); 1453 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1454 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1455 emit_int8((unsigned char)0xDC); 1456 emit_operand(dst, src); 1457 } 1458 1459 void Assembler::aesenc(XMMRegister dst, XMMRegister src) { 1460 assert(VM_Version::supports_aes(), ""); 1461 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1462 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1463 emit_int16((unsigned char)0xDC, 0xC0 | encode); 1464 } 1465 1466 void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1467 assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling"); 1468 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1469 attributes.set_is_evex_instruction(); 1470 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1471 emit_int16((unsigned char)0xDC, (0xC0 | encode)); 1472 } 1473 1474 void Assembler::aesenclast(XMMRegister dst, Address src) { 1475 assert(VM_Version::supports_aes(), ""); 1476 InstructionMark im(this); 1477 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1478 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1479 emit_int8((unsigned char)0xDD); 1480 emit_operand(dst, src); 1481 } 1482 1483 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) { 1484 assert(VM_Version::supports_aes(), ""); 1485 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1486 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1487 emit_int16((unsigned char)0xDD, (0xC0 | encode)); 1488 } 1489 1490 void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1491 assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling"); 1492 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1493 attributes.set_is_evex_instruction(); 1494 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1495 emit_int16((unsigned char)0xDD, (0xC0 | encode)); 1496 } 1497 1498 void Assembler::andb(Address dst, Register src) { 1499 InstructionMark im(this); 1500 prefix(dst, src, true); 1501 emit_int8(0x20); 1502 emit_operand(src, dst); 1503 } 1504 1505 void Assembler::andw(Register dst, Register src) { 1506 (void)prefix_and_encode(dst->encoding(), src->encoding()); 1507 emit_arith(0x23, 0xC0, dst, src); 1508 } 1509 1510 void Assembler::andl(Address dst, int32_t imm32) { 1511 InstructionMark im(this); 1512 prefix(dst); 1513 emit_arith_operand(0x81, as_Register(4), dst, imm32); 1514 } 1515 1516 void Assembler::andl(Register dst, int32_t imm32) { 1517 prefix(dst); 1518 emit_arith(0x81, 0xE0, dst, imm32); 1519 } 1520 1521 void Assembler::andl(Address dst, Register src) { 1522 InstructionMark im(this); 1523 prefix(dst, src); 1524 emit_int8(0x21); 1525 emit_operand(src, dst); 1526 } 1527 1528 void Assembler::andl(Register dst, Address src) { 1529 InstructionMark im(this); 1530 prefix(src, dst); 1531 emit_int8(0x23); 1532 emit_operand(dst, src); 1533 } 1534 1535 void Assembler::andl(Register dst, Register src) { 1536 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1537 emit_arith(0x23, 0xC0, dst, src); 1538 } 1539 1540 void Assembler::andnl(Register dst, Register src1, Register src2) { 1541 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1542 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1543 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1544 emit_int16((unsigned char)0xF2, (0xC0 | encode)); 1545 } 1546 1547 void Assembler::andnl(Register dst, Register src1, Address src2) { 1548 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1549 InstructionMark im(this); 1550 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1551 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1552 emit_int8((unsigned char)0xF2); 1553 emit_operand(dst, src2); 1554 } 1555 1556 void Assembler::bsfl(Register dst, Register src) { 1557 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1558 emit_int24(0x0F, 1559 (unsigned char)0xBC, 1560 0xC0 | encode); 1561 } 1562 1563 void Assembler::bsrl(Register dst, Register src) { 1564 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1565 emit_int24(0x0F, 1566 (unsigned char)0xBD, 1567 0xC0 | encode); 1568 } 1569 1570 void Assembler::bswapl(Register reg) { // bswap 1571 int encode = prefix_and_encode(reg->encoding()); 1572 emit_int16(0x0F, (0xC8 | encode)); 1573 } 1574 1575 void Assembler::blsil(Register dst, Register src) { 1576 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1577 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1578 int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1579 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 1580 } 1581 1582 void Assembler::blsil(Register dst, Address src) { 1583 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1584 InstructionMark im(this); 1585 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1586 vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1587 emit_int8((unsigned char)0xF3); 1588 emit_operand(rbx, src); 1589 } 1590 1591 void Assembler::blsmskl(Register dst, Register src) { 1592 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1593 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1594 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1595 emit_int16((unsigned char)0xF3, 1596 0xC0 | encode); 1597 } 1598 1599 void Assembler::blsmskl(Register dst, Address src) { 1600 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1601 InstructionMark im(this); 1602 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1603 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1604 emit_int8((unsigned char)0xF3); 1605 emit_operand(rdx, src); 1606 } 1607 1608 void Assembler::blsrl(Register dst, Register src) { 1609 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1610 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1611 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1612 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 1613 } 1614 1615 void Assembler::blsrl(Register dst, Address src) { 1616 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1617 InstructionMark im(this); 1618 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1619 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 1620 emit_int8((unsigned char)0xF3); 1621 emit_operand(rcx, src); 1622 } 1623 1624 void Assembler::call(Label& L, relocInfo::relocType rtype) { 1625 // suspect disp32 is always good 1626 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand); 1627 1628 if (L.is_bound()) { 1629 const int long_size = 5; 1630 int offs = (int)( target(L) - pc() ); 1631 assert(offs <= 0, "assembler error"); 1632 InstructionMark im(this); 1633 // 1110 1000 #32-bit disp 1634 emit_int8((unsigned char)0xE8); 1635 emit_data(offs - long_size, rtype, operand); 1636 } else { 1637 InstructionMark im(this); 1638 // 1110 1000 #32-bit disp 1639 L.add_patch_at(code(), locator()); 1640 1641 emit_int8((unsigned char)0xE8); 1642 emit_data(int(0), rtype, operand); 1643 } 1644 } 1645 1646 void Assembler::call(Register dst) { 1647 int encode = prefix_and_encode(dst->encoding()); 1648 emit_int16((unsigned char)0xFF, (0xD0 | encode)); 1649 } 1650 1651 1652 void Assembler::call(Address adr) { 1653 InstructionMark im(this); 1654 prefix(adr); 1655 emit_int8((unsigned char)0xFF); 1656 emit_operand(rdx, adr); 1657 } 1658 1659 void Assembler::call_literal(address entry, RelocationHolder const& rspec) { 1660 InstructionMark im(this); 1661 emit_int8((unsigned char)0xE8); 1662 intptr_t disp = entry - (pc() + sizeof(int32_t)); 1663 // Entry is NULL in case of a scratch emit. 1664 assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp); 1665 // Technically, should use call32_operand, but this format is 1666 // implied by the fact that we're emitting a call instruction. 1667 1668 int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand); 1669 emit_data((int) disp, rspec, operand); 1670 } 1671 1672 void Assembler::cdql() { 1673 emit_int8((unsigned char)0x99); 1674 } 1675 1676 void Assembler::cld() { 1677 emit_int8((unsigned char)0xFC); 1678 } 1679 1680 void Assembler::cmovl(Condition cc, Register dst, Register src) { 1681 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1682 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1683 emit_int24(0x0F, 1684 0x40 | cc, 1685 0xC0 | encode); 1686 } 1687 1688 1689 void Assembler::cmovl(Condition cc, Register dst, Address src) { 1690 InstructionMark im(this); 1691 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1692 prefix(src, dst); 1693 emit_int16(0x0F, (0x40 | cc)); 1694 emit_operand(dst, src); 1695 } 1696 1697 void Assembler::cmpb(Address dst, int imm8) { 1698 InstructionMark im(this); 1699 prefix(dst); 1700 emit_int8((unsigned char)0x80); 1701 emit_operand(rdi, dst, 1); 1702 emit_int8(imm8); 1703 } 1704 1705 void Assembler::cmpl(Address dst, int32_t imm32) { 1706 InstructionMark im(this); 1707 prefix(dst); 1708 emit_int8((unsigned char)0x81); 1709 emit_operand(rdi, dst, 4); 1710 emit_int32(imm32); 1711 } 1712 1713 void Assembler::cmpl(Register dst, int32_t imm32) { 1714 prefix(dst); 1715 emit_arith(0x81, 0xF8, dst, imm32); 1716 } 1717 1718 void Assembler::cmpl(Register dst, Register src) { 1719 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1720 emit_arith(0x3B, 0xC0, dst, src); 1721 } 1722 1723 void Assembler::cmpl(Register dst, Address src) { 1724 InstructionMark im(this); 1725 prefix(src, dst); 1726 emit_int8(0x3B); 1727 emit_operand(dst, src); 1728 } 1729 1730 void Assembler::cmpw(Address dst, int imm16) { 1731 InstructionMark im(this); 1732 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); 1733 emit_int16(0x66, (unsigned char)0x81); 1734 emit_operand(rdi, dst, 2); 1735 emit_int16(imm16); 1736 } 1737 1738 // The 32-bit cmpxchg compares the value at adr with the contents of rax, 1739 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1740 // The ZF is set if the compared values were equal, and cleared otherwise. 1741 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg 1742 InstructionMark im(this); 1743 prefix(adr, reg); 1744 emit_int16(0x0F, (unsigned char)0xB1); 1745 emit_operand(reg, adr); 1746 } 1747 1748 void Assembler::cmpxchgw(Register reg, Address adr) { // cmpxchg 1749 InstructionMark im(this); 1750 size_prefix(); 1751 prefix(adr, reg); 1752 emit_int16(0x0F, (unsigned char)0xB1); 1753 emit_operand(reg, adr); 1754 } 1755 1756 // The 8-bit cmpxchg compares the value at adr with the contents of rax, 1757 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1758 // The ZF is set if the compared values were equal, and cleared otherwise. 1759 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg 1760 InstructionMark im(this); 1761 prefix(adr, reg, true); 1762 emit_int16(0x0F, (unsigned char)0xB0); 1763 emit_operand(reg, adr); 1764 } 1765 1766 void Assembler::comisd(XMMRegister dst, Address src) { 1767 // NOTE: dbx seems to decode this as comiss even though the 1768 // 0x66 is there. Strangely ucomisd comes out correct 1769 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1770 InstructionMark im(this); 1771 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);; 1772 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1773 attributes.set_rex_vex_w_reverted(); 1774 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1775 emit_int8(0x2F); 1776 emit_operand(dst, src); 1777 } 1778 1779 void Assembler::comisd(XMMRegister dst, XMMRegister src) { 1780 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1781 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1782 attributes.set_rex_vex_w_reverted(); 1783 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1784 emit_int16(0x2F, (0xC0 | encode)); 1785 } 1786 1787 void Assembler::comiss(XMMRegister dst, Address src) { 1788 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1789 InstructionMark im(this); 1790 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1791 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1792 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1793 emit_int8(0x2F); 1794 emit_operand(dst, src); 1795 } 1796 1797 void Assembler::comiss(XMMRegister dst, XMMRegister src) { 1798 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1799 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1800 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1801 emit_int16(0x2F, (0xC0 | encode)); 1802 } 1803 1804 void Assembler::cpuid() { 1805 emit_int16(0x0F, (unsigned char)0xA2); 1806 } 1807 1808 // Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented 1809 // F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v 1810 // F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. - 1811 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E. Accumulate CRC32 on r / m8. - 1812 // 1813 // F2 0F 38 F1 / r CRC32 r32, r / m16 RM Valid Valid Accumulate CRC32 on r / m16. v 1814 // 1815 // F2 0F 38 F1 / r CRC32 r32, r / m32 RM Valid Valid Accumulate CRC32 on r / m32. v 1816 // 1817 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64. v 1818 void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) { 1819 assert(VM_Version::supports_sse4_2(), ""); 1820 int8_t w = 0x01; 1821 Prefix p = Prefix_EMPTY; 1822 1823 emit_int8((unsigned char)0xF2); 1824 switch (sizeInBytes) { 1825 case 1: 1826 w = 0; 1827 break; 1828 case 2: 1829 case 4: 1830 break; 1831 LP64_ONLY(case 8:) 1832 // This instruction is not valid in 32 bits 1833 // Note: 1834 // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf 1835 // 1836 // Page B - 72 Vol. 2C says 1837 // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2 1838 // mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m 1839 // F0!!! 1840 // while 3 - 208 Vol. 2A 1841 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64. 1842 // 1843 // the 0 on a last bit is reserved for a different flavor of this instruction : 1844 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8. 1845 p = REX_W; 1846 break; 1847 default: 1848 assert(0, "Unsupported value for a sizeInBytes argument"); 1849 break; 1850 } 1851 LP64_ONLY(prefix(crc, v, p);) 1852 emit_int32(0x0F, 1853 0x38, 1854 0xF0 | w, 1855 0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7)); 1856 } 1857 1858 void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) { 1859 assert(VM_Version::supports_sse4_2(), ""); 1860 InstructionMark im(this); 1861 int8_t w = 0x01; 1862 Prefix p = Prefix_EMPTY; 1863 1864 emit_int8((int8_t)0xF2); 1865 switch (sizeInBytes) { 1866 case 1: 1867 w = 0; 1868 break; 1869 case 2: 1870 case 4: 1871 break; 1872 LP64_ONLY(case 8:) 1873 // This instruction is not valid in 32 bits 1874 p = REX_W; 1875 break; 1876 default: 1877 assert(0, "Unsupported value for a sizeInBytes argument"); 1878 break; 1879 } 1880 LP64_ONLY(prefix(crc, adr, p);) 1881 emit_int24(0x0F, 0x38, (0xF0 | w)); 1882 emit_operand(crc, adr); 1883 } 1884 1885 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { 1886 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1887 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1888 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1889 emit_int16((unsigned char)0xE6, (0xC0 | encode)); 1890 } 1891 1892 void Assembler::vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len) { 1893 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 1894 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1895 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1896 emit_int16((unsigned char)0xE6, (0xC0 | encode)); 1897 } 1898 1899 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { 1900 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1901 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1902 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1903 emit_int16(0x5B, (0xC0 | encode)); 1904 } 1905 1906 void Assembler::vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len) { 1907 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 1908 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1909 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1910 emit_int16(0x5B, (0xC0 | encode)); 1911 } 1912 1913 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { 1914 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1915 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1916 attributes.set_rex_vex_w_reverted(); 1917 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1918 emit_int16(0x5A, (0xC0 | encode)); 1919 } 1920 1921 void Assembler::cvtsd2ss(XMMRegister dst, Address src) { 1922 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1923 InstructionMark im(this); 1924 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1925 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1926 attributes.set_rex_vex_w_reverted(); 1927 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1928 emit_int8(0x5A); 1929 emit_operand(dst, src); 1930 } 1931 1932 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { 1933 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1934 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1935 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1936 emit_int16(0x2A, (0xC0 | encode)); 1937 } 1938 1939 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { 1940 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1941 InstructionMark im(this); 1942 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1943 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1944 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1945 emit_int8(0x2A); 1946 emit_operand(dst, src); 1947 } 1948 1949 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { 1950 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1951 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1952 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1953 emit_int16(0x2A, (0xC0 | encode)); 1954 } 1955 1956 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { 1957 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1958 InstructionMark im(this); 1959 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1960 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1961 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1962 emit_int8(0x2A); 1963 emit_operand(dst, src); 1964 } 1965 1966 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { 1967 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1968 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1969 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1970 emit_int16(0x2A, (0xC0 | encode)); 1971 } 1972 1973 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { 1974 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1975 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1976 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1977 emit_int16(0x5A, (0xC0 | encode)); 1978 } 1979 1980 void Assembler::cvtss2sd(XMMRegister dst, Address src) { 1981 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1982 InstructionMark im(this); 1983 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1984 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1985 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1986 emit_int8(0x5A); 1987 emit_operand(dst, src); 1988 } 1989 1990 1991 void Assembler::cvttsd2sil(Register dst, XMMRegister src) { 1992 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1993 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1994 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1995 emit_int16(0x2C, (0xC0 | encode)); 1996 } 1997 1998 void Assembler::cvtss2sil(Register dst, XMMRegister src) { 1999 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2000 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2001 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2002 emit_int16(0x2D, (0xC0 | encode)); 2003 } 2004 2005 void Assembler::cvttss2sil(Register dst, XMMRegister src) { 2006 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2007 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2008 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2009 emit_int16(0x2C, (0xC0 | encode)); 2010 } 2011 2012 void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) { 2013 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2014 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2015 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2016 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2017 emit_int16((unsigned char)0xE6, (0xC0 | encode)); 2018 } 2019 2020 void Assembler::pabsb(XMMRegister dst, XMMRegister src) { 2021 assert(VM_Version::supports_ssse3(), ""); 2022 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2023 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2024 emit_int16(0x1C, (0xC0 | encode)); 2025 } 2026 2027 void Assembler::pabsw(XMMRegister dst, XMMRegister src) { 2028 assert(VM_Version::supports_ssse3(), ""); 2029 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2030 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2031 emit_int16(0x1D, (0xC0 | encode)); 2032 } 2033 2034 void Assembler::pabsd(XMMRegister dst, XMMRegister src) { 2035 assert(VM_Version::supports_ssse3(), ""); 2036 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2037 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2038 emit_int16(0x1E, (0xC0 | encode)); 2039 } 2040 2041 void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) { 2042 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 2043 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 2044 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported"); 2045 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2046 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2047 emit_int16(0x1C, (0xC0 | encode)); 2048 } 2049 2050 void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) { 2051 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 2052 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 2053 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, ""); 2054 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2055 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2056 emit_int16(0x1D, (0xC0 | encode)); 2057 } 2058 2059 void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) { 2060 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 2061 vector_len == AVX_256bit? VM_Version::supports_avx2() : 2062 vector_len == AVX_512bit? VM_Version::supports_evex() : 0, ""); 2063 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2064 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2065 emit_int16(0x1E, (0xC0 | encode)); 2066 } 2067 2068 void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) { 2069 assert(UseAVX > 2, ""); 2070 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2071 attributes.set_is_evex_instruction(); 2072 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 2073 emit_int16(0x1F, (0xC0 | encode)); 2074 } 2075 2076 void Assembler::vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) { 2077 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 2078 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2079 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2080 emit_int16(0x5A, (0xC0 | encode)); 2081 } 2082 2083 void Assembler::vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) { 2084 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 2085 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2086 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2087 attributes.set_rex_vex_w_reverted(); 2088 emit_int16(0x5A, (0xC0 | encode)); 2089 } 2090 2091 void Assembler::vcvttps2dq(XMMRegister dst, XMMRegister src, int vector_len) { 2092 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 2093 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2094 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2095 emit_int16(0x5B, (0xC0 | encode)); 2096 } 2097 2098 void Assembler::vcvtps2dq(XMMRegister dst, XMMRegister src, int vector_len) { 2099 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 2100 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2101 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2102 emit_int16(0x5B, (0xC0 | encode)); 2103 } 2104 2105 void Assembler::evcvtpd2qq(XMMRegister dst, XMMRegister src, int vector_len) { 2106 assert(UseAVX > 2 && VM_Version::supports_avx512dq(), ""); 2107 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2108 attributes.set_is_evex_instruction(); 2109 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2110 emit_int16(0x7B, (0xC0 | encode)); 2111 } 2112 2113 void Assembler::evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len) { 2114 assert(UseAVX > 2 && VM_Version::supports_avx512dq(), ""); 2115 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2116 attributes.set_is_evex_instruction(); 2117 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2118 emit_int16(0x5B, (0xC0 | encode)); 2119 } 2120 2121 void Assembler::evcvttpd2qq(XMMRegister dst, XMMRegister src, int vector_len) { 2122 assert(UseAVX > 2 && VM_Version::supports_avx512dq(), ""); 2123 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2124 attributes.set_is_evex_instruction(); 2125 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2126 emit_int16(0x7A, (0xC0 | encode)); 2127 } 2128 2129 void Assembler::evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len) { 2130 assert(UseAVX > 2 && VM_Version::supports_avx512dq(), ""); 2131 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2132 attributes.set_is_evex_instruction(); 2133 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2134 emit_int16((unsigned char)0xE6, (0xC0 | encode)); 2135 } 2136 2137 void Assembler::evpmovwb(XMMRegister dst, XMMRegister src, int vector_len) { 2138 assert(UseAVX > 2 && VM_Version::supports_avx512bw(), ""); 2139 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2140 attributes.set_is_evex_instruction(); 2141 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 2142 emit_int16(0x30, (0xC0 | encode)); 2143 } 2144 2145 void Assembler::evpmovdw(XMMRegister dst, XMMRegister src, int vector_len) { 2146 assert(UseAVX > 2, ""); 2147 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2148 attributes.set_is_evex_instruction(); 2149 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 2150 emit_int16(0x33, (0xC0 | encode)); 2151 } 2152 2153 void Assembler::evpmovdb(XMMRegister dst, XMMRegister src, int vector_len) { 2154 assert(UseAVX > 2, ""); 2155 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2156 attributes.set_is_evex_instruction(); 2157 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 2158 emit_int16(0x31, (0xC0 | encode)); 2159 } 2160 2161 void Assembler::evpmovqd(XMMRegister dst, XMMRegister src, int vector_len) { 2162 assert(UseAVX > 2, ""); 2163 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2164 attributes.set_is_evex_instruction(); 2165 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 2166 emit_int16(0x35, (0xC0 | encode)); 2167 } 2168 2169 void Assembler::evpmovqb(XMMRegister dst, XMMRegister src, int vector_len) { 2170 assert(UseAVX > 2, ""); 2171 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2172 attributes.set_is_evex_instruction(); 2173 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 2174 emit_int16(0x32, (0xC0 | encode)); 2175 } 2176 2177 void Assembler::evpmovqw(XMMRegister dst, XMMRegister src, int vector_len) { 2178 assert(UseAVX > 2, ""); 2179 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2180 attributes.set_is_evex_instruction(); 2181 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 2182 emit_int16(0x34, (0xC0 | encode)); 2183 } 2184 2185 void Assembler::decl(Address dst) { 2186 // Don't use it directly. Use MacroAssembler::decrement() instead. 2187 InstructionMark im(this); 2188 prefix(dst); 2189 emit_int8((unsigned char)0xFF); 2190 emit_operand(rcx, dst); 2191 } 2192 2193 void Assembler::divsd(XMMRegister dst, Address src) { 2194 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2195 InstructionMark im(this); 2196 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2197 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2198 attributes.set_rex_vex_w_reverted(); 2199 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2200 emit_int8(0x5E); 2201 emit_operand(dst, src); 2202 } 2203 2204 void Assembler::divsd(XMMRegister dst, XMMRegister src) { 2205 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2206 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2207 attributes.set_rex_vex_w_reverted(); 2208 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2209 emit_int16(0x5E, (0xC0 | encode)); 2210 } 2211 2212 void Assembler::divss(XMMRegister dst, Address src) { 2213 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2214 InstructionMark im(this); 2215 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2216 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2217 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2218 emit_int8(0x5E); 2219 emit_operand(dst, src); 2220 } 2221 2222 void Assembler::divss(XMMRegister dst, XMMRegister src) { 2223 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2224 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2225 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2226 emit_int16(0x5E, (0xC0 | encode)); 2227 } 2228 2229 void Assembler::hlt() { 2230 emit_int8((unsigned char)0xF4); 2231 } 2232 2233 void Assembler::idivl(Register src) { 2234 int encode = prefix_and_encode(src->encoding()); 2235 emit_int16((unsigned char)0xF7, (0xF8 | encode)); 2236 } 2237 2238 void Assembler::idivl(Address src) { 2239 InstructionMark im(this); 2240 prefix(src); 2241 emit_int8((unsigned char)0xF7); 2242 emit_operand(as_Register(7), src); 2243 } 2244 2245 void Assembler::divl(Register src) { // Unsigned 2246 int encode = prefix_and_encode(src->encoding()); 2247 emit_int16((unsigned char)0xF7, (0xF0 | encode)); 2248 } 2249 2250 void Assembler::imull(Register src) { 2251 int encode = prefix_and_encode(src->encoding()); 2252 emit_int16((unsigned char)0xF7, (0xE8 | encode)); 2253 } 2254 2255 void Assembler::imull(Register dst, Register src) { 2256 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2257 emit_int24(0x0F, 2258 (unsigned char)0xAF, 2259 (0xC0 | encode)); 2260 } 2261 2262 void Assembler::imull(Register dst, Address src, int32_t value) { 2263 InstructionMark im(this); 2264 prefix(src, dst); 2265 if (is8bit(value)) { 2266 emit_int8((unsigned char)0x6B); 2267 emit_operand(dst, src); 2268 emit_int8(value); 2269 } else { 2270 emit_int8((unsigned char)0x69); 2271 emit_operand(dst, src); 2272 emit_int32(value); 2273 } 2274 } 2275 2276 void Assembler::imull(Register dst, Register src, int value) { 2277 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2278 if (is8bit(value)) { 2279 emit_int24(0x6B, (0xC0 | encode), value & 0xFF); 2280 } else { 2281 emit_int16(0x69, (0xC0 | encode)); 2282 emit_int32(value); 2283 } 2284 } 2285 2286 void Assembler::imull(Register dst, Address src) { 2287 InstructionMark im(this); 2288 prefix(src, dst); 2289 emit_int16(0x0F, (unsigned char)0xAF); 2290 emit_operand(dst, src); 2291 } 2292 2293 2294 void Assembler::incl(Address dst) { 2295 // Don't use it directly. Use MacroAssembler::increment() instead. 2296 InstructionMark im(this); 2297 prefix(dst); 2298 emit_int8((unsigned char)0xFF); 2299 emit_operand(rax, dst); 2300 } 2301 2302 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) { 2303 InstructionMark im(this); 2304 assert((0 <= cc) && (cc < 16), "illegal cc"); 2305 if (L.is_bound()) { 2306 address dst = target(L); 2307 assert(dst != NULL, "jcc most probably wrong"); 2308 2309 const int short_size = 2; 2310 const int long_size = 6; 2311 intptr_t offs = (intptr_t)dst - (intptr_t)pc(); 2312 if (maybe_short && is8bit(offs - short_size)) { 2313 // 0111 tttn #8-bit disp 2314 emit_int16(0x70 | cc, (offs - short_size) & 0xFF); 2315 } else { 2316 // 0000 1111 1000 tttn #32-bit disp 2317 assert(is_simm32(offs - long_size), 2318 "must be 32bit offset (call4)"); 2319 emit_int16(0x0F, (0x80 | cc)); 2320 emit_int32(offs - long_size); 2321 } 2322 } else { 2323 // Note: could eliminate cond. jumps to this jump if condition 2324 // is the same however, seems to be rather unlikely case. 2325 // Note: use jccb() if label to be bound is very close to get 2326 // an 8-bit displacement 2327 L.add_patch_at(code(), locator()); 2328 emit_int16(0x0F, (0x80 | cc)); 2329 emit_int32(0); 2330 } 2331 } 2332 2333 void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) { 2334 if (L.is_bound()) { 2335 const int short_size = 2; 2336 address entry = target(L); 2337 #ifdef ASSERT 2338 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 2339 intptr_t delta = short_branch_delta(); 2340 if (delta != 0) { 2341 dist += (dist < 0 ? (-delta) :delta); 2342 } 2343 assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line); 2344 #endif 2345 intptr_t offs = (intptr_t)entry - (intptr_t)pc(); 2346 // 0111 tttn #8-bit disp 2347 emit_int16(0x70 | cc, (offs - short_size) & 0xFF); 2348 } else { 2349 InstructionMark im(this); 2350 L.add_patch_at(code(), locator(), file, line); 2351 emit_int16(0x70 | cc, 0); 2352 } 2353 } 2354 2355 void Assembler::jmp(Address adr) { 2356 InstructionMark im(this); 2357 prefix(adr); 2358 emit_int8((unsigned char)0xFF); 2359 emit_operand(rsp, adr); 2360 } 2361 2362 void Assembler::jmp(Label& L, bool maybe_short) { 2363 if (L.is_bound()) { 2364 address entry = target(L); 2365 assert(entry != NULL, "jmp most probably wrong"); 2366 InstructionMark im(this); 2367 const int short_size = 2; 2368 const int long_size = 5; 2369 intptr_t offs = entry - pc(); 2370 if (maybe_short && is8bit(offs - short_size)) { 2371 emit_int16((unsigned char)0xEB, ((offs - short_size) & 0xFF)); 2372 } else { 2373 emit_int8((unsigned char)0xE9); 2374 emit_int32(offs - long_size); 2375 } 2376 } else { 2377 // By default, forward jumps are always 32-bit displacements, since 2378 // we can't yet know where the label will be bound. If you're sure that 2379 // the forward jump will not run beyond 256 bytes, use jmpb to 2380 // force an 8-bit displacement. 2381 InstructionMark im(this); 2382 L.add_patch_at(code(), locator()); 2383 emit_int8((unsigned char)0xE9); 2384 emit_int32(0); 2385 } 2386 } 2387 2388 void Assembler::jmp(Register entry) { 2389 int encode = prefix_and_encode(entry->encoding()); 2390 emit_int16((unsigned char)0xFF, (0xE0 | encode)); 2391 } 2392 2393 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { 2394 InstructionMark im(this); 2395 emit_int8((unsigned char)0xE9); 2396 assert(dest != NULL, "must have a target"); 2397 intptr_t disp = dest - (pc() + sizeof(int32_t)); 2398 assert(is_simm32(disp), "must be 32bit offset (jmp)"); 2399 emit_data(disp, rspec.reloc(), call32_operand); 2400 } 2401 2402 void Assembler::jmpb_0(Label& L, const char* file, int line) { 2403 if (L.is_bound()) { 2404 const int short_size = 2; 2405 address entry = target(L); 2406 assert(entry != NULL, "jmp most probably wrong"); 2407 #ifdef ASSERT 2408 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 2409 intptr_t delta = short_branch_delta(); 2410 if (delta != 0) { 2411 dist += (dist < 0 ? (-delta) :delta); 2412 } 2413 assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line); 2414 #endif 2415 intptr_t offs = entry - pc(); 2416 emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF); 2417 } else { 2418 InstructionMark im(this); 2419 L.add_patch_at(code(), locator(), file, line); 2420 emit_int16((unsigned char)0xEB, 0); 2421 } 2422 } 2423 2424 void Assembler::ldmxcsr( Address src) { 2425 if (UseAVX > 0 ) { 2426 InstructionMark im(this); 2427 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2428 vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2429 emit_int8((unsigned char)0xAE); 2430 emit_operand(as_Register(2), src); 2431 } else { 2432 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2433 InstructionMark im(this); 2434 prefix(src); 2435 emit_int16(0x0F, (unsigned char)0xAE); 2436 emit_operand(as_Register(2), src); 2437 } 2438 } 2439 2440 void Assembler::leal(Register dst, Address src) { 2441 InstructionMark im(this); 2442 prefix(src, dst); 2443 emit_int8((unsigned char)0x8D); 2444 emit_operand(dst, src); 2445 } 2446 2447 void Assembler::lfence() { 2448 emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8); 2449 } 2450 2451 void Assembler::lock() { 2452 emit_int8((unsigned char)0xF0); 2453 } 2454 2455 void Assembler::size_prefix() { 2456 emit_int8(0x66); 2457 } 2458 2459 void Assembler::lzcntl(Register dst, Register src) { 2460 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 2461 emit_int8((unsigned char)0xF3); 2462 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2463 emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode)); 2464 } 2465 2466 // Emit mfence instruction 2467 void Assembler::mfence() { 2468 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) 2469 emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0); 2470 } 2471 2472 // Emit sfence instruction 2473 void Assembler::sfence() { 2474 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) 2475 emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8); 2476 } 2477 2478 void Assembler::mov(Register dst, Register src) { 2479 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2480 } 2481 2482 void Assembler::movapd(XMMRegister dst, XMMRegister src) { 2483 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2484 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2485 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2486 attributes.set_rex_vex_w_reverted(); 2487 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2488 emit_int16(0x28, (0xC0 | encode)); 2489 } 2490 2491 void Assembler::movaps(XMMRegister dst, XMMRegister src) { 2492 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2493 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2494 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2495 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2496 emit_int16(0x28, (0xC0 | encode)); 2497 } 2498 2499 void Assembler::movlhps(XMMRegister dst, XMMRegister src) { 2500 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2501 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2502 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2503 emit_int16(0x16, (0xC0 | encode)); 2504 } 2505 2506 void Assembler::movb(Register dst, Address src) { 2507 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 2508 InstructionMark im(this); 2509 prefix(src, dst, true); 2510 emit_int8((unsigned char)0x8A); 2511 emit_operand(dst, src); 2512 } 2513 2514 void Assembler::movddup(XMMRegister dst, XMMRegister src) { 2515 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 2516 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2517 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2518 attributes.set_rex_vex_w_reverted(); 2519 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2520 emit_int16(0x12, 0xC0 | encode); 2521 } 2522 2523 void Assembler::vmovddup(XMMRegister dst, Address src, int vector_len) { 2524 assert(VM_Version::supports_avx(), ""); 2525 InstructionMark im(this); 2526 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2527 attributes.set_rex_vex_w_reverted(); 2528 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2529 emit_int8(0x12); 2530 emit_operand(dst, src); 2531 } 2532 2533 void Assembler::kmovbl(KRegister dst, KRegister src) { 2534 assert(VM_Version::supports_avx512dq(), ""); 2535 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2536 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2537 emit_int16((unsigned char)0x90, (0xC0 | encode)); 2538 } 2539 2540 void Assembler::kmovbl(KRegister dst, Register src) { 2541 assert(VM_Version::supports_avx512dq(), ""); 2542 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2543 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2544 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2545 } 2546 2547 void Assembler::kmovbl(Register dst, KRegister src) { 2548 assert(VM_Version::supports_avx512dq(), ""); 2549 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2550 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2551 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2552 } 2553 2554 void Assembler::kmovwl(KRegister dst, Register src) { 2555 assert(VM_Version::supports_evex(), ""); 2556 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2557 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2558 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2559 } 2560 2561 void Assembler::kmovwl(Register dst, KRegister src) { 2562 assert(VM_Version::supports_evex(), ""); 2563 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2564 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2565 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2566 } 2567 2568 void Assembler::kmovwl(KRegister dst, Address src) { 2569 assert(VM_Version::supports_evex(), ""); 2570 InstructionMark im(this); 2571 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2572 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2573 emit_int8((unsigned char)0x90); 2574 emit_operand((Register)dst, src); 2575 } 2576 2577 void Assembler::kmovwl(Address dst, KRegister src) { 2578 assert(VM_Version::supports_evex(), ""); 2579 InstructionMark im(this); 2580 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2581 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2582 emit_int8((unsigned char)0x91); 2583 emit_operand((Register)src, dst); 2584 } 2585 2586 void Assembler::kmovwl(KRegister dst, KRegister src) { 2587 assert(VM_Version::supports_evex(), ""); 2588 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2589 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2590 emit_int16((unsigned char)0x90, (0xC0 | encode)); 2591 } 2592 2593 void Assembler::kmovdl(KRegister dst, Register src) { 2594 assert(VM_Version::supports_avx512bw(), ""); 2595 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2596 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2597 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2598 } 2599 2600 void Assembler::kmovdl(Register dst, KRegister src) { 2601 assert(VM_Version::supports_avx512bw(), ""); 2602 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2603 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2604 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2605 } 2606 2607 void Assembler::kmovql(KRegister dst, KRegister src) { 2608 assert(VM_Version::supports_avx512bw(), ""); 2609 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2610 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2611 emit_int16((unsigned char)0x90, (0xC0 | encode)); 2612 } 2613 2614 void Assembler::kmovql(KRegister dst, Address src) { 2615 assert(VM_Version::supports_avx512bw(), ""); 2616 InstructionMark im(this); 2617 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2618 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2619 emit_int8((unsigned char)0x90); 2620 emit_operand((Register)dst, src); 2621 } 2622 2623 void Assembler::kmovql(Address dst, KRegister src) { 2624 assert(VM_Version::supports_avx512bw(), ""); 2625 InstructionMark im(this); 2626 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2627 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2628 emit_int8((unsigned char)0x91); 2629 emit_operand((Register)src, dst); 2630 } 2631 2632 void Assembler::kmovql(KRegister dst, Register src) { 2633 assert(VM_Version::supports_avx512bw(), ""); 2634 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2635 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2636 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2637 } 2638 2639 void Assembler::kmovql(Register dst, KRegister src) { 2640 assert(VM_Version::supports_avx512bw(), ""); 2641 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2642 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2643 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2644 } 2645 2646 void Assembler::knotwl(KRegister dst, KRegister src) { 2647 assert(VM_Version::supports_evex(), ""); 2648 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2649 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2650 emit_int16(0x44, (0xC0 | encode)); 2651 } 2652 2653 void Assembler::knotbl(KRegister dst, KRegister src) { 2654 assert(VM_Version::supports_evex(), ""); 2655 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2656 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2657 emit_int16(0x44, (0xC0 | encode)); 2658 } 2659 2660 void Assembler::korbl(KRegister dst, KRegister src1, KRegister src2) { 2661 assert(VM_Version::supports_avx512dq(), ""); 2662 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2663 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2664 emit_int16(0x45, (0xC0 | encode)); 2665 } 2666 2667 void Assembler::korwl(KRegister dst, KRegister src1, KRegister src2) { 2668 assert(VM_Version::supports_evex(), ""); 2669 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2670 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2671 emit_int16(0x45, (0xC0 | encode)); 2672 } 2673 2674 void Assembler::kordl(KRegister dst, KRegister src1, KRegister src2) { 2675 assert(VM_Version::supports_avx512bw(), ""); 2676 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2677 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2678 emit_int16(0x45, (0xC0 | encode)); 2679 } 2680 2681 void Assembler::korql(KRegister dst, KRegister src1, KRegister src2) { 2682 assert(VM_Version::supports_avx512bw(), ""); 2683 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2684 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2685 emit_int16(0x45, (0xC0 | encode)); 2686 } 2687 2688 void Assembler::kxorbl(KRegister dst, KRegister src1, KRegister src2) { 2689 assert(VM_Version::supports_avx512dq(), ""); 2690 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2691 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2692 emit_int16(0x47, (0xC0 | encode)); 2693 } 2694 2695 void Assembler::kxorwl(KRegister dst, KRegister src1, KRegister src2) { 2696 assert(VM_Version::supports_evex(), ""); 2697 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2698 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2699 emit_int16(0x47, (0xC0 | encode)); 2700 } 2701 2702 void Assembler::kxordl(KRegister dst, KRegister src1, KRegister src2) { 2703 assert(VM_Version::supports_avx512bw(), ""); 2704 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2705 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2706 emit_int16(0x47, (0xC0 | encode)); 2707 } 2708 2709 void Assembler::kxorql(KRegister dst, KRegister src1, KRegister src2) { 2710 assert(VM_Version::supports_avx512bw(), ""); 2711 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2712 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2713 emit_int16(0x47, (0xC0 | encode)); 2714 } 2715 2716 void Assembler::kandbl(KRegister dst, KRegister src1, KRegister src2) { 2717 assert(VM_Version::supports_avx512dq(), ""); 2718 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2719 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2720 emit_int16(0x41, (0xC0 | encode)); 2721 } 2722 2723 void Assembler::kandwl(KRegister dst, KRegister src1, KRegister src2) { 2724 assert(VM_Version::supports_evex(), ""); 2725 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2726 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2727 emit_int16(0x41, (0xC0 | encode)); 2728 } 2729 2730 void Assembler::kanddl(KRegister dst, KRegister src1, KRegister src2) { 2731 assert(VM_Version::supports_avx512bw(), ""); 2732 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2733 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2734 emit_int16(0x41, (0xC0 | encode)); 2735 } 2736 2737 void Assembler::kandql(KRegister dst, KRegister src1, KRegister src2) { 2738 assert(VM_Version::supports_avx512bw(), ""); 2739 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2740 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2741 emit_int16(0x41, (0xC0 | encode)); 2742 } 2743 2744 void Assembler::knotdl(KRegister dst, KRegister src) { 2745 assert(VM_Version::supports_avx512bw(), ""); 2746 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2747 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2748 emit_int16(0x44, (0xC0 | encode)); 2749 } 2750 2751 void Assembler::knotql(KRegister dst, KRegister src) { 2752 assert(VM_Version::supports_avx512bw(), ""); 2753 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2754 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2755 emit_int16(0x44, (0xC0 | encode)); 2756 } 2757 2758 // This instruction produces ZF or CF flags 2759 void Assembler::kortestbl(KRegister src1, KRegister src2) { 2760 assert(VM_Version::supports_avx512dq(), ""); 2761 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2762 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2763 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2764 } 2765 2766 // This instruction produces ZF or CF flags 2767 void Assembler::kortestwl(KRegister src1, KRegister src2) { 2768 assert(VM_Version::supports_evex(), ""); 2769 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2770 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2771 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2772 } 2773 2774 // This instruction produces ZF or CF flags 2775 void Assembler::kortestdl(KRegister src1, KRegister src2) { 2776 assert(VM_Version::supports_avx512bw(), ""); 2777 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2778 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2779 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2780 } 2781 2782 // This instruction produces ZF or CF flags 2783 void Assembler::kortestql(KRegister src1, KRegister src2) { 2784 assert(VM_Version::supports_avx512bw(), ""); 2785 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2786 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2787 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2788 } 2789 2790 // This instruction produces ZF or CF flags 2791 void Assembler::ktestql(KRegister src1, KRegister src2) { 2792 assert(VM_Version::supports_avx512bw(), ""); 2793 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2794 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2795 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2796 } 2797 2798 void Assembler::ktestdl(KRegister src1, KRegister src2) { 2799 assert(VM_Version::supports_avx512bw(), ""); 2800 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2801 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2802 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2803 } 2804 2805 void Assembler::ktestwl(KRegister src1, KRegister src2) { 2806 assert(VM_Version::supports_avx512dq(), ""); 2807 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2808 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2809 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2810 } 2811 2812 void Assembler::ktestbl(KRegister src1, KRegister src2) { 2813 assert(VM_Version::supports_avx512dq(), ""); 2814 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2815 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2816 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2817 } 2818 2819 void Assembler::ktestq(KRegister src1, KRegister src2) { 2820 assert(VM_Version::supports_avx512bw(), ""); 2821 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2822 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2823 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2824 } 2825 2826 void Assembler::ktestd(KRegister src1, KRegister src2) { 2827 assert(VM_Version::supports_avx512bw(), ""); 2828 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2829 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2830 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2831 } 2832 2833 void Assembler::kxnorbl(KRegister dst, KRegister src1, KRegister src2) { 2834 assert(VM_Version::supports_avx512dq(), ""); 2835 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2836 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2837 emit_int16(0x46, (0xC0 | encode)); 2838 } 2839 2840 void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) { 2841 assert(VM_Version::supports_avx512dq(), ""); 2842 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2843 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 2844 emit_int16(0x32, (0xC0 | encode)); 2845 emit_int8(imm8); 2846 } 2847 2848 void Assembler::kshiftlql(KRegister dst, KRegister src, int imm8) { 2849 assert(VM_Version::supports_avx512bw(), ""); 2850 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2851 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 2852 emit_int16(0x33, (0xC0 | encode)); 2853 emit_int8(imm8); 2854 } 2855 2856 2857 void Assembler::kshiftrbl(KRegister dst, KRegister src, int imm8) { 2858 assert(VM_Version::supports_avx512dq(), ""); 2859 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2860 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 2861 emit_int16(0x30, (0xC0 | encode)); 2862 } 2863 2864 void Assembler::kshiftrwl(KRegister dst, KRegister src, int imm8) { 2865 assert(VM_Version::supports_evex(), ""); 2866 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2867 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 2868 emit_int16(0x30, (0xC0 | encode)); 2869 emit_int8(imm8); 2870 } 2871 2872 void Assembler::kshiftrdl(KRegister dst, KRegister src, int imm8) { 2873 assert(VM_Version::supports_avx512bw(), ""); 2874 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2875 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 2876 emit_int16(0x31, (0xC0 | encode)); 2877 emit_int8(imm8); 2878 } 2879 2880 void Assembler::kshiftrql(KRegister dst, KRegister src, int imm8) { 2881 assert(VM_Version::supports_avx512bw(), ""); 2882 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2883 int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 2884 emit_int16(0x31, (0xC0 | encode)); 2885 emit_int8(imm8); 2886 } 2887 2888 void Assembler::kunpckdql(KRegister dst, KRegister src1, KRegister src2) { 2889 assert(VM_Version::supports_avx512bw(), ""); 2890 InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2891 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2892 emit_int16(0x4B, (0xC0 | encode)); 2893 } 2894 2895 void Assembler::movb(Address dst, int imm8) { 2896 InstructionMark im(this); 2897 prefix(dst); 2898 emit_int8((unsigned char)0xC6); 2899 emit_operand(rax, dst, 1); 2900 emit_int8(imm8); 2901 } 2902 2903 2904 void Assembler::movb(Address dst, Register src) { 2905 assert(src->has_byte_register(), "must have byte register"); 2906 InstructionMark im(this); 2907 prefix(dst, src, true); 2908 emit_int8((unsigned char)0x88); 2909 emit_operand(src, dst); 2910 } 2911 2912 void Assembler::movdl(XMMRegister dst, Register src) { 2913 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2914 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2915 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2916 emit_int16(0x6E, (0xC0 | encode)); 2917 } 2918 2919 void Assembler::movdl(Register dst, XMMRegister src) { 2920 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2921 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2922 // swap src/dst to get correct prefix 2923 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2924 emit_int16(0x7E, (0xC0 | encode)); 2925 } 2926 2927 void Assembler::movdl(XMMRegister dst, Address src) { 2928 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2929 InstructionMark im(this); 2930 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2931 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2932 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2933 emit_int8(0x6E); 2934 emit_operand(dst, src); 2935 } 2936 2937 void Assembler::movdl(Address dst, XMMRegister src) { 2938 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2939 InstructionMark im(this); 2940 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2941 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2942 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2943 emit_int8(0x7E); 2944 emit_operand(src, dst); 2945 } 2946 2947 void Assembler::movdqa(XMMRegister dst, XMMRegister src) { 2948 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2949 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2950 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2951 emit_int16(0x6F, (0xC0 | encode)); 2952 } 2953 2954 void Assembler::movdqa(XMMRegister dst, Address src) { 2955 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2956 InstructionMark im(this); 2957 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2958 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2959 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2960 emit_int8(0x6F); 2961 emit_operand(dst, src); 2962 } 2963 2964 void Assembler::movdqu(XMMRegister dst, Address src) { 2965 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2966 InstructionMark im(this); 2967 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2968 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2969 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2970 emit_int8(0x6F); 2971 emit_operand(dst, src); 2972 } 2973 2974 void Assembler::movdqu(XMMRegister dst, XMMRegister src) { 2975 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2976 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2977 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2978 emit_int16(0x6F, (0xC0 | encode)); 2979 } 2980 2981 void Assembler::movdqu(Address dst, XMMRegister src) { 2982 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2983 InstructionMark im(this); 2984 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2985 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2986 attributes.reset_is_clear_context(); 2987 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2988 emit_int8(0x7F); 2989 emit_operand(src, dst); 2990 } 2991 2992 // Move Unaligned 256bit Vector 2993 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2994 assert(UseAVX > 0, ""); 2995 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2996 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2997 emit_int16(0x6F, (0xC0 | encode)); 2998 } 2999 3000 void Assembler::vmovdqu(XMMRegister dst, Address src) { 3001 assert(UseAVX > 0, ""); 3002 InstructionMark im(this); 3003 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3004 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3005 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3006 emit_int8(0x6F); 3007 emit_operand(dst, src); 3008 } 3009 3010 void Assembler::vmovdqu(Address dst, XMMRegister src) { 3011 assert(UseAVX > 0, ""); 3012 InstructionMark im(this); 3013 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3014 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3015 attributes.reset_is_clear_context(); 3016 // swap src<->dst for encoding 3017 assert(src != xnoreg, "sanity"); 3018 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3019 emit_int8(0x7F); 3020 emit_operand(src, dst); 3021 } 3022 3023 // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64) 3024 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { 3025 assert(VM_Version::supports_evex(), ""); 3026 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3027 attributes.set_is_evex_instruction(); 3028 if (merge) { 3029 attributes.reset_is_clear_context(); 3030 } 3031 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 3032 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 3033 emit_int16(0x6F, (0xC0 | encode)); 3034 } 3035 3036 void Assembler::evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { 3037 assert(VM_Version::supports_evex(), ""); 3038 InstructionMark im(this); 3039 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3040 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 3041 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3042 attributes.set_is_evex_instruction(); 3043 if (merge) { 3044 attributes.reset_is_clear_context(); 3045 } 3046 vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 3047 emit_int8(0x6F); 3048 emit_operand(dst, src); 3049 } 3050 3051 void Assembler::evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { 3052 assert(VM_Version::supports_evex(), ""); 3053 assert(src != xnoreg, "sanity"); 3054 InstructionMark im(this); 3055 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3056 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 3057 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3058 attributes.set_is_evex_instruction(); 3059 if (merge) { 3060 attributes.reset_is_clear_context(); 3061 } 3062 vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 3063 emit_int8(0x7F); 3064 emit_operand(src, dst); 3065 } 3066 3067 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 3068 assert(VM_Version::supports_avx512vlbw(), ""); 3069 InstructionMark im(this); 3070 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3071 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3072 attributes.set_embedded_opmask_register_specifier(mask); 3073 attributes.set_is_evex_instruction(); 3074 if (merge) { 3075 attributes.reset_is_clear_context(); 3076 } 3077 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3078 emit_int8(0x6F); 3079 emit_operand(dst, src); 3080 } 3081 3082 void Assembler::evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 3083 assert(VM_Version::supports_avx512vlbw(), ""); 3084 assert(src != xnoreg, "sanity"); 3085 InstructionMark im(this); 3086 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3087 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3088 attributes.set_embedded_opmask_register_specifier(mask); 3089 attributes.set_is_evex_instruction(); 3090 if (merge) { 3091 attributes.reset_is_clear_context(); 3092 } 3093 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3094 emit_int8(0x7F); 3095 emit_operand(src, dst); 3096 } 3097 3098 void Assembler::evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { 3099 assert(VM_Version::supports_evex(), ""); 3100 InstructionMark im(this); 3101 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3102 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3103 attributes.set_is_evex_instruction(); 3104 if (merge) { 3105 attributes.reset_is_clear_context(); 3106 } 3107 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 3108 vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 3109 emit_int8(0x6F); 3110 emit_operand(dst, src); 3111 } 3112 3113 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 3114 assert(VM_Version::supports_avx512vlbw(), ""); 3115 InstructionMark im(this); 3116 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3117 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3118 attributes.set_embedded_opmask_register_specifier(mask); 3119 attributes.set_is_evex_instruction(); 3120 if (merge) { 3121 attributes.reset_is_clear_context(); 3122 } 3123 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3124 emit_int8(0x6F); 3125 emit_operand(dst, src); 3126 } 3127 3128 void Assembler::evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { 3129 assert(VM_Version::supports_evex(), ""); 3130 assert(src != xnoreg, "sanity"); 3131 InstructionMark im(this); 3132 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3133 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3134 attributes.set_is_evex_instruction(); 3135 if (merge) { 3136 attributes.reset_is_clear_context(); 3137 } 3138 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 3139 vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 3140 emit_int8(0x7F); 3141 emit_operand(src, dst); 3142 } 3143 3144 void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 3145 assert(VM_Version::supports_avx512vlbw(), ""); 3146 assert(src != xnoreg, "sanity"); 3147 InstructionMark im(this); 3148 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3149 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3150 attributes.set_embedded_opmask_register_specifier(mask); 3151 attributes.set_is_evex_instruction(); 3152 if (merge) { 3153 attributes.reset_is_clear_context(); 3154 } 3155 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3156 emit_int8(0x7F); 3157 emit_operand(src, dst); 3158 } 3159 3160 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 3161 // Unmasked instruction 3162 evmovdqul(dst, k0, src, /*merge*/ false, vector_len); 3163 } 3164 3165 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 3166 assert(VM_Version::supports_evex(), ""); 3167 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3168 attributes.set_embedded_opmask_register_specifier(mask); 3169 attributes.set_is_evex_instruction(); 3170 if (merge) { 3171 attributes.reset_is_clear_context(); 3172 } 3173 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3174 emit_int16(0x6F, (0xC0 | encode)); 3175 } 3176 3177 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) { 3178 // Unmasked instruction 3179 evmovdqul(dst, k0, src, /*merge*/ false, vector_len); 3180 } 3181 3182 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 3183 assert(VM_Version::supports_evex(), ""); 3184 InstructionMark im(this); 3185 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ true); 3186 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3187 attributes.set_embedded_opmask_register_specifier(mask); 3188 attributes.set_is_evex_instruction(); 3189 if (merge) { 3190 attributes.reset_is_clear_context(); 3191 } 3192 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3193 emit_int8(0x6F); 3194 emit_operand(dst, src); 3195 } 3196 3197 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) { 3198 // Unmasked isntruction 3199 evmovdqul(dst, k0, src, /*merge*/ true, vector_len); 3200 } 3201 3202 void Assembler::evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 3203 assert(VM_Version::supports_evex(), ""); 3204 assert(src != xnoreg, "sanity"); 3205 InstructionMark im(this); 3206 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3207 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3208 attributes.set_embedded_opmask_register_specifier(mask); 3209 attributes.set_is_evex_instruction(); 3210 if (merge) { 3211 attributes.reset_is_clear_context(); 3212 } 3213 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3214 emit_int8(0x7F); 3215 emit_operand(src, dst); 3216 } 3217 3218 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 3219 // Unmasked instruction 3220 if (dst->encoding() == src->encoding()) return; 3221 evmovdquq(dst, k0, src, /*merge*/ false, vector_len); 3222 } 3223 3224 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 3225 assert(VM_Version::supports_evex(), ""); 3226 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3227 attributes.set_embedded_opmask_register_specifier(mask); 3228 attributes.set_is_evex_instruction(); 3229 if (merge) { 3230 attributes.reset_is_clear_context(); 3231 } 3232 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3233 emit_int16(0x6F, (0xC0 | encode)); 3234 } 3235 3236 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) { 3237 // Unmasked instruction 3238 evmovdquq(dst, k0, src, /*merge*/ false, vector_len); 3239 } 3240 3241 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 3242 assert(VM_Version::supports_evex(), ""); 3243 InstructionMark im(this); 3244 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3245 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3246 attributes.set_embedded_opmask_register_specifier(mask); 3247 attributes.set_is_evex_instruction(); 3248 if (merge) { 3249 attributes.reset_is_clear_context(); 3250 } 3251 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3252 emit_int8(0x6F); 3253 emit_operand(dst, src); 3254 } 3255 3256 void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) { 3257 // Unmasked instruction 3258 evmovdquq(dst, k0, src, /*merge*/ true, vector_len); 3259 } 3260 3261 void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 3262 assert(VM_Version::supports_evex(), ""); 3263 assert(src != xnoreg, "sanity"); 3264 InstructionMark im(this); 3265 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3266 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3267 attributes.set_embedded_opmask_register_specifier(mask); 3268 if (merge) { 3269 attributes.reset_is_clear_context(); 3270 } 3271 attributes.set_is_evex_instruction(); 3272 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3273 emit_int8(0x7F); 3274 emit_operand(src, dst); 3275 } 3276 3277 // Uses zero extension on 64bit 3278 3279 void Assembler::movl(Register dst, int32_t imm32) { 3280 int encode = prefix_and_encode(dst->encoding()); 3281 emit_int8(0xB8 | encode); 3282 emit_int32(imm32); 3283 } 3284 3285 void Assembler::movl(Register dst, Register src) { 3286 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3287 emit_int16((unsigned char)0x8B, (0xC0 | encode)); 3288 } 3289 3290 void Assembler::movl(Register dst, Address src) { 3291 InstructionMark im(this); 3292 prefix(src, dst); 3293 emit_int8((unsigned char)0x8B); 3294 emit_operand(dst, src); 3295 } 3296 3297 void Assembler::movl(Address dst, int32_t imm32) { 3298 InstructionMark im(this); 3299 prefix(dst); 3300 emit_int8((unsigned char)0xC7); 3301 emit_operand(rax, dst, 4); 3302 emit_int32(imm32); 3303 } 3304 3305 void Assembler::movl(Address dst, Register src) { 3306 InstructionMark im(this); 3307 prefix(dst, src); 3308 emit_int8((unsigned char)0x89); 3309 emit_operand(src, dst); 3310 } 3311 3312 // New cpus require to use movsd and movss to avoid partial register stall 3313 // when loading from memory. But for old Opteron use movlpd instead of movsd. 3314 // The selection is done in MacroAssembler::movdbl() and movflt(). 3315 void Assembler::movlpd(XMMRegister dst, Address src) { 3316 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3317 InstructionMark im(this); 3318 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3319 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3320 attributes.set_rex_vex_w_reverted(); 3321 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3322 emit_int8(0x12); 3323 emit_operand(dst, src); 3324 } 3325 3326 void Assembler::movq(XMMRegister dst, Address src) { 3327 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3328 InstructionMark im(this); 3329 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3330 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3331 attributes.set_rex_vex_w_reverted(); 3332 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3333 emit_int8(0x7E); 3334 emit_operand(dst, src); 3335 } 3336 3337 void Assembler::movq(Address dst, XMMRegister src) { 3338 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3339 InstructionMark im(this); 3340 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3341 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3342 attributes.set_rex_vex_w_reverted(); 3343 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3344 emit_int8((unsigned char)0xD6); 3345 emit_operand(src, dst); 3346 } 3347 3348 void Assembler::movq(XMMRegister dst, XMMRegister src) { 3349 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3350 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3351 attributes.set_rex_vex_w_reverted(); 3352 int encode = simd_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3353 emit_int16((unsigned char)0xD6, (0xC0 | encode)); 3354 } 3355 3356 void Assembler::movq(Register dst, XMMRegister src) { 3357 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3358 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3359 // swap src/dst to get correct prefix 3360 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3361 emit_int16(0x7E, (0xC0 | encode)); 3362 } 3363 3364 void Assembler::movq(XMMRegister dst, Register src) { 3365 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3366 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3367 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3368 emit_int16(0x6E, (0xC0 | encode)); 3369 } 3370 3371 void Assembler::movsbl(Register dst, Address src) { // movsxb 3372 InstructionMark im(this); 3373 prefix(src, dst); 3374 emit_int16(0x0F, (unsigned char)0xBE); 3375 emit_operand(dst, src); 3376 } 3377 3378 void Assembler::movsbl(Register dst, Register src) { // movsxb 3379 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 3380 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); 3381 emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode)); 3382 } 3383 3384 void Assembler::movsd(XMMRegister dst, XMMRegister src) { 3385 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3386 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3387 attributes.set_rex_vex_w_reverted(); 3388 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3389 emit_int16(0x10, (0xC0 | encode)); 3390 } 3391 3392 void Assembler::movsd(XMMRegister dst, Address src) { 3393 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3394 InstructionMark im(this); 3395 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3396 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3397 attributes.set_rex_vex_w_reverted(); 3398 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3399 emit_int8(0x10); 3400 emit_operand(dst, src); 3401 } 3402 3403 void Assembler::movsd(Address dst, XMMRegister src) { 3404 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3405 InstructionMark im(this); 3406 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3407 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3408 attributes.reset_is_clear_context(); 3409 attributes.set_rex_vex_w_reverted(); 3410 simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3411 emit_int8(0x11); 3412 emit_operand(src, dst); 3413 } 3414 3415 void Assembler::movss(XMMRegister dst, XMMRegister src) { 3416 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3417 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3418 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3419 emit_int16(0x10, (0xC0 | encode)); 3420 } 3421 3422 void Assembler::movss(XMMRegister dst, Address src) { 3423 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3424 InstructionMark im(this); 3425 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3426 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 3427 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3428 emit_int8(0x10); 3429 emit_operand(dst, src); 3430 } 3431 3432 void Assembler::movss(Address dst, XMMRegister src) { 3433 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3434 InstructionMark im(this); 3435 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3436 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 3437 attributes.reset_is_clear_context(); 3438 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3439 emit_int8(0x11); 3440 emit_operand(src, dst); 3441 } 3442 3443 void Assembler::movswl(Register dst, Address src) { // movsxw 3444 InstructionMark im(this); 3445 prefix(src, dst); 3446 emit_int16(0x0F, (unsigned char)0xBF); 3447 emit_operand(dst, src); 3448 } 3449 3450 void Assembler::movswl(Register dst, Register src) { // movsxw 3451 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3452 emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode)); 3453 } 3454 3455 void Assembler::movw(Address dst, int imm16) { 3456 InstructionMark im(this); 3457 3458 emit_int8(0x66); // switch to 16-bit mode 3459 prefix(dst); 3460 emit_int8((unsigned char)0xC7); 3461 emit_operand(rax, dst, 2); 3462 emit_int16(imm16); 3463 } 3464 3465 void Assembler::movw(Register dst, Address src) { 3466 InstructionMark im(this); 3467 emit_int8(0x66); 3468 prefix(src, dst); 3469 emit_int8((unsigned char)0x8B); 3470 emit_operand(dst, src); 3471 } 3472 3473 void Assembler::movw(Address dst, Register src) { 3474 InstructionMark im(this); 3475 emit_int8(0x66); 3476 prefix(dst, src); 3477 emit_int8((unsigned char)0x89); 3478 emit_operand(src, dst); 3479 } 3480 3481 void Assembler::movzbl(Register dst, Address src) { // movzxb 3482 InstructionMark im(this); 3483 prefix(src, dst); 3484 emit_int16(0x0F, (unsigned char)0xB6); 3485 emit_operand(dst, src); 3486 } 3487 3488 void Assembler::movzbl(Register dst, Register src) { // movzxb 3489 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 3490 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); 3491 emit_int24(0x0F, (unsigned char)0xB6, 0xC0 | encode); 3492 } 3493 3494 void Assembler::movzwl(Register dst, Address src) { // movzxw 3495 InstructionMark im(this); 3496 prefix(src, dst); 3497 emit_int16(0x0F, (unsigned char)0xB7); 3498 emit_operand(dst, src); 3499 } 3500 3501 void Assembler::movzwl(Register dst, Register src) { // movzxw 3502 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3503 emit_int24(0x0F, (unsigned char)0xB7, 0xC0 | encode); 3504 } 3505 3506 void Assembler::mull(Address src) { 3507 InstructionMark im(this); 3508 prefix(src); 3509 emit_int8((unsigned char)0xF7); 3510 emit_operand(rsp, src); 3511 } 3512 3513 void Assembler::mull(Register src) { 3514 int encode = prefix_and_encode(src->encoding()); 3515 emit_int16((unsigned char)0xF7, (0xE0 | encode)); 3516 } 3517 3518 void Assembler::mulsd(XMMRegister dst, Address src) { 3519 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3520 InstructionMark im(this); 3521 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3522 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3523 attributes.set_rex_vex_w_reverted(); 3524 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3525 emit_int8(0x59); 3526 emit_operand(dst, src); 3527 } 3528 3529 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { 3530 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3531 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3532 attributes.set_rex_vex_w_reverted(); 3533 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 3534 emit_int16(0x59, (0xC0 | encode)); 3535 } 3536 3537 void Assembler::mulss(XMMRegister dst, Address src) { 3538 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3539 InstructionMark im(this); 3540 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3541 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 3542 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3543 emit_int8(0x59); 3544 emit_operand(dst, src); 3545 } 3546 3547 void Assembler::mulss(XMMRegister dst, XMMRegister src) { 3548 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3549 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 3550 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 3551 emit_int16(0x59, (0xC0 | encode)); 3552 } 3553 3554 void Assembler::negl(Register dst) { 3555 int encode = prefix_and_encode(dst->encoding()); 3556 emit_int16((unsigned char)0xF7, (0xD8 | encode)); 3557 } 3558 3559 void Assembler::negl(Address dst) { 3560 InstructionMark im(this); 3561 prefix(dst); 3562 emit_int8((unsigned char)0xF7); 3563 emit_operand(as_Register(3), dst); 3564 } 3565 3566 void Assembler::nop(int i) { 3567 #ifdef ASSERT 3568 assert(i > 0, " "); 3569 // The fancy nops aren't currently recognized by debuggers making it a 3570 // pain to disassemble code while debugging. If asserts are on clearly 3571 // speed is not an issue so simply use the single byte traditional nop 3572 // to do alignment. 3573 3574 for (; i > 0 ; i--) emit_int8((unsigned char)0x90); 3575 return; 3576 3577 #endif // ASSERT 3578 3579 if (UseAddressNop && VM_Version::is_intel()) { 3580 // 3581 // Using multi-bytes nops "0x0F 0x1F [address]" for Intel 3582 // 1: 0x90 3583 // 2: 0x66 0x90 3584 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3585 // 4: 0x0F 0x1F 0x40 0x00 3586 // 5: 0x0F 0x1F 0x44 0x00 0x00 3587 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3588 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3589 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3590 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3591 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3592 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3593 3594 // The rest coding is Intel specific - don't use consecutive address nops 3595 3596 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3597 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3598 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3599 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3600 3601 while(i >= 15) { 3602 // For Intel don't generate consecutive address nops (mix with regular nops) 3603 i -= 15; 3604 emit_int24(0x66, 0x66, 0x66); 3605 addr_nop_8(); 3606 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3607 } 3608 switch (i) { 3609 case 14: 3610 emit_int8(0x66); // size prefix 3611 case 13: 3612 emit_int8(0x66); // size prefix 3613 case 12: 3614 addr_nop_8(); 3615 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3616 break; 3617 case 11: 3618 emit_int8(0x66); // size prefix 3619 case 10: 3620 emit_int8(0x66); // size prefix 3621 case 9: 3622 emit_int8(0x66); // size prefix 3623 case 8: 3624 addr_nop_8(); 3625 break; 3626 case 7: 3627 addr_nop_7(); 3628 break; 3629 case 6: 3630 emit_int8(0x66); // size prefix 3631 case 5: 3632 addr_nop_5(); 3633 break; 3634 case 4: 3635 addr_nop_4(); 3636 break; 3637 case 3: 3638 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3639 emit_int8(0x66); // size prefix 3640 case 2: 3641 emit_int8(0x66); // size prefix 3642 case 1: 3643 emit_int8((unsigned char)0x90); 3644 // nop 3645 break; 3646 default: 3647 assert(i == 0, " "); 3648 } 3649 return; 3650 } 3651 if (UseAddressNop && VM_Version::is_amd_family()) { 3652 // 3653 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. 3654 // 1: 0x90 3655 // 2: 0x66 0x90 3656 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3657 // 4: 0x0F 0x1F 0x40 0x00 3658 // 5: 0x0F 0x1F 0x44 0x00 0x00 3659 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3660 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3661 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3662 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3663 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3664 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3665 3666 // The rest coding is AMD specific - use consecutive address nops 3667 3668 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 3669 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 3670 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3671 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3672 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3673 // Size prefixes (0x66) are added for larger sizes 3674 3675 while(i >= 22) { 3676 i -= 11; 3677 emit_int24(0x66, 0x66, 0x66); 3678 addr_nop_8(); 3679 } 3680 // Generate first nop for size between 21-12 3681 switch (i) { 3682 case 21: 3683 i -= 1; 3684 emit_int8(0x66); // size prefix 3685 case 20: 3686 case 19: 3687 i -= 1; 3688 emit_int8(0x66); // size prefix 3689 case 18: 3690 case 17: 3691 i -= 1; 3692 emit_int8(0x66); // size prefix 3693 case 16: 3694 case 15: 3695 i -= 8; 3696 addr_nop_8(); 3697 break; 3698 case 14: 3699 case 13: 3700 i -= 7; 3701 addr_nop_7(); 3702 break; 3703 case 12: 3704 i -= 6; 3705 emit_int8(0x66); // size prefix 3706 addr_nop_5(); 3707 break; 3708 default: 3709 assert(i < 12, " "); 3710 } 3711 3712 // Generate second nop for size between 11-1 3713 switch (i) { 3714 case 11: 3715 emit_int8(0x66); // size prefix 3716 case 10: 3717 emit_int8(0x66); // size prefix 3718 case 9: 3719 emit_int8(0x66); // size prefix 3720 case 8: 3721 addr_nop_8(); 3722 break; 3723 case 7: 3724 addr_nop_7(); 3725 break; 3726 case 6: 3727 emit_int8(0x66); // size prefix 3728 case 5: 3729 addr_nop_5(); 3730 break; 3731 case 4: 3732 addr_nop_4(); 3733 break; 3734 case 3: 3735 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3736 emit_int8(0x66); // size prefix 3737 case 2: 3738 emit_int8(0x66); // size prefix 3739 case 1: 3740 emit_int8((unsigned char)0x90); 3741 // nop 3742 break; 3743 default: 3744 assert(i == 0, " "); 3745 } 3746 return; 3747 } 3748 3749 if (UseAddressNop && VM_Version::is_zx()) { 3750 // 3751 // Using multi-bytes nops "0x0F 0x1F [address]" for ZX 3752 // 1: 0x90 3753 // 2: 0x66 0x90 3754 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3755 // 4: 0x0F 0x1F 0x40 0x00 3756 // 5: 0x0F 0x1F 0x44 0x00 0x00 3757 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3758 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3759 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3760 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3761 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3762 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3763 3764 // The rest coding is ZX specific - don't use consecutive address nops 3765 3766 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3767 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3768 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3769 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3770 3771 while (i >= 15) { 3772 // For ZX don't generate consecutive address nops (mix with regular nops) 3773 i -= 15; 3774 emit_int24(0x66, 0x66, 0x66); 3775 addr_nop_8(); 3776 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3777 } 3778 switch (i) { 3779 case 14: 3780 emit_int8(0x66); // size prefix 3781 case 13: 3782 emit_int8(0x66); // size prefix 3783 case 12: 3784 addr_nop_8(); 3785 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3786 break; 3787 case 11: 3788 emit_int8(0x66); // size prefix 3789 case 10: 3790 emit_int8(0x66); // size prefix 3791 case 9: 3792 emit_int8(0x66); // size prefix 3793 case 8: 3794 addr_nop_8(); 3795 break; 3796 case 7: 3797 addr_nop_7(); 3798 break; 3799 case 6: 3800 emit_int8(0x66); // size prefix 3801 case 5: 3802 addr_nop_5(); 3803 break; 3804 case 4: 3805 addr_nop_4(); 3806 break; 3807 case 3: 3808 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3809 emit_int8(0x66); // size prefix 3810 case 2: 3811 emit_int8(0x66); // size prefix 3812 case 1: 3813 emit_int8((unsigned char)0x90); 3814 // nop 3815 break; 3816 default: 3817 assert(i == 0, " "); 3818 } 3819 return; 3820 } 3821 3822 // Using nops with size prefixes "0x66 0x90". 3823 // From AMD Optimization Guide: 3824 // 1: 0x90 3825 // 2: 0x66 0x90 3826 // 3: 0x66 0x66 0x90 3827 // 4: 0x66 0x66 0x66 0x90 3828 // 5: 0x66 0x66 0x90 0x66 0x90 3829 // 6: 0x66 0x66 0x90 0x66 0x66 0x90 3830 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 3831 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 3832 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 3833 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 3834 // 3835 while (i > 12) { 3836 i -= 4; 3837 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3838 } 3839 // 1 - 12 nops 3840 if (i > 8) { 3841 if (i > 9) { 3842 i -= 1; 3843 emit_int8(0x66); 3844 } 3845 i -= 3; 3846 emit_int24(0x66, 0x66, (unsigned char)0x90); 3847 } 3848 // 1 - 8 nops 3849 if (i > 4) { 3850 if (i > 6) { 3851 i -= 1; 3852 emit_int8(0x66); 3853 } 3854 i -= 3; 3855 emit_int24(0x66, 0x66, (unsigned char)0x90); 3856 } 3857 switch (i) { 3858 case 4: 3859 emit_int8(0x66); 3860 case 3: 3861 emit_int8(0x66); 3862 case 2: 3863 emit_int8(0x66); 3864 case 1: 3865 emit_int8((unsigned char)0x90); 3866 break; 3867 default: 3868 assert(i == 0, " "); 3869 } 3870 } 3871 3872 void Assembler::notl(Register dst) { 3873 int encode = prefix_and_encode(dst->encoding()); 3874 emit_int16((unsigned char)0xF7, (0xD0 | encode)); 3875 } 3876 3877 void Assembler::orw(Register dst, Register src) { 3878 (void)prefix_and_encode(dst->encoding(), src->encoding()); 3879 emit_arith(0x0B, 0xC0, dst, src); 3880 } 3881 3882 void Assembler::orl(Address dst, int32_t imm32) { 3883 InstructionMark im(this); 3884 prefix(dst); 3885 emit_arith_operand(0x81, rcx, dst, imm32); 3886 } 3887 3888 void Assembler::orl(Register dst, int32_t imm32) { 3889 prefix(dst); 3890 emit_arith(0x81, 0xC8, dst, imm32); 3891 } 3892 3893 void Assembler::orl(Register dst, Address src) { 3894 InstructionMark im(this); 3895 prefix(src, dst); 3896 emit_int8(0x0B); 3897 emit_operand(dst, src); 3898 } 3899 3900 void Assembler::orl(Register dst, Register src) { 3901 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3902 emit_arith(0x0B, 0xC0, dst, src); 3903 } 3904 3905 void Assembler::orl(Address dst, Register src) { 3906 InstructionMark im(this); 3907 prefix(dst, src); 3908 emit_int8(0x09); 3909 emit_operand(src, dst); 3910 } 3911 3912 void Assembler::orb(Address dst, int imm8) { 3913 InstructionMark im(this); 3914 prefix(dst); 3915 emit_int8((unsigned char)0x80); 3916 emit_operand(rcx, dst, 1); 3917 emit_int8(imm8); 3918 } 3919 3920 void Assembler::orb(Address dst, Register src) { 3921 InstructionMark im(this); 3922 prefix(dst, src, true); 3923 emit_int8(0x08); 3924 emit_operand(src, dst); 3925 } 3926 3927 void Assembler::packsswb(XMMRegister dst, XMMRegister src) { 3928 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3929 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3930 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3931 emit_int16(0x63, (0xC0 | encode)); 3932 } 3933 3934 void Assembler::vpacksswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3935 assert(UseAVX > 0, "some form of AVX must be enabled"); 3936 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3937 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3938 emit_int16(0x63, (0xC0 | encode)); 3939 } 3940 3941 void Assembler::packssdw(XMMRegister dst, XMMRegister src) { 3942 assert(VM_Version::supports_sse2(), ""); 3943 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3944 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3945 emit_int16(0x6B, (0xC0 | encode)); 3946 } 3947 3948 void Assembler::vpackssdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3949 assert(UseAVX > 0, "some form of AVX must be enabled"); 3950 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3951 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3952 emit_int16(0x6B, (0xC0 | encode)); 3953 } 3954 3955 void Assembler::packuswb(XMMRegister dst, Address src) { 3956 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3957 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3958 InstructionMark im(this); 3959 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3960 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 3961 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3962 emit_int8(0x67); 3963 emit_operand(dst, src); 3964 } 3965 3966 void Assembler::packuswb(XMMRegister dst, XMMRegister src) { 3967 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3968 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3969 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3970 emit_int16(0x67, (0xC0 | encode)); 3971 } 3972 3973 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3974 assert(UseAVX > 0, "some form of AVX must be enabled"); 3975 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3976 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3977 emit_int16(0x67, (0xC0 | encode)); 3978 } 3979 3980 void Assembler::packusdw(XMMRegister dst, XMMRegister src) { 3981 assert(VM_Version::supports_sse4_1(), ""); 3982 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3983 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3984 emit_int16(0x2B, (0xC0 | encode)); 3985 } 3986 3987 void Assembler::vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3988 assert(UseAVX > 0, "some form of AVX must be enabled"); 3989 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3990 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3991 emit_int16(0x2B, (0xC0 | encode)); 3992 } 3993 3994 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 3995 assert(VM_Version::supports_avx2(), ""); 3996 assert(vector_len != AVX_128bit, ""); 3997 // VEX.256.66.0F3A.W1 00 /r ib 3998 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3999 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4000 emit_int24(0x00, (0xC0 | encode), imm8); 4001 } 4002 4003 void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4004 assert(vector_len == AVX_256bit ? VM_Version::supports_avx512vl() : 4005 vector_len == AVX_512bit ? VM_Version::supports_evex() : false, "not supported"); 4006 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4007 attributes.set_is_evex_instruction(); 4008 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4009 emit_int16(0x36, (0xC0 | encode)); 4010 } 4011 4012 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4013 assert(VM_Version::supports_avx512_vbmi(), ""); 4014 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4015 attributes.set_is_evex_instruction(); 4016 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4017 emit_int16((unsigned char)0x8D, (0xC0 | encode)); 4018 } 4019 4020 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4021 assert(VM_Version::supports_avx512_vbmi(), ""); 4022 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4023 attributes.set_is_evex_instruction(); 4024 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4025 emit_int8((unsigned char)0x8D); 4026 emit_operand(dst, src); 4027 } 4028 4029 void Assembler::vpermw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4030 assert(vector_len == AVX_128bit ? VM_Version::supports_avx512vlbw() : 4031 vector_len == AVX_256bit ? VM_Version::supports_avx512vlbw() : 4032 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported"); 4033 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4034 attributes.set_is_evex_instruction(); 4035 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4036 emit_int16((unsigned char)0x8D, (0xC0 | encode)); 4037 } 4038 4039 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4040 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), ""); 4041 // VEX.NDS.256.66.0F38.W0 36 /r 4042 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4043 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4044 emit_int16(0x36, (0xC0 | encode)); 4045 } 4046 4047 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4048 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), ""); 4049 // VEX.NDS.256.66.0F38.W0 36 /r 4050 InstructionMark im(this); 4051 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4052 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4053 emit_int8(0x36); 4054 emit_operand(dst, src); 4055 } 4056 4057 void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 4058 assert(VM_Version::supports_avx2(), ""); 4059 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4060 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4061 emit_int24(0x46, (0xC0 | encode), imm8); 4062 } 4063 4064 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 4065 assert(VM_Version::supports_avx(), ""); 4066 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4067 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4068 emit_int24(0x06, (0xC0 | encode), imm8); 4069 } 4070 4071 void Assembler::vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 4072 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 4073 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4074 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4075 emit_int24(0x04, (0xC0 | encode), imm8); 4076 } 4077 4078 void Assembler::vpermilpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 4079 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), ""); 4080 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(),/* legacy_mode */ false,/* no_mask_reg */ true, /* uses_vl */ false); 4081 attributes.set_rex_vex_w_reverted(); 4082 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4083 emit_int24(0x05, (0xC0 | encode), imm8); 4084 } 4085 4086 void Assembler::vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 4087 assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), ""); 4088 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */false, /* no_mask_reg */ true, /* uses_vl */ false); 4089 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4090 emit_int24(0x01, (0xC0 | encode), imm8); 4091 } 4092 4093 void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4094 assert(VM_Version::supports_evex(), ""); 4095 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4096 attributes.set_is_evex_instruction(); 4097 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4098 emit_int16(0x76, (0xC0 | encode)); 4099 } 4100 4101 void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4102 assert(VM_Version::supports_avx512_vbmi(), ""); 4103 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4104 attributes.set_is_evex_instruction(); 4105 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4106 emit_int16(0x7D, (0xC0 | encode)); 4107 } 4108 4109 void Assembler::evpmultishiftqb(XMMRegister dst, XMMRegister ctl, XMMRegister src, int vector_len) { 4110 assert(VM_Version::supports_avx512_vbmi(), ""); 4111 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4112 attributes.set_is_evex_instruction(); 4113 int encode = vex_prefix_and_encode(dst->encoding(), ctl->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4114 emit_int16((unsigned char)0x83, (unsigned char)(0xC0 | encode)); 4115 } 4116 4117 void Assembler::pause() { 4118 emit_int16((unsigned char)0xF3, (unsigned char)0x90); 4119 } 4120 4121 void Assembler::ud2() { 4122 emit_int16(0x0F, 0x0B); 4123 } 4124 4125 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 4126 assert(VM_Version::supports_sse4_2(), ""); 4127 InstructionMark im(this); 4128 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4129 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4130 emit_int8(0x61); 4131 emit_operand(dst, src); 4132 emit_int8(imm8); 4133 } 4134 4135 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 4136 assert(VM_Version::supports_sse4_2(), ""); 4137 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4138 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4139 emit_int24(0x61, (0xC0 | encode), imm8); 4140 } 4141 4142 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4143 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 4144 assert(VM_Version::supports_sse2(), ""); 4145 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4146 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4147 emit_int16(0x74, (0xC0 | encode)); 4148 } 4149 4150 void Assembler::vpcmpCCbwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) { 4151 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 4152 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 4153 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4154 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4155 emit_int16(cond_encoding, (0xC0 | encode)); 4156 } 4157 4158 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4159 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4160 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 4161 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 4162 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4163 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4164 emit_int16(0x74, (0xC0 | encode)); 4165 } 4166 4167 // In this context, kdst is written the mask used to process the equal components 4168 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { 4169 assert(VM_Version::supports_avx512bw(), ""); 4170 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4171 attributes.set_is_evex_instruction(); 4172 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4173 emit_int16(0x74, (0xC0 | encode)); 4174 } 4175 4176 void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 4177 assert(VM_Version::supports_avx512vlbw(), ""); 4178 InstructionMark im(this); 4179 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4180 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4181 attributes.set_is_evex_instruction(); 4182 int dst_enc = kdst->encoding(); 4183 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4184 emit_int8(0x64); 4185 emit_operand(as_Register(dst_enc), src); 4186 } 4187 4188 void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) { 4189 assert(VM_Version::supports_avx512vlbw(), ""); 4190 InstructionMark im(this); 4191 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 4192 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4193 attributes.reset_is_clear_context(); 4194 attributes.set_embedded_opmask_register_specifier(mask); 4195 attributes.set_is_evex_instruction(); 4196 int dst_enc = kdst->encoding(); 4197 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4198 emit_int8(0x64); 4199 emit_operand(as_Register(dst_enc), src); 4200 } 4201 4202 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { 4203 assert(VM_Version::supports_avx512vlbw(), ""); 4204 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4205 attributes.set_is_evex_instruction(); 4206 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4207 emit_int24(0x3E, (0xC0 | encode), vcc); 4208 } 4209 4210 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) { 4211 assert(VM_Version::supports_avx512vlbw(), ""); 4212 InstructionMark im(this); 4213 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4214 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4215 attributes.set_is_evex_instruction(); 4216 int dst_enc = kdst->encoding(); 4217 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4218 emit_int8(0x3E); 4219 emit_operand(as_Register(dst_enc), src); 4220 emit_int8(vcc); 4221 } 4222 4223 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 4224 assert(VM_Version::supports_avx512bw(), ""); 4225 InstructionMark im(this); 4226 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4227 attributes.set_is_evex_instruction(); 4228 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4229 int dst_enc = kdst->encoding(); 4230 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4231 emit_int8(0x74); 4232 emit_operand(as_Register(dst_enc), src); 4233 } 4234 4235 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) { 4236 assert(VM_Version::supports_avx512vlbw(), ""); 4237 InstructionMark im(this); 4238 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 4239 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4240 attributes.reset_is_clear_context(); 4241 attributes.set_embedded_opmask_register_specifier(mask); 4242 attributes.set_is_evex_instruction(); 4243 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4244 emit_int8(0x74); 4245 emit_operand(as_Register(kdst->encoding()), src); 4246 } 4247 4248 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4249 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 4250 assert(VM_Version::supports_sse2(), ""); 4251 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4252 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4253 emit_int16(0x75, (0xC0 | encode)); 4254 } 4255 4256 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4257 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4258 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 4259 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 4260 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4261 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4262 emit_int16(0x75, (0xC0 | encode)); 4263 } 4264 4265 // In this context, kdst is written the mask used to process the equal components 4266 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { 4267 assert(VM_Version::supports_avx512bw(), ""); 4268 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4269 attributes.set_is_evex_instruction(); 4270 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4271 emit_int16(0x75, (0xC0 | encode)); 4272 } 4273 4274 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 4275 assert(VM_Version::supports_avx512bw(), ""); 4276 InstructionMark im(this); 4277 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4278 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4279 attributes.set_is_evex_instruction(); 4280 int dst_enc = kdst->encoding(); 4281 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4282 emit_int8(0x75); 4283 emit_operand(as_Register(dst_enc), src); 4284 } 4285 4286 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4287 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { 4288 assert(VM_Version::supports_sse2(), ""); 4289 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4290 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4291 emit_int16(0x76, (0xC0 | encode)); 4292 } 4293 4294 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4295 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4296 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 4297 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 4298 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4299 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4300 emit_int16(0x76, (0xC0 | encode)); 4301 } 4302 4303 // In this context, kdst is written the mask used to process the equal components 4304 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) { 4305 assert(VM_Version::supports_evex(), ""); 4306 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 4307 attributes.set_is_evex_instruction(); 4308 attributes.reset_is_clear_context(); 4309 attributes.set_embedded_opmask_register_specifier(mask); 4310 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4311 emit_int16(0x76, (0xC0 | encode)); 4312 } 4313 4314 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) { 4315 assert(VM_Version::supports_evex(), ""); 4316 InstructionMark im(this); 4317 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 4318 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 4319 attributes.set_is_evex_instruction(); 4320 attributes.reset_is_clear_context(); 4321 attributes.set_embedded_opmask_register_specifier(mask); 4322 int dst_enc = kdst->encoding(); 4323 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4324 emit_int8(0x76); 4325 emit_operand(as_Register(dst_enc), src); 4326 } 4327 4328 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4329 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) { 4330 assert(VM_Version::supports_sse4_1(), ""); 4331 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4332 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4333 emit_int16(0x29, (0xC0 | encode)); 4334 } 4335 4336 void Assembler::vpcmpCCq(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) { 4337 assert(VM_Version::supports_avx(), ""); 4338 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4339 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4340 emit_int16(cond_encoding, (0xC0 | encode)); 4341 } 4342 4343 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 4344 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4345 assert(VM_Version::supports_avx(), ""); 4346 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4347 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4348 emit_int16(0x29, (0xC0 | encode)); 4349 } 4350 4351 // In this context, kdst is written the mask used to process the equal components 4352 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { 4353 assert(VM_Version::supports_evex(), ""); 4354 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4355 attributes.reset_is_clear_context(); 4356 attributes.set_is_evex_instruction(); 4357 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4358 emit_int16(0x29, (0xC0 | encode)); 4359 } 4360 4361 // In this context, kdst is written the mask used to process the equal components 4362 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 4363 assert(VM_Version::supports_evex(), ""); 4364 InstructionMark im(this); 4365 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4366 attributes.reset_is_clear_context(); 4367 attributes.set_is_evex_instruction(); 4368 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 4369 int dst_enc = kdst->encoding(); 4370 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4371 emit_int8(0x29); 4372 emit_operand(as_Register(dst_enc), src); 4373 } 4374 4375 void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) { 4376 assert(VM_Version::supports_sse4_1(), ""); 4377 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4378 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4379 emit_int16(0x37, (0xC0 | encode)); 4380 } 4381 4382 void Assembler::pmovmskb(Register dst, XMMRegister src) { 4383 assert(VM_Version::supports_sse2(), ""); 4384 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4385 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4386 emit_int16((unsigned char)0xD7, (0xC0 | encode)); 4387 } 4388 4389 void Assembler::vpmovmskb(Register dst, XMMRegister src, int vec_enc) { 4390 assert((VM_Version::supports_avx() && vec_enc == AVX_128bit) || 4391 (VM_Version::supports_avx2() && vec_enc == AVX_256bit), ""); 4392 InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4393 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4394 emit_int16((unsigned char)0xD7, (0xC0 | encode)); 4395 } 4396 4397 void Assembler::vmovmskps(Register dst, XMMRegister src, int vec_enc) { 4398 assert(VM_Version::supports_avx(), ""); 4399 InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4400 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4401 emit_int16(0x50, (0xC0 | encode)); 4402 } 4403 4404 void Assembler::vmovmskpd(Register dst, XMMRegister src, int vec_enc) { 4405 assert(VM_Version::supports_avx(), ""); 4406 InstructionAttr attributes(vec_enc, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4407 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4408 emit_int16(0x50, (0xC0 | encode)); 4409 } 4410 4411 void Assembler::vpmaskmovd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4412 assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), ""); 4413 InstructionMark im(this); 4414 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true); 4415 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4416 emit_int8((unsigned char)0x8C); 4417 emit_operand(dst, src); 4418 } 4419 4420 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { 4421 assert(VM_Version::supports_sse4_1(), ""); 4422 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4423 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4424 emit_int24(0x16, (0xC0 | encode), imm8); 4425 } 4426 4427 void Assembler::pextrd(Address dst, XMMRegister src, int imm8) { 4428 assert(VM_Version::supports_sse4_1(), ""); 4429 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4430 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4431 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4432 emit_int8(0x16); 4433 emit_operand(src, dst); 4434 emit_int8(imm8); 4435 } 4436 4437 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { 4438 assert(VM_Version::supports_sse4_1(), ""); 4439 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4440 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4441 emit_int24(0x16, (0xC0 | encode), imm8); 4442 } 4443 4444 void Assembler::pextrq(Address dst, XMMRegister src, int imm8) { 4445 assert(VM_Version::supports_sse4_1(), ""); 4446 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4447 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4448 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4449 emit_int8(0x16); 4450 emit_operand(src, dst); 4451 emit_int8(imm8); 4452 } 4453 4454 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) { 4455 assert(VM_Version::supports_sse2(), ""); 4456 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4457 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4458 emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8); 4459 } 4460 4461 void Assembler::pextrw(Address dst, XMMRegister src, int imm8) { 4462 assert(VM_Version::supports_sse4_1(), ""); 4463 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4464 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); 4465 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4466 emit_int8(0x15); 4467 emit_operand(src, dst); 4468 emit_int8(imm8); 4469 } 4470 4471 void Assembler::pextrb(Register dst, XMMRegister src, int imm8) { 4472 assert(VM_Version::supports_sse4_1(), ""); 4473 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4474 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4475 emit_int24(0x14, (0xC0 | encode), imm8); 4476 } 4477 4478 void Assembler::pextrb(Address dst, XMMRegister src, int imm8) { 4479 assert(VM_Version::supports_sse4_1(), ""); 4480 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4481 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); 4482 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4483 emit_int8(0x14); 4484 emit_operand(src, dst); 4485 emit_int8(imm8); 4486 } 4487 4488 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { 4489 assert(VM_Version::supports_sse4_1(), ""); 4490 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4491 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4492 emit_int24(0x22, (0xC0 | encode), imm8); 4493 } 4494 4495 void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) { 4496 assert(VM_Version::supports_sse4_1(), ""); 4497 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4498 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4499 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4500 emit_int8(0x22); 4501 emit_operand(dst,src); 4502 emit_int8(imm8); 4503 } 4504 4505 void Assembler::vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8) { 4506 assert(VM_Version::supports_avx(), ""); 4507 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4508 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4509 emit_int24(0x22, (0xC0 | encode), imm8); 4510 } 4511 4512 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { 4513 assert(VM_Version::supports_sse4_1(), ""); 4514 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4515 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4516 emit_int24(0x22, (0xC0 | encode), imm8); 4517 } 4518 4519 void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) { 4520 assert(VM_Version::supports_sse4_1(), ""); 4521 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4522 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4523 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4524 emit_int8(0x22); 4525 emit_operand(dst, src); 4526 emit_int8(imm8); 4527 } 4528 4529 void Assembler::vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8) { 4530 assert(VM_Version::supports_avx(), ""); 4531 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4532 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4533 emit_int24(0x22, (0xC0 | encode), imm8); 4534 } 4535 4536 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) { 4537 assert(VM_Version::supports_sse2(), ""); 4538 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4539 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4540 emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8); 4541 } 4542 4543 void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) { 4544 assert(VM_Version::supports_sse2(), ""); 4545 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4546 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); 4547 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4548 emit_int8((unsigned char)0xC4); 4549 emit_operand(dst, src); 4550 emit_int8(imm8); 4551 } 4552 4553 void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8) { 4554 assert(VM_Version::supports_avx(), ""); 4555 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4556 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4557 emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8); 4558 } 4559 4560 void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) { 4561 assert(VM_Version::supports_sse4_1(), ""); 4562 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4563 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); 4564 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4565 emit_int8(0x20); 4566 emit_operand(dst, src); 4567 emit_int8(imm8); 4568 } 4569 4570 void Assembler::pinsrb(XMMRegister dst, Register src, int imm8) { 4571 assert(VM_Version::supports_sse4_1(), ""); 4572 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4573 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4574 emit_int24(0x20, (0xC0 | encode), imm8); 4575 } 4576 4577 void Assembler::vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8) { 4578 assert(VM_Version::supports_avx(), ""); 4579 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4580 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4581 emit_int24(0x20, (0xC0 | encode), imm8); 4582 } 4583 4584 void Assembler::insertps(XMMRegister dst, XMMRegister src, int imm8) { 4585 assert(VM_Version::supports_sse4_1(), ""); 4586 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4587 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4588 emit_int24(0x21, (0xC0 | encode), imm8); 4589 } 4590 4591 void Assembler::vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 4592 assert(VM_Version::supports_avx(), ""); 4593 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4594 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4595 emit_int24(0x21, (0xC0 | encode), imm8); 4596 } 4597 4598 void Assembler::pmovzxbw(XMMRegister dst, Address src) { 4599 assert(VM_Version::supports_sse4_1(), ""); 4600 InstructionMark im(this); 4601 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4602 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 4603 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4604 emit_int8(0x30); 4605 emit_operand(dst, src); 4606 } 4607 4608 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 4609 assert(VM_Version::supports_sse4_1(), ""); 4610 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4611 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4612 emit_int16(0x30, (0xC0 | encode)); 4613 } 4614 4615 void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) { 4616 assert(VM_Version::supports_sse4_1(), ""); 4617 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4618 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4619 emit_int16(0x20, (0xC0 | encode)); 4620 } 4621 4622 void Assembler::pmovzxdq(XMMRegister dst, XMMRegister src) { 4623 assert(VM_Version::supports_sse4_1(), ""); 4624 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4625 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4626 emit_int16(0x35, (0xC0 | encode)); 4627 } 4628 4629 void Assembler::pmovsxbd(XMMRegister dst, XMMRegister src) { 4630 assert(VM_Version::supports_sse4_1(), ""); 4631 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4632 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4633 emit_int16(0x21, (0xC0 | encode)); 4634 } 4635 4636 void Assembler::pmovzxbd(XMMRegister dst, XMMRegister src) { 4637 assert(VM_Version::supports_sse4_1(), ""); 4638 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4639 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4640 emit_int16(0x31, (0xC0 | encode)); 4641 } 4642 4643 void Assembler::pmovsxbq(XMMRegister dst, XMMRegister src) { 4644 assert(VM_Version::supports_sse4_1(), ""); 4645 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4646 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4647 emit_int16(0x22, (0xC0 | encode)); 4648 } 4649 4650 void Assembler::pmovsxwd(XMMRegister dst, XMMRegister src) { 4651 assert(VM_Version::supports_sse4_1(), ""); 4652 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4653 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4654 emit_int16(0x23, (0xC0 | encode)); 4655 } 4656 4657 void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 4658 assert(VM_Version::supports_avx(), ""); 4659 InstructionMark im(this); 4660 assert(dst != xnoreg, "sanity"); 4661 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4662 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 4663 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4664 emit_int8(0x30); 4665 emit_operand(dst, src); 4666 } 4667 4668 void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { 4669 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4670 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4671 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 4672 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4673 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4674 emit_int16(0x30, (unsigned char) (0xC0 | encode)); 4675 } 4676 4677 void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) { 4678 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4679 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4680 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 4681 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4682 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4683 emit_int16(0x20, (0xC0 | encode)); 4684 } 4685 4686 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) { 4687 assert(VM_Version::supports_avx512vlbw(), ""); 4688 assert(dst != xnoreg, "sanity"); 4689 InstructionMark im(this); 4690 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 4691 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 4692 attributes.set_embedded_opmask_register_specifier(mask); 4693 attributes.set_is_evex_instruction(); 4694 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4695 emit_int8(0x30); 4696 emit_operand(dst, src); 4697 } 4698 4699 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 4700 assert(VM_Version::supports_evex(), ""); 4701 // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r 4702 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 4703 attributes.set_is_evex_instruction(); 4704 attributes.set_embedded_opmask_register_specifier(mask); 4705 if (merge) { 4706 attributes.reset_is_clear_context(); 4707 } 4708 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4709 emit_int16((unsigned char)0xDB, (0xC0 | encode)); 4710 } 4711 4712 void Assembler::vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len) { 4713 assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), ""); 4714 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4715 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4716 emit_int16(0x35, (0xC0 | encode)); 4717 } 4718 4719 void Assembler::vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len) { 4720 assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), ""); 4721 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4722 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4723 emit_int16(0x31, (0xC0 | encode)); 4724 } 4725 4726 void Assembler::vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len) { 4727 assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), ""); 4728 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4729 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4730 emit_int16(0x32, (0xC0 | encode)); 4731 } 4732 4733 void Assembler::vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len) { 4734 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4735 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4736 VM_Version::supports_evex(), ""); 4737 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4738 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4739 emit_int16(0x21, (0xC0 | encode)); 4740 } 4741 4742 void Assembler::vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len) { 4743 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4744 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4745 VM_Version::supports_evex(), ""); 4746 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4747 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4748 emit_int16(0x22, (0xC0 | encode)); 4749 } 4750 4751 void Assembler::vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len) { 4752 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4753 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4754 VM_Version::supports_evex(), ""); 4755 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4756 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4757 emit_int16(0x23, (0xC0 | encode)); 4758 } 4759 4760 void Assembler::vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len) { 4761 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4762 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4763 VM_Version::supports_evex(), ""); 4764 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4765 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4766 emit_int16(0x24, (0xC0 | encode)); 4767 } 4768 4769 void Assembler::vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len) { 4770 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4771 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4772 VM_Version::supports_evex(), ""); 4773 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4774 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4775 emit_int16(0x25, (0xC0 | encode)); 4776 } 4777 4778 void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) { 4779 assert(VM_Version::supports_avx512vlbw(), ""); 4780 assert(src != xnoreg, "sanity"); 4781 InstructionMark im(this); 4782 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4783 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 4784 attributes.set_is_evex_instruction(); 4785 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 4786 emit_int8(0x30); 4787 emit_operand(src, dst); 4788 } 4789 4790 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) { 4791 assert(VM_Version::supports_avx512vlbw(), ""); 4792 assert(src != xnoreg, "sanity"); 4793 InstructionMark im(this); 4794 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 4795 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 4796 attributes.reset_is_clear_context(); 4797 attributes.set_embedded_opmask_register_specifier(mask); 4798 attributes.set_is_evex_instruction(); 4799 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 4800 emit_int8(0x30); 4801 emit_operand(src, dst); 4802 } 4803 4804 void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) { 4805 assert(VM_Version::supports_evex(), ""); 4806 assert(src != xnoreg, "sanity"); 4807 InstructionMark im(this); 4808 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4809 attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit); 4810 attributes.set_is_evex_instruction(); 4811 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 4812 emit_int8(0x31); 4813 emit_operand(src, dst); 4814 } 4815 4816 void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) { 4817 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4818 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4819 vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " "); 4820 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4821 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4822 emit_int16(0x33, (0xC0 | encode)); 4823 } 4824 4825 void Assembler::vpmovzxwq(XMMRegister dst, XMMRegister src, int vector_len) { 4826 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4827 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4828 vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " "); 4829 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4830 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4831 emit_int16(0x34, (0xC0 | encode)); 4832 } 4833 4834 void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) { 4835 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4836 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4837 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4838 emit_int16((unsigned char)0xF5, (0xC0 | encode)); 4839 } 4840 4841 void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4842 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4843 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4844 (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), ""); 4845 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4846 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4847 emit_int16((unsigned char)0xF5, (0xC0 | encode)); 4848 } 4849 4850 void Assembler::vpmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { 4851 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4852 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4853 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 4854 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4855 int encode = simd_prefix_and_encode(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4856 emit_int16(0x04, (0xC0 | encode)); 4857 } 4858 4859 void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4860 assert(VM_Version::supports_evex(), ""); 4861 assert(VM_Version::supports_avx512_vnni(), "must support vnni"); 4862 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4863 attributes.set_is_evex_instruction(); 4864 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4865 emit_int16(0x52, (0xC0 | encode)); 4866 } 4867 4868 // generic 4869 void Assembler::pop(Register dst) { 4870 int encode = prefix_and_encode(dst->encoding()); 4871 emit_int8(0x58 | encode); 4872 } 4873 4874 void Assembler::popcntl(Register dst, Address src) { 4875 assert(VM_Version::supports_popcnt(), "must support"); 4876 InstructionMark im(this); 4877 emit_int8((unsigned char)0xF3); 4878 prefix(src, dst); 4879 emit_int16(0x0F, (unsigned char)0xB8); 4880 emit_operand(dst, src); 4881 } 4882 4883 void Assembler::popcntl(Register dst, Register src) { 4884 assert(VM_Version::supports_popcnt(), "must support"); 4885 emit_int8((unsigned char)0xF3); 4886 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 4887 emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode)); 4888 } 4889 4890 void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) { 4891 assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature"); 4892 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4893 attributes.set_is_evex_instruction(); 4894 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4895 emit_int16(0x55, (0xC0 | encode)); 4896 } 4897 4898 void Assembler::vpopcntq(XMMRegister dst, XMMRegister src, int vector_len) { 4899 assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature"); 4900 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4901 attributes.set_is_evex_instruction(); 4902 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4903 emit_int16(0x55, (0xC0 | encode)); 4904 } 4905 4906 void Assembler::popf() { 4907 emit_int8((unsigned char)0x9D); 4908 } 4909 4910 #ifndef _LP64 // no 32bit push/pop on amd64 4911 void Assembler::popl(Address dst) { 4912 // NOTE: this will adjust stack by 8byte on 64bits 4913 InstructionMark im(this); 4914 prefix(dst); 4915 emit_int8((unsigned char)0x8F); 4916 emit_operand(rax, dst); 4917 } 4918 #endif 4919 4920 void Assembler::prefetchnta(Address src) { 4921 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 4922 InstructionMark im(this); 4923 prefix(src); 4924 emit_int16(0x0F, 0x18); 4925 emit_operand(rax, src); // 0, src 4926 } 4927 4928 void Assembler::prefetchr(Address src) { 4929 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 4930 InstructionMark im(this); 4931 prefix(src); 4932 emit_int16(0x0F, 0x0D); 4933 emit_operand(rax, src); // 0, src 4934 } 4935 4936 void Assembler::prefetcht0(Address src) { 4937 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 4938 InstructionMark im(this); 4939 prefix(src); 4940 emit_int16(0x0F, 0x18); 4941 emit_operand(rcx, src); // 1, src 4942 } 4943 4944 void Assembler::prefetcht1(Address src) { 4945 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 4946 InstructionMark im(this); 4947 prefix(src); 4948 emit_int16(0x0F, 0x18); 4949 emit_operand(rdx, src); // 2, src 4950 } 4951 4952 void Assembler::prefetcht2(Address src) { 4953 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 4954 InstructionMark im(this); 4955 prefix(src); 4956 emit_int16(0x0F, 0x18); 4957 emit_operand(rbx, src); // 3, src 4958 } 4959 4960 void Assembler::prefetchw(Address src) { 4961 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 4962 InstructionMark im(this); 4963 prefix(src); 4964 emit_int16(0x0F, 0x0D); 4965 emit_operand(rcx, src); // 1, src 4966 } 4967 4968 void Assembler::prefix(Prefix p) { 4969 emit_int8(p); 4970 } 4971 4972 void Assembler::pshufb(XMMRegister dst, XMMRegister src) { 4973 assert(VM_Version::supports_ssse3(), ""); 4974 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4975 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4976 emit_int16(0x00, (0xC0 | encode)); 4977 } 4978 4979 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4980 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4981 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4982 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 4983 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4984 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4985 emit_int16(0x00, (0xC0 | encode)); 4986 } 4987 4988 void Assembler::pshufb(XMMRegister dst, Address src) { 4989 assert(VM_Version::supports_ssse3(), ""); 4990 InstructionMark im(this); 4991 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4992 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4993 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4994 emit_int8(0x00); 4995 emit_operand(dst, src); 4996 } 4997 4998 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { 4999 assert(isByte(mode), "invalid value"); 5000 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5001 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 5002 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5003 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5004 emit_int24(0x70, (0xC0 | encode), mode & 0xFF); 5005 } 5006 5007 void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) { 5008 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 5009 (vector_len == AVX_256bit? VM_Version::supports_avx2() : 5010 (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), ""); 5011 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5012 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5013 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5014 emit_int24(0x70, (0xC0 | encode), mode & 0xFF); 5015 } 5016 5017 void Assembler::pshufd(XMMRegister dst, Address src, int mode) { 5018 assert(isByte(mode), "invalid value"); 5019 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5020 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 5021 InstructionMark im(this); 5022 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5023 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5024 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5025 emit_int8(0x70); 5026 emit_operand(dst, src); 5027 emit_int8(mode & 0xFF); 5028 } 5029 5030 void Assembler::pshufhw(XMMRegister dst, XMMRegister src, int mode) { 5031 assert(isByte(mode), "invalid value"); 5032 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5033 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5034 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 5035 emit_int24(0x70, (0xC0 | encode), mode & 0xFF); 5036 } 5037 5038 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 5039 assert(isByte(mode), "invalid value"); 5040 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5041 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5042 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 5043 emit_int24(0x70, (0xC0 | encode), mode & 0xFF); 5044 } 5045 5046 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { 5047 assert(isByte(mode), "invalid value"); 5048 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5049 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 5050 InstructionMark im(this); 5051 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5052 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 5053 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 5054 emit_int8(0x70); 5055 emit_operand(dst, src); 5056 emit_int8(mode & 0xFF); 5057 } 5058 5059 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 5060 assert(VM_Version::supports_evex(), "requires EVEX support"); 5061 assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); 5062 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5063 attributes.set_is_evex_instruction(); 5064 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5065 emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF); 5066 } 5067 5068 void Assembler::pshufpd(XMMRegister dst, XMMRegister src, int imm8) { 5069 assert(isByte(imm8), "invalid value"); 5070 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5071 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5072 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5073 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF); 5074 } 5075 5076 void Assembler::vpshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 5077 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5078 attributes.set_rex_vex_w_reverted(); 5079 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5080 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF); 5081 } 5082 5083 void Assembler::pshufps(XMMRegister dst, XMMRegister src, int imm8) { 5084 assert(isByte(imm8), "invalid value"); 5085 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5086 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5087 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5088 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF); 5089 } 5090 5091 void Assembler::vpshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 5092 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5093 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5094 emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF); 5095 } 5096 5097 void Assembler::psrldq(XMMRegister dst, int shift) { 5098 // Shift left 128 bit value in dst XMMRegister by shift number of bytes. 5099 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5100 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5101 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5102 emit_int24(0x73, (0xC0 | encode), shift); 5103 } 5104 5105 void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 5106 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 5107 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 5108 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); 5109 InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5110 int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5111 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 5112 } 5113 5114 void Assembler::pslldq(XMMRegister dst, int shift) { 5115 // Shift left 128 bit value in dst XMMRegister by shift number of bytes. 5116 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5117 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5118 // XMM7 is for /7 encoding: 66 0F 73 /7 ib 5119 int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5120 emit_int24(0x73, (0xC0 | encode), shift); 5121 } 5122 5123 void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 5124 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 5125 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 5126 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); 5127 InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5128 int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5129 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 5130 } 5131 5132 void Assembler::ptest(XMMRegister dst, Address src) { 5133 assert(VM_Version::supports_sse4_1(), ""); 5134 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 5135 InstructionMark im(this); 5136 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5137 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5138 emit_int8(0x17); 5139 emit_operand(dst, src); 5140 } 5141 5142 void Assembler::ptest(XMMRegister dst, XMMRegister src) { 5143 assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), ""); 5144 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5145 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5146 emit_int8(0x17); 5147 emit_int8((0xC0 | encode)); 5148 } 5149 5150 void Assembler::vptest(XMMRegister dst, Address src) { 5151 assert(VM_Version::supports_avx(), ""); 5152 InstructionMark im(this); 5153 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5154 assert(dst != xnoreg, "sanity"); 5155 // swap src<->dst for encoding 5156 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5157 emit_int8(0x17); 5158 emit_operand(dst, src); 5159 } 5160 5161 void Assembler::vptest(XMMRegister dst, XMMRegister src) { 5162 assert(VM_Version::supports_avx(), ""); 5163 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5164 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5165 emit_int16(0x17, (0xC0 | encode)); 5166 } 5167 5168 void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) { 5169 assert(VM_Version::supports_avx(), ""); 5170 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5171 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5172 emit_int16(0x17, (0xC0 | encode)); 5173 } 5174 5175 void Assembler::evptestmb(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5176 assert(VM_Version::supports_avx512vlbw(), ""); 5177 // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r 5178 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5179 attributes.set_is_evex_instruction(); 5180 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5181 emit_int16((unsigned char)0x26, (0xC0 | encode)); 5182 } 5183 5184 void Assembler::punpcklbw(XMMRegister dst, Address src) { 5185 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5186 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 5187 InstructionMark im(this); 5188 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true); 5189 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 5190 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5191 emit_int8(0x60); 5192 emit_operand(dst, src); 5193 } 5194 5195 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { 5196 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5197 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true); 5198 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5199 emit_int16(0x60, (0xC0 | encode)); 5200 } 5201 5202 void Assembler::punpckldq(XMMRegister dst, Address src) { 5203 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5204 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 5205 InstructionMark im(this); 5206 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5207 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5208 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5209 emit_int8(0x62); 5210 emit_operand(dst, src); 5211 } 5212 5213 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) { 5214 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5215 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5216 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5217 emit_int16(0x62, (0xC0 | encode)); 5218 } 5219 5220 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) { 5221 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5222 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5223 attributes.set_rex_vex_w_reverted(); 5224 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5225 emit_int16(0x6C, (0xC0 | encode)); 5226 } 5227 5228 void Assembler::push(int32_t imm32) { 5229 // in 64bits we push 64bits onto the stack but only 5230 // take a 32bit immediate 5231 emit_int8(0x68); 5232 emit_int32(imm32); 5233 } 5234 5235 void Assembler::push(Register src) { 5236 int encode = prefix_and_encode(src->encoding()); 5237 emit_int8(0x50 | encode); 5238 } 5239 5240 void Assembler::pushf() { 5241 emit_int8((unsigned char)0x9C); 5242 } 5243 5244 #ifndef _LP64 // no 32bit push/pop on amd64 5245 void Assembler::pushl(Address src) { 5246 // Note this will push 64bit on 64bit 5247 InstructionMark im(this); 5248 prefix(src); 5249 emit_int8((unsigned char)0xFF); 5250 emit_operand(rsi, src); 5251 } 5252 #endif 5253 5254 void Assembler::rcll(Register dst, int imm8) { 5255 assert(isShiftCount(imm8), "illegal shift count"); 5256 int encode = prefix_and_encode(dst->encoding()); 5257 if (imm8 == 1) { 5258 emit_int16((unsigned char)0xD1, (0xD0 | encode)); 5259 } else { 5260 emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8); 5261 } 5262 } 5263 5264 void Assembler::rcpps(XMMRegister dst, XMMRegister src) { 5265 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5266 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5267 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5268 emit_int16(0x53, (0xC0 | encode)); 5269 } 5270 5271 void Assembler::rcpss(XMMRegister dst, XMMRegister src) { 5272 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5273 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5274 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 5275 emit_int16(0x53, (0xC0 | encode)); 5276 } 5277 5278 void Assembler::rdtsc() { 5279 emit_int16(0x0F, 0x31); 5280 } 5281 5282 // copies data from [esi] to [edi] using rcx pointer sized words 5283 // generic 5284 void Assembler::rep_mov() { 5285 // REP 5286 // MOVSQ 5287 LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);) 5288 NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xA5);) 5289 } 5290 5291 // sets rcx bytes with rax, value at [edi] 5292 void Assembler::rep_stosb() { 5293 // REP 5294 // STOSB 5295 LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);) 5296 NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAA);) 5297 } 5298 5299 // sets rcx pointer sized words with rax, value at [edi] 5300 // generic 5301 void Assembler::rep_stos() { 5302 // REP 5303 // LP64:STOSQ, LP32:STOSD 5304 LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);) 5305 NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAB);) 5306 } 5307 5308 // scans rcx pointer sized words at [edi] for occurrence of rax, 5309 // generic 5310 void Assembler::repne_scan() { // repne_scan 5311 // SCASQ 5312 LP64_ONLY(emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);) 5313 NOT_LP64( emit_int16((unsigned char)0xF2, (unsigned char)0xAF);) 5314 } 5315 5316 #ifdef _LP64 5317 // scans rcx 4 byte words at [edi] for occurrence of rax, 5318 // generic 5319 void Assembler::repne_scanl() { // repne_scan 5320 // SCASL 5321 emit_int16((unsigned char)0xF2, (unsigned char)0xAF); 5322 } 5323 #endif 5324 5325 void Assembler::ret(int imm16) { 5326 if (imm16 == 0) { 5327 emit_int8((unsigned char)0xC3); 5328 } else { 5329 emit_int8((unsigned char)0xC2); 5330 emit_int16(imm16); 5331 } 5332 } 5333 5334 void Assembler::roll(Register dst, int imm8) { 5335 assert(isShiftCount(imm8), "illegal shift count"); 5336 int encode = prefix_and_encode(dst->encoding()); 5337 if (imm8 == 1) { 5338 emit_int16((unsigned char)0xD1, (0xC0 | encode)); 5339 } else { 5340 emit_int24((unsigned char)0xC1, (0xc0 | encode), imm8); 5341 } 5342 } 5343 5344 void Assembler::roll(Register dst) { 5345 int encode = prefix_and_encode(dst->encoding()); 5346 emit_int16((unsigned char)0xD3, (0xC0 | encode)); 5347 } 5348 5349 void Assembler::rorl(Register dst, int imm8) { 5350 assert(isShiftCount(imm8), "illegal shift count"); 5351 int encode = prefix_and_encode(dst->encoding()); 5352 if (imm8 == 1) { 5353 emit_int16((unsigned char)0xD1, (0xC8 | encode)); 5354 } else { 5355 emit_int24((unsigned char)0xC1, (0xc8 | encode), imm8); 5356 } 5357 } 5358 5359 void Assembler::rorl(Register dst) { 5360 int encode = prefix_and_encode(dst->encoding()); 5361 emit_int16((unsigned char)0xD3, (0xC8 | encode)); 5362 } 5363 5364 #ifdef _LP64 5365 void Assembler::rorq(Register dst) { 5366 int encode = prefixq_and_encode(dst->encoding()); 5367 emit_int16((unsigned char)0xD3, (0xC8 | encode)); 5368 } 5369 5370 void Assembler::rorq(Register dst, int imm8) { 5371 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 5372 int encode = prefixq_and_encode(dst->encoding()); 5373 if (imm8 == 1) { 5374 emit_int16((unsigned char)0xD1, (0xC8 | encode)); 5375 } else { 5376 emit_int24((unsigned char)0xC1, (0xc8 | encode), imm8); 5377 } 5378 } 5379 5380 void Assembler::rolq(Register dst) { 5381 int encode = prefixq_and_encode(dst->encoding()); 5382 emit_int16((unsigned char)0xD3, (0xC0 | encode)); 5383 } 5384 5385 void Assembler::rolq(Register dst, int imm8) { 5386 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 5387 int encode = prefixq_and_encode(dst->encoding()); 5388 if (imm8 == 1) { 5389 emit_int16((unsigned char)0xD1, (0xC0 | encode)); 5390 } else { 5391 emit_int24((unsigned char)0xC1, (0xc0 | encode), imm8); 5392 } 5393 } 5394 #endif 5395 5396 void Assembler::sahf() { 5397 #ifdef _LP64 5398 // Not supported in 64bit mode 5399 ShouldNotReachHere(); 5400 #endif 5401 emit_int8((unsigned char)0x9E); 5402 } 5403 5404 void Assembler::sall(Address dst, int imm8) { 5405 InstructionMark im(this); 5406 assert(isShiftCount(imm8), "illegal shift count"); 5407 prefix(dst); 5408 if (imm8 == 1) { 5409 emit_int8((unsigned char)0xD1); 5410 emit_operand(as_Register(4), dst); 5411 } 5412 else { 5413 emit_int8((unsigned char)0xC1); 5414 emit_operand(as_Register(4), dst); 5415 emit_int8(imm8); 5416 } 5417 } 5418 5419 void Assembler::sall(Address dst) { 5420 InstructionMark im(this); 5421 prefix(dst); 5422 emit_int8((unsigned char)0xD3); 5423 emit_operand(as_Register(4), dst); 5424 } 5425 5426 void Assembler::sall(Register dst, int imm8) { 5427 assert(isShiftCount(imm8), "illegal shift count"); 5428 int encode = prefix_and_encode(dst->encoding()); 5429 if (imm8 == 1) { 5430 emit_int16((unsigned char)0xD1, (0xE0 | encode)); 5431 } else { 5432 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8); 5433 } 5434 } 5435 5436 void Assembler::sall(Register dst) { 5437 int encode = prefix_and_encode(dst->encoding()); 5438 emit_int16((unsigned char)0xD3, (0xE0 | encode)); 5439 } 5440 5441 void Assembler::sarl(Address dst, int imm8) { 5442 assert(isShiftCount(imm8), "illegal shift count"); 5443 InstructionMark im(this); 5444 prefix(dst); 5445 if (imm8 == 1) { 5446 emit_int8((unsigned char)0xD1); 5447 emit_operand(as_Register(7), dst); 5448 } 5449 else { 5450 emit_int8((unsigned char)0xC1); 5451 emit_operand(as_Register(7), dst); 5452 emit_int8(imm8); 5453 } 5454 } 5455 5456 void Assembler::sarl(Address dst) { 5457 InstructionMark im(this); 5458 prefix(dst); 5459 emit_int8((unsigned char)0xD3); 5460 emit_operand(as_Register(7), dst); 5461 } 5462 5463 void Assembler::sarl(Register dst, int imm8) { 5464 int encode = prefix_and_encode(dst->encoding()); 5465 assert(isShiftCount(imm8), "illegal shift count"); 5466 if (imm8 == 1) { 5467 emit_int16((unsigned char)0xD1, (0xF8 | encode)); 5468 } else { 5469 emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8); 5470 } 5471 } 5472 5473 void Assembler::sarl(Register dst) { 5474 int encode = prefix_and_encode(dst->encoding()); 5475 emit_int16((unsigned char)0xD3, (0xF8 | encode)); 5476 } 5477 5478 void Assembler::sbbl(Address dst, int32_t imm32) { 5479 InstructionMark im(this); 5480 prefix(dst); 5481 emit_arith_operand(0x81, rbx, dst, imm32); 5482 } 5483 5484 void Assembler::sbbl(Register dst, int32_t imm32) { 5485 prefix(dst); 5486 emit_arith(0x81, 0xD8, dst, imm32); 5487 } 5488 5489 5490 void Assembler::sbbl(Register dst, Address src) { 5491 InstructionMark im(this); 5492 prefix(src, dst); 5493 emit_int8(0x1B); 5494 emit_operand(dst, src); 5495 } 5496 5497 void Assembler::sbbl(Register dst, Register src) { 5498 (void) prefix_and_encode(dst->encoding(), src->encoding()); 5499 emit_arith(0x1B, 0xC0, dst, src); 5500 } 5501 5502 void Assembler::setb(Condition cc, Register dst) { 5503 assert(0 <= cc && cc < 16, "illegal cc"); 5504 int encode = prefix_and_encode(dst->encoding(), true); 5505 emit_int24(0x0F, (unsigned char)0x90 | cc, (0xC0 | encode)); 5506 } 5507 5508 void Assembler::sete(Register dst) { 5509 int encode = prefix_and_encode(dst->encoding(), true); 5510 emit_int24(0x0F, (unsigned char)0x94, (0xC0 | encode)); 5511 } 5512 5513 void Assembler::setl(Register dst) { 5514 int encode = prefix_and_encode(dst->encoding(), true); 5515 emit_int24(0x0F, (unsigned char)0x9C, (0xC0 | encode)); 5516 } 5517 5518 void Assembler::setne(Register dst) { 5519 int encode = prefix_and_encode(dst->encoding(), true); 5520 emit_int24(0x0F, (unsigned char)0x95, (0xC0 | encode)); 5521 } 5522 5523 void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) { 5524 assert(VM_Version::supports_ssse3(), ""); 5525 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5526 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5527 emit_int24(0x0F, (0xC0 | encode), imm8); 5528 } 5529 5530 void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 5531 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 5532 vector_len == AVX_256bit? VM_Version::supports_avx2() : 5533 0, ""); 5534 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5535 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5536 emit_int24(0x0F, (0xC0 | encode), imm8); 5537 } 5538 5539 void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 5540 assert(VM_Version::supports_evex(), ""); 5541 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5542 attributes.set_is_evex_instruction(); 5543 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5544 emit_int24(0x3, (0xC0 | encode), imm8); 5545 } 5546 5547 void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) { 5548 assert(VM_Version::supports_sse4_1(), ""); 5549 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5550 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5551 emit_int24(0x0E, (0xC0 | encode), imm8); 5552 } 5553 5554 void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) { 5555 assert(VM_Version::supports_sha(), ""); 5556 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false); 5557 emit_int24((unsigned char)0xCC, (0xC0 | encode), (unsigned char)imm8); 5558 } 5559 5560 void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) { 5561 assert(VM_Version::supports_sha(), ""); 5562 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 5563 emit_int16((unsigned char)0xC8, (0xC0 | encode)); 5564 } 5565 5566 void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) { 5567 assert(VM_Version::supports_sha(), ""); 5568 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 5569 emit_int16((unsigned char)0xC9, (0xC0 | encode)); 5570 } 5571 5572 void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) { 5573 assert(VM_Version::supports_sha(), ""); 5574 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 5575 emit_int16((unsigned char)0xCA, (0xC0 | encode)); 5576 } 5577 5578 // xmm0 is implicit additional source to this instruction. 5579 void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) { 5580 assert(VM_Version::supports_sha(), ""); 5581 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 5582 emit_int16((unsigned char)0xCB, (0xC0 | encode)); 5583 } 5584 5585 void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) { 5586 assert(VM_Version::supports_sha(), ""); 5587 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 5588 emit_int16((unsigned char)0xCC, (0xC0 | encode)); 5589 } 5590 5591 void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) { 5592 assert(VM_Version::supports_sha(), ""); 5593 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 5594 emit_int16((unsigned char)0xCD, (0xC0 | encode)); 5595 } 5596 5597 5598 void Assembler::shll(Register dst, int imm8) { 5599 assert(isShiftCount(imm8), "illegal shift count"); 5600 int encode = prefix_and_encode(dst->encoding()); 5601 if (imm8 == 1 ) { 5602 emit_int16((unsigned char)0xD1, (0xE0 | encode)); 5603 } else { 5604 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8); 5605 } 5606 } 5607 5608 void Assembler::shll(Register dst) { 5609 int encode = prefix_and_encode(dst->encoding()); 5610 emit_int16((unsigned char)0xD3, (0xE0 | encode)); 5611 } 5612 5613 void Assembler::shrl(Register dst, int imm8) { 5614 assert(isShiftCount(imm8), "illegal shift count"); 5615 int encode = prefix_and_encode(dst->encoding()); 5616 if (imm8 == 1) { 5617 emit_int16((unsigned char)0xD1, (0xE8 | encode)); 5618 } 5619 else { 5620 emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8); 5621 } 5622 } 5623 5624 void Assembler::shrl(Register dst) { 5625 int encode = prefix_and_encode(dst->encoding()); 5626 emit_int16((unsigned char)0xD3, (0xE8 | encode)); 5627 } 5628 5629 void Assembler::shrl(Address dst) { 5630 InstructionMark im(this); 5631 prefix(dst); 5632 emit_int8((unsigned char)0xD3); 5633 emit_operand(as_Register(5), dst); 5634 } 5635 5636 void Assembler::shrl(Address dst, int imm8) { 5637 InstructionMark im(this); 5638 assert(isShiftCount(imm8), "illegal shift count"); 5639 prefix(dst); 5640 if (imm8 == 1) { 5641 emit_int8((unsigned char)0xD1); 5642 emit_operand(as_Register(5), dst); 5643 } 5644 else { 5645 emit_int8((unsigned char)0xC1); 5646 emit_operand(as_Register(5), dst); 5647 emit_int8(imm8); 5648 } 5649 } 5650 5651 5652 void Assembler::shldl(Register dst, Register src) { 5653 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 5654 emit_int24(0x0F, (unsigned char)0xA5, (0xC0 | encode)); 5655 } 5656 5657 void Assembler::shldl(Register dst, Register src, int8_t imm8) { 5658 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 5659 emit_int32(0x0F, (unsigned char)0xA4, (0xC0 | encode), imm8); 5660 } 5661 5662 void Assembler::shrdl(Register dst, Register src) { 5663 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 5664 emit_int24(0x0F, (unsigned char)0xAD, (0xC0 | encode)); 5665 } 5666 5667 void Assembler::shrdl(Register dst, Register src, int8_t imm8) { 5668 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 5669 emit_int32(0x0F, (unsigned char)0xAC, (0xC0 | encode), imm8); 5670 } 5671 5672 // copies a single word from [esi] to [edi] 5673 void Assembler::smovl() { 5674 emit_int8((unsigned char)0xA5); 5675 } 5676 5677 void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { 5678 assert(VM_Version::supports_sse4_1(), ""); 5679 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5680 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5681 emit_int24(0x0B, (0xC0 | encode), (unsigned char)rmode); 5682 } 5683 5684 void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) { 5685 assert(VM_Version::supports_sse4_1(), ""); 5686 InstructionMark im(this); 5687 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5688 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5689 emit_int8(0x0B); 5690 emit_operand(dst, src); 5691 emit_int8((unsigned char)rmode); 5692 } 5693 5694 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { 5695 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5696 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5697 attributes.set_rex_vex_w_reverted(); 5698 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 5699 emit_int16(0x51, (0xC0 | encode)); 5700 } 5701 5702 void Assembler::sqrtsd(XMMRegister dst, Address src) { 5703 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5704 InstructionMark im(this); 5705 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5706 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 5707 attributes.set_rex_vex_w_reverted(); 5708 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 5709 emit_int8(0x51); 5710 emit_operand(dst, src); 5711 } 5712 5713 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 5714 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5715 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5716 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 5717 emit_int16(0x51, (0xC0 | encode)); 5718 } 5719 5720 void Assembler::std() { 5721 emit_int8((unsigned char)0xFD); 5722 } 5723 5724 void Assembler::sqrtss(XMMRegister dst, Address src) { 5725 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5726 InstructionMark im(this); 5727 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5728 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 5729 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 5730 emit_int8(0x51); 5731 emit_operand(dst, src); 5732 } 5733 5734 void Assembler::stmxcsr( Address dst) { 5735 if (UseAVX > 0 ) { 5736 assert(VM_Version::supports_avx(), ""); 5737 InstructionMark im(this); 5738 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5739 vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5740 emit_int8((unsigned char)0xAE); 5741 emit_operand(as_Register(3), dst); 5742 } else { 5743 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5744 InstructionMark im(this); 5745 prefix(dst); 5746 emit_int16(0x0F, (unsigned char)0xAE); 5747 emit_operand(as_Register(3), dst); 5748 } 5749 } 5750 5751 void Assembler::subl(Address dst, int32_t imm32) { 5752 InstructionMark im(this); 5753 prefix(dst); 5754 emit_arith_operand(0x81, rbp, dst, imm32); 5755 } 5756 5757 void Assembler::subl(Address dst, Register src) { 5758 InstructionMark im(this); 5759 prefix(dst, src); 5760 emit_int8(0x29); 5761 emit_operand(src, dst); 5762 } 5763 5764 void Assembler::subl(Register dst, int32_t imm32) { 5765 prefix(dst); 5766 emit_arith(0x81, 0xE8, dst, imm32); 5767 } 5768 5769 // Force generation of a 4 byte immediate value even if it fits into 8bit 5770 void Assembler::subl_imm32(Register dst, int32_t imm32) { 5771 prefix(dst); 5772 emit_arith_imm32(0x81, 0xE8, dst, imm32); 5773 } 5774 5775 void Assembler::subl(Register dst, Address src) { 5776 InstructionMark im(this); 5777 prefix(src, dst); 5778 emit_int8(0x2B); 5779 emit_operand(dst, src); 5780 } 5781 5782 void Assembler::subl(Register dst, Register src) { 5783 (void) prefix_and_encode(dst->encoding(), src->encoding()); 5784 emit_arith(0x2B, 0xC0, dst, src); 5785 } 5786 5787 void Assembler::subsd(XMMRegister dst, XMMRegister src) { 5788 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5789 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5790 attributes.set_rex_vex_w_reverted(); 5791 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 5792 emit_int16(0x5C, (0xC0 | encode)); 5793 } 5794 5795 void Assembler::subsd(XMMRegister dst, Address src) { 5796 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5797 InstructionMark im(this); 5798 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5799 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 5800 attributes.set_rex_vex_w_reverted(); 5801 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 5802 emit_int8(0x5C); 5803 emit_operand(dst, src); 5804 } 5805 5806 void Assembler::subss(XMMRegister dst, XMMRegister src) { 5807 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5808 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false); 5809 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 5810 emit_int16(0x5C, (0xC0 | encode)); 5811 } 5812 5813 void Assembler::subss(XMMRegister dst, Address src) { 5814 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5815 InstructionMark im(this); 5816 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5817 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 5818 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 5819 emit_int8(0x5C); 5820 emit_operand(dst, src); 5821 } 5822 5823 void Assembler::testb(Register dst, int imm8) { 5824 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 5825 if (dst == rax) { 5826 emit_int8((unsigned char)0xA8); 5827 emit_int8(imm8); 5828 } else { 5829 (void) prefix_and_encode(dst->encoding(), true); 5830 emit_arith_b(0xF6, 0xC0, dst, imm8); 5831 } 5832 } 5833 5834 void Assembler::testb(Address dst, int imm8) { 5835 InstructionMark im(this); 5836 prefix(dst); 5837 emit_int8((unsigned char)0xF6); 5838 emit_operand(rax, dst, 1); 5839 emit_int8(imm8); 5840 } 5841 5842 void Assembler::testl(Address dst, int32_t imm32) { 5843 if (imm32 >= 0 && is8bit(imm32)) { 5844 testb(dst, imm32); 5845 return; 5846 } 5847 InstructionMark im(this); 5848 emit_int8((unsigned char)0xF7); 5849 emit_operand(as_Register(0), dst); 5850 emit_int32(imm32); 5851 } 5852 5853 void Assembler::testl(Register dst, int32_t imm32) { 5854 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 5855 testb(dst, imm32); 5856 return; 5857 } 5858 // not using emit_arith because test 5859 // doesn't support sign-extension of 5860 // 8bit operands 5861 if (dst == rax) { 5862 emit_int8((unsigned char)0xA9); 5863 emit_int32(imm32); 5864 } else { 5865 int encode = dst->encoding(); 5866 encode = prefix_and_encode(encode); 5867 emit_int16((unsigned char)0xF7, (0xC0 | encode)); 5868 emit_int32(imm32); 5869 } 5870 } 5871 5872 void Assembler::testl(Register dst, Register src) { 5873 (void) prefix_and_encode(dst->encoding(), src->encoding()); 5874 emit_arith(0x85, 0xC0, dst, src); 5875 } 5876 5877 void Assembler::testl(Register dst, Address src) { 5878 InstructionMark im(this); 5879 prefix(src, dst); 5880 emit_int8((unsigned char)0x85); 5881 emit_operand(dst, src); 5882 } 5883 5884 void Assembler::tzcntl(Register dst, Register src) { 5885 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 5886 emit_int8((unsigned char)0xF3); 5887 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 5888 emit_int24(0x0F, 5889 (unsigned char)0xBC, 5890 0xC0 | encode); 5891 } 5892 5893 void Assembler::tzcntq(Register dst, Register src) { 5894 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 5895 emit_int8((unsigned char)0xF3); 5896 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 5897 emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode)); 5898 } 5899 5900 void Assembler::ucomisd(XMMRegister dst, Address src) { 5901 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5902 InstructionMark im(this); 5903 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5904 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 5905 attributes.set_rex_vex_w_reverted(); 5906 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5907 emit_int8(0x2E); 5908 emit_operand(dst, src); 5909 } 5910 5911 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { 5912 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5913 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5914 attributes.set_rex_vex_w_reverted(); 5915 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5916 emit_int16(0x2E, (0xC0 | encode)); 5917 } 5918 5919 void Assembler::ucomiss(XMMRegister dst, Address src) { 5920 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5921 InstructionMark im(this); 5922 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5923 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 5924 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5925 emit_int8(0x2E); 5926 emit_operand(dst, src); 5927 } 5928 5929 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { 5930 NOT_LP64(assert(VM_Version::supports_sse(), "")); 5931 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5932 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5933 emit_int16(0x2E, (0xC0 | encode)); 5934 } 5935 5936 void Assembler::xabort(int8_t imm8) { 5937 emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (imm8 & 0xFF)); 5938 } 5939 5940 void Assembler::xaddb(Address dst, Register src) { 5941 InstructionMark im(this); 5942 prefix(dst, src, true); 5943 emit_int16(0x0F, (unsigned char)0xC0); 5944 emit_operand(src, dst); 5945 } 5946 5947 void Assembler::xaddw(Address dst, Register src) { 5948 InstructionMark im(this); 5949 emit_int8(0x66); 5950 prefix(dst, src); 5951 emit_int16(0x0F, (unsigned char)0xC1); 5952 emit_operand(src, dst); 5953 } 5954 5955 void Assembler::xaddl(Address dst, Register src) { 5956 InstructionMark im(this); 5957 prefix(dst, src); 5958 emit_int16(0x0F, (unsigned char)0xC1); 5959 emit_operand(src, dst); 5960 } 5961 5962 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { 5963 InstructionMark im(this); 5964 relocate(rtype); 5965 if (abort.is_bound()) { 5966 address entry = target(abort); 5967 assert(entry != NULL, "abort entry NULL"); 5968 intptr_t offset = entry - pc(); 5969 emit_int16((unsigned char)0xC7, (unsigned char)0xF8); 5970 emit_int32(offset - 6); // 2 opcode + 4 address 5971 } else { 5972 abort.add_patch_at(code(), locator()); 5973 emit_int16((unsigned char)0xC7, (unsigned char)0xF8); 5974 emit_int32(0); 5975 } 5976 } 5977 5978 void Assembler::xchgb(Register dst, Address src) { // xchg 5979 InstructionMark im(this); 5980 prefix(src, dst, true); 5981 emit_int8((unsigned char)0x86); 5982 emit_operand(dst, src); 5983 } 5984 5985 void Assembler::xchgw(Register dst, Address src) { // xchg 5986 InstructionMark im(this); 5987 emit_int8(0x66); 5988 prefix(src, dst); 5989 emit_int8((unsigned char)0x87); 5990 emit_operand(dst, src); 5991 } 5992 5993 void Assembler::xchgl(Register dst, Address src) { // xchg 5994 InstructionMark im(this); 5995 prefix(src, dst); 5996 emit_int8((unsigned char)0x87); 5997 emit_operand(dst, src); 5998 } 5999 6000 void Assembler::xchgl(Register dst, Register src) { 6001 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 6002 emit_int16((unsigned char)0x87, (0xC0 | encode)); 6003 } 6004 6005 void Assembler::xend() { 6006 emit_int24(0x0F, 0x01, (unsigned char)0xD5); 6007 } 6008 6009 void Assembler::xgetbv() { 6010 emit_int24(0x0F, 0x01, (unsigned char)0xD0); 6011 } 6012 6013 void Assembler::xorl(Address dst, int32_t imm32) { 6014 InstructionMark im(this); 6015 prefix(dst); 6016 emit_arith_operand(0x81, as_Register(6), dst, imm32); 6017 } 6018 6019 void Assembler::xorl(Register dst, int32_t imm32) { 6020 prefix(dst); 6021 emit_arith(0x81, 0xF0, dst, imm32); 6022 } 6023 6024 void Assembler::xorl(Register dst, Address src) { 6025 InstructionMark im(this); 6026 prefix(src, dst); 6027 emit_int8(0x33); 6028 emit_operand(dst, src); 6029 } 6030 6031 void Assembler::xorl(Register dst, Register src) { 6032 (void) prefix_and_encode(dst->encoding(), src->encoding()); 6033 emit_arith(0x33, 0xC0, dst, src); 6034 } 6035 6036 void Assembler::xorl(Address dst, Register src) { 6037 InstructionMark im(this); 6038 prefix(dst, src); 6039 emit_int8(0x31); 6040 emit_operand(src, dst); 6041 } 6042 6043 void Assembler::xorb(Register dst, Address src) { 6044 InstructionMark im(this); 6045 prefix(src, dst); 6046 emit_int8(0x32); 6047 emit_operand(dst, src); 6048 } 6049 6050 void Assembler::xorb(Address dst, Register src) { 6051 InstructionMark im(this); 6052 prefix(dst, src, true); 6053 emit_int8(0x30); 6054 emit_operand(src, dst); 6055 } 6056 6057 void Assembler::xorw(Register dst, Register src) { 6058 (void)prefix_and_encode(dst->encoding(), src->encoding()); 6059 emit_arith(0x33, 0xC0, dst, src); 6060 } 6061 6062 // AVX 3-operands scalar float-point arithmetic instructions 6063 6064 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) { 6065 assert(VM_Version::supports_avx(), ""); 6066 InstructionMark im(this); 6067 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6068 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 6069 attributes.set_rex_vex_w_reverted(); 6070 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6071 emit_int8(0x58); 6072 emit_operand(dst, src); 6073 } 6074 6075 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6076 assert(VM_Version::supports_avx(), ""); 6077 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6078 attributes.set_rex_vex_w_reverted(); 6079 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6080 emit_int16(0x58, (0xC0 | encode)); 6081 } 6082 6083 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { 6084 assert(VM_Version::supports_avx(), ""); 6085 InstructionMark im(this); 6086 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6087 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 6088 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6089 emit_int8(0x58); 6090 emit_operand(dst, src); 6091 } 6092 6093 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6094 assert(VM_Version::supports_avx(), ""); 6095 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6096 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6097 emit_int16(0x58, (0xC0 | encode)); 6098 } 6099 6100 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { 6101 assert(VM_Version::supports_avx(), ""); 6102 InstructionMark im(this); 6103 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6104 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 6105 attributes.set_rex_vex_w_reverted(); 6106 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6107 emit_int8(0x5E); 6108 emit_operand(dst, src); 6109 } 6110 6111 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6112 assert(VM_Version::supports_avx(), ""); 6113 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6114 attributes.set_rex_vex_w_reverted(); 6115 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6116 emit_int16(0x5E, (0xC0 | encode)); 6117 } 6118 6119 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { 6120 assert(VM_Version::supports_avx(), ""); 6121 InstructionMark im(this); 6122 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6123 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 6124 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6125 emit_int8(0x5E); 6126 emit_operand(dst, src); 6127 } 6128 6129 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6130 assert(VM_Version::supports_avx(), ""); 6131 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6132 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6133 emit_int16(0x5E, (0xC0 | encode)); 6134 } 6135 6136 void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { 6137 assert(VM_Version::supports_fma(), ""); 6138 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6139 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6140 emit_int16((unsigned char)0xB9, (0xC0 | encode)); 6141 } 6142 6143 void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { 6144 assert(VM_Version::supports_fma(), ""); 6145 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6146 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6147 emit_int16((unsigned char)0xB9, (0xC0 | encode)); 6148 } 6149 6150 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { 6151 assert(VM_Version::supports_avx(), ""); 6152 InstructionMark im(this); 6153 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6154 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 6155 attributes.set_rex_vex_w_reverted(); 6156 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6157 emit_int8(0x59); 6158 emit_operand(dst, src); 6159 } 6160 6161 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6162 assert(VM_Version::supports_avx(), ""); 6163 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6164 attributes.set_rex_vex_w_reverted(); 6165 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6166 emit_int16(0x59, (0xC0 | encode)); 6167 } 6168 6169 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { 6170 assert(VM_Version::supports_avx(), ""); 6171 InstructionMark im(this); 6172 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6173 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 6174 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6175 emit_int8(0x59); 6176 emit_operand(dst, src); 6177 } 6178 6179 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6180 assert(VM_Version::supports_avx(), ""); 6181 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6182 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6183 emit_int16(0x59, (0xC0 | encode)); 6184 } 6185 6186 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) { 6187 assert(VM_Version::supports_avx(), ""); 6188 InstructionMark im(this); 6189 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6190 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 6191 attributes.set_rex_vex_w_reverted(); 6192 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6193 emit_int8(0x5C); 6194 emit_operand(dst, src); 6195 } 6196 6197 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6198 assert(VM_Version::supports_avx(), ""); 6199 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6200 attributes.set_rex_vex_w_reverted(); 6201 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 6202 emit_int16(0x5C, (0xC0 | encode)); 6203 } 6204 6205 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) { 6206 assert(VM_Version::supports_avx(), ""); 6207 InstructionMark im(this); 6208 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6209 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 6210 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6211 emit_int8(0x5C); 6212 emit_operand(dst, src); 6213 } 6214 6215 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 6216 assert(VM_Version::supports_avx(), ""); 6217 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6218 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 6219 emit_int16(0x5C, (0xC0 | encode)); 6220 } 6221 6222 //====================VECTOR ARITHMETIC===================================== 6223 6224 // Float-point vector arithmetic 6225 6226 void Assembler::addpd(XMMRegister dst, XMMRegister src) { 6227 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6228 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6229 attributes.set_rex_vex_w_reverted(); 6230 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6231 emit_int16(0x58, (0xC0 | encode)); 6232 } 6233 6234 void Assembler::addpd(XMMRegister dst, Address src) { 6235 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6236 InstructionMark im(this); 6237 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6238 attributes.set_rex_vex_w_reverted(); 6239 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6240 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6241 emit_int8(0x58); 6242 emit_operand(dst, src); 6243 } 6244 6245 6246 void Assembler::addps(XMMRegister dst, XMMRegister src) { 6247 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6248 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6249 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6250 emit_int16(0x58, (0xC0 | encode)); 6251 } 6252 6253 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6254 assert(VM_Version::supports_avx(), ""); 6255 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6256 attributes.set_rex_vex_w_reverted(); 6257 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6258 emit_int16(0x58, (0xC0 | encode)); 6259 } 6260 6261 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6262 assert(VM_Version::supports_avx(), ""); 6263 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6264 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6265 emit_int16(0x58, (0xC0 | encode)); 6266 } 6267 6268 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6269 assert(VM_Version::supports_avx(), ""); 6270 InstructionMark im(this); 6271 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6272 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6273 attributes.set_rex_vex_w_reverted(); 6274 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6275 emit_int8(0x58); 6276 emit_operand(dst, src); 6277 } 6278 6279 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6280 assert(VM_Version::supports_avx(), ""); 6281 InstructionMark im(this); 6282 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6283 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6284 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6285 emit_int8(0x58); 6286 emit_operand(dst, src); 6287 } 6288 6289 void Assembler::subpd(XMMRegister dst, XMMRegister src) { 6290 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6291 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6292 attributes.set_rex_vex_w_reverted(); 6293 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6294 emit_int16(0x5C, (0xC0 | encode)); 6295 } 6296 6297 void Assembler::subps(XMMRegister dst, XMMRegister src) { 6298 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6299 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6300 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6301 emit_int16(0x5C, (0xC0 | encode)); 6302 } 6303 6304 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6305 assert(VM_Version::supports_avx(), ""); 6306 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6307 attributes.set_rex_vex_w_reverted(); 6308 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6309 emit_int16(0x5C, (0xC0 | encode)); 6310 } 6311 6312 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6313 assert(VM_Version::supports_avx(), ""); 6314 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6315 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6316 emit_int16(0x5C, (0xC0 | encode)); 6317 } 6318 6319 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6320 assert(VM_Version::supports_avx(), ""); 6321 InstructionMark im(this); 6322 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6323 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6324 attributes.set_rex_vex_w_reverted(); 6325 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6326 emit_int8(0x5C); 6327 emit_operand(dst, src); 6328 } 6329 6330 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6331 assert(VM_Version::supports_avx(), ""); 6332 InstructionMark im(this); 6333 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6334 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6335 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6336 emit_int8(0x5C); 6337 emit_operand(dst, src); 6338 } 6339 6340 void Assembler::mulpd(XMMRegister dst, XMMRegister src) { 6341 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6342 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6343 attributes.set_rex_vex_w_reverted(); 6344 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6345 emit_int16(0x59, (0xC0 | encode)); 6346 } 6347 6348 void Assembler::mulpd(XMMRegister dst, Address src) { 6349 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6350 InstructionMark im(this); 6351 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6352 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6353 attributes.set_rex_vex_w_reverted(); 6354 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6355 emit_int8(0x59); 6356 emit_operand(dst, src); 6357 } 6358 6359 void Assembler::mulps(XMMRegister dst, XMMRegister src) { 6360 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6361 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6362 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6363 emit_int16(0x59, (0xC0 | encode)); 6364 } 6365 6366 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6367 assert(VM_Version::supports_avx(), ""); 6368 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6369 attributes.set_rex_vex_w_reverted(); 6370 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6371 emit_int16(0x59, (0xC0 | encode)); 6372 } 6373 6374 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6375 assert(VM_Version::supports_avx(), ""); 6376 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6377 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6378 emit_int16(0x59, (0xC0 | encode)); 6379 } 6380 6381 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6382 assert(VM_Version::supports_avx(), ""); 6383 InstructionMark im(this); 6384 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6385 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6386 attributes.set_rex_vex_w_reverted(); 6387 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6388 emit_int8(0x59); 6389 emit_operand(dst, src); 6390 } 6391 6392 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6393 assert(VM_Version::supports_avx(), ""); 6394 InstructionMark im(this); 6395 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6396 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6397 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6398 emit_int8(0x59); 6399 emit_operand(dst, src); 6400 } 6401 6402 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { 6403 assert(VM_Version::supports_fma(), ""); 6404 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6405 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6406 emit_int16((unsigned char)0xB8, (0xC0 | encode)); 6407 } 6408 6409 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { 6410 assert(VM_Version::supports_fma(), ""); 6411 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6412 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6413 emit_int16((unsigned char)0xB8, (0xC0 | encode)); 6414 } 6415 6416 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 6417 assert(VM_Version::supports_fma(), ""); 6418 InstructionMark im(this); 6419 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6420 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6421 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6422 emit_int8((unsigned char)0xB8); 6423 emit_operand(dst, src2); 6424 } 6425 6426 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 6427 assert(VM_Version::supports_fma(), ""); 6428 InstructionMark im(this); 6429 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6430 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6431 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6432 emit_int8((unsigned char)0xB8); 6433 emit_operand(dst, src2); 6434 } 6435 6436 void Assembler::divpd(XMMRegister dst, XMMRegister src) { 6437 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6438 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6439 attributes.set_rex_vex_w_reverted(); 6440 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6441 emit_int16(0x5E, (0xC0 | encode)); 6442 } 6443 6444 void Assembler::divps(XMMRegister dst, XMMRegister src) { 6445 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6446 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6447 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6448 emit_int16(0x5E, (0xC0 | encode)); 6449 } 6450 6451 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6452 assert(VM_Version::supports_avx(), ""); 6453 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6454 attributes.set_rex_vex_w_reverted(); 6455 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6456 emit_int16(0x5E, (0xC0 | encode)); 6457 } 6458 6459 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6460 assert(VM_Version::supports_avx(), ""); 6461 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6462 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6463 emit_int16(0x5E, (0xC0 | encode)); 6464 } 6465 6466 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6467 assert(VM_Version::supports_avx(), ""); 6468 InstructionMark im(this); 6469 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6470 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6471 attributes.set_rex_vex_w_reverted(); 6472 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6473 emit_int8(0x5E); 6474 emit_operand(dst, src); 6475 } 6476 6477 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6478 assert(VM_Version::supports_avx(), ""); 6479 InstructionMark im(this); 6480 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6481 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6482 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6483 emit_int8(0x5E); 6484 emit_operand(dst, src); 6485 } 6486 6487 void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) { 6488 assert(VM_Version::supports_avx(), ""); 6489 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 6490 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6491 emit_int24(0x09, (0xC0 | encode), (rmode)); 6492 } 6493 6494 void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) { 6495 assert(VM_Version::supports_avx(), ""); 6496 InstructionMark im(this); 6497 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 6498 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6499 emit_int8(0x09); 6500 emit_operand(dst, src); 6501 emit_int8((rmode)); 6502 } 6503 6504 void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) { 6505 assert(VM_Version::supports_evex(), "requires EVEX support"); 6506 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6507 attributes.set_is_evex_instruction(); 6508 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6509 emit_int24(0x09, (0xC0 | encode), (rmode)); 6510 } 6511 6512 void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) { 6513 assert(VM_Version::supports_evex(), "requires EVEX support"); 6514 assert(dst != xnoreg, "sanity"); 6515 InstructionMark im(this); 6516 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6517 attributes.set_is_evex_instruction(); 6518 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6519 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6520 emit_int8(0x09); 6521 emit_operand(dst, src); 6522 emit_int8((rmode)); 6523 } 6524 6525 void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) { 6526 assert(VM_Version::supports_avx(), ""); 6527 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6528 attributes.set_rex_vex_w_reverted(); 6529 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6530 emit_int16(0x51, (0xC0 | encode)); 6531 } 6532 6533 void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) { 6534 assert(VM_Version::supports_avx(), ""); 6535 InstructionMark im(this); 6536 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6537 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6538 attributes.set_rex_vex_w_reverted(); 6539 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6540 emit_int8(0x51); 6541 emit_operand(dst, src); 6542 } 6543 6544 void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) { 6545 assert(VM_Version::supports_avx(), ""); 6546 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6547 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6548 emit_int16(0x51, (0xC0 | encode)); 6549 } 6550 6551 void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) { 6552 assert(VM_Version::supports_avx(), ""); 6553 InstructionMark im(this); 6554 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6555 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6556 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6557 emit_int8(0x51); 6558 emit_operand(dst, src); 6559 } 6560 6561 void Assembler::andpd(XMMRegister dst, XMMRegister src) { 6562 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6563 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6564 attributes.set_rex_vex_w_reverted(); 6565 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6566 emit_int16(0x54, (0xC0 | encode)); 6567 } 6568 6569 void Assembler::andps(XMMRegister dst, XMMRegister src) { 6570 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6571 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6572 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6573 emit_int16(0x54, (0xC0 | encode)); 6574 } 6575 6576 void Assembler::andps(XMMRegister dst, Address src) { 6577 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6578 InstructionMark im(this); 6579 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6580 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6581 simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6582 emit_int8(0x54); 6583 emit_operand(dst, src); 6584 } 6585 6586 void Assembler::andpd(XMMRegister dst, Address src) { 6587 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6588 InstructionMark im(this); 6589 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6590 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6591 attributes.set_rex_vex_w_reverted(); 6592 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6593 emit_int8(0x54); 6594 emit_operand(dst, src); 6595 } 6596 6597 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6598 assert(VM_Version::supports_avx(), ""); 6599 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6600 attributes.set_rex_vex_w_reverted(); 6601 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6602 emit_int16(0x54, (0xC0 | encode)); 6603 } 6604 6605 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6606 assert(VM_Version::supports_avx(), ""); 6607 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6608 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6609 emit_int16(0x54, (0xC0 | encode)); 6610 } 6611 6612 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6613 assert(VM_Version::supports_avx(), ""); 6614 InstructionMark im(this); 6615 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6616 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6617 attributes.set_rex_vex_w_reverted(); 6618 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6619 emit_int8(0x54); 6620 emit_operand(dst, src); 6621 } 6622 6623 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6624 assert(VM_Version::supports_avx(), ""); 6625 InstructionMark im(this); 6626 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6627 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6628 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6629 emit_int8(0x54); 6630 emit_operand(dst, src); 6631 } 6632 6633 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) { 6634 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6635 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6636 attributes.set_rex_vex_w_reverted(); 6637 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6638 emit_int8(0x15); 6639 emit_int8((0xC0 | encode)); 6640 } 6641 6642 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) { 6643 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6644 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6645 attributes.set_rex_vex_w_reverted(); 6646 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6647 emit_int16(0x14, (0xC0 | encode)); 6648 } 6649 6650 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { 6651 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6652 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6653 attributes.set_rex_vex_w_reverted(); 6654 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6655 emit_int16(0x57, (0xC0 | encode)); 6656 } 6657 6658 void Assembler::xorps(XMMRegister dst, XMMRegister src) { 6659 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6660 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6661 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6662 emit_int16(0x57, (0xC0 | encode)); 6663 } 6664 6665 void Assembler::xorpd(XMMRegister dst, Address src) { 6666 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6667 InstructionMark im(this); 6668 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6669 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6670 attributes.set_rex_vex_w_reverted(); 6671 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6672 emit_int8(0x57); 6673 emit_operand(dst, src); 6674 } 6675 6676 void Assembler::xorps(XMMRegister dst, Address src) { 6677 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6678 InstructionMark im(this); 6679 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6680 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6681 simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6682 emit_int8(0x57); 6683 emit_operand(dst, src); 6684 } 6685 6686 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6687 assert(VM_Version::supports_avx(), ""); 6688 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6689 attributes.set_rex_vex_w_reverted(); 6690 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6691 emit_int16(0x57, (0xC0 | encode)); 6692 } 6693 6694 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6695 assert(VM_Version::supports_avx(), ""); 6696 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6697 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6698 emit_int16(0x57, (0xC0 | encode)); 6699 } 6700 6701 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6702 assert(VM_Version::supports_avx(), ""); 6703 InstructionMark im(this); 6704 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6705 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6706 attributes.set_rex_vex_w_reverted(); 6707 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6708 emit_int8(0x57); 6709 emit_operand(dst, src); 6710 } 6711 6712 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6713 assert(VM_Version::supports_avx(), ""); 6714 InstructionMark im(this); 6715 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 6716 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6717 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6718 emit_int8(0x57); 6719 emit_operand(dst, src); 6720 } 6721 6722 // Integer vector arithmetic 6723 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6724 assert(VM_Version::supports_avx() && (vector_len == 0) || 6725 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 6726 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 6727 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6728 emit_int16(0x01, (0xC0 | encode)); 6729 } 6730 6731 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6732 assert(VM_Version::supports_avx() && (vector_len == 0) || 6733 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 6734 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 6735 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6736 emit_int16(0x02, (0xC0 | encode)); 6737 } 6738 6739 void Assembler::paddb(XMMRegister dst, XMMRegister src) { 6740 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6741 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6742 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6743 emit_int16((unsigned char)0xFC, (0xC0 | encode)); 6744 } 6745 6746 void Assembler::paddw(XMMRegister dst, XMMRegister src) { 6747 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6748 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6749 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6750 emit_int16((unsigned char)0xFD, (0xC0 | encode)); 6751 } 6752 6753 void Assembler::paddd(XMMRegister dst, XMMRegister src) { 6754 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6755 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6756 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6757 emit_int16((unsigned char)0xFE, (0xC0 | encode)); 6758 } 6759 6760 void Assembler::paddd(XMMRegister dst, Address src) { 6761 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6762 InstructionMark im(this); 6763 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6764 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6765 emit_int8((unsigned char)0xFE); 6766 emit_operand(dst, src); 6767 } 6768 6769 void Assembler::paddq(XMMRegister dst, XMMRegister src) { 6770 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6771 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6772 attributes.set_rex_vex_w_reverted(); 6773 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6774 emit_int16((unsigned char)0xD4, (0xC0 | encode)); 6775 } 6776 6777 void Assembler::phaddw(XMMRegister dst, XMMRegister src) { 6778 assert(VM_Version::supports_sse3(), ""); 6779 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 6780 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6781 emit_int16(0x01, (0xC0 | encode)); 6782 } 6783 6784 void Assembler::phaddd(XMMRegister dst, XMMRegister src) { 6785 assert(VM_Version::supports_sse3(), ""); 6786 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 6787 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6788 emit_int16(0x02, (0xC0 | encode)); 6789 } 6790 6791 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6792 assert(UseAVX > 0, "requires some form of AVX"); 6793 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6794 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6795 emit_int16((unsigned char)0xFC, (0xC0 | encode)); 6796 } 6797 6798 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6799 assert(UseAVX > 0, "requires some form of AVX"); 6800 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6801 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6802 emit_int16((unsigned char)0xFD, (0xC0 | encode)); 6803 } 6804 6805 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6806 assert(UseAVX > 0, "requires some form of AVX"); 6807 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6808 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6809 emit_int16((unsigned char)0xFE, (0xC0 | encode)); 6810 } 6811 6812 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6813 assert(UseAVX > 0, "requires some form of AVX"); 6814 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6815 attributes.set_rex_vex_w_reverted(); 6816 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6817 emit_int16((unsigned char)0xD4, (0xC0 | encode)); 6818 } 6819 6820 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6821 assert(UseAVX > 0, "requires some form of AVX"); 6822 InstructionMark im(this); 6823 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6824 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6825 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6826 emit_int8((unsigned char)0xFC); 6827 emit_operand(dst, src); 6828 } 6829 6830 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6831 assert(UseAVX > 0, "requires some form of AVX"); 6832 InstructionMark im(this); 6833 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6834 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6835 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6836 emit_int8((unsigned char)0xFD); 6837 emit_operand(dst, src); 6838 } 6839 6840 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6841 assert(UseAVX > 0, "requires some form of AVX"); 6842 InstructionMark im(this); 6843 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6844 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6845 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6846 emit_int8((unsigned char)0xFE); 6847 emit_operand(dst, src); 6848 } 6849 6850 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6851 assert(UseAVX > 0, "requires some form of AVX"); 6852 InstructionMark im(this); 6853 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6854 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6855 attributes.set_rex_vex_w_reverted(); 6856 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6857 emit_int8((unsigned char)0xD4); 6858 emit_operand(dst, src); 6859 } 6860 6861 void Assembler::psubb(XMMRegister dst, XMMRegister src) { 6862 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6863 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6864 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6865 emit_int16((unsigned char)0xF8, (0xC0 | encode)); 6866 } 6867 6868 void Assembler::psubw(XMMRegister dst, XMMRegister src) { 6869 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6870 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6871 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6872 emit_int16((unsigned char)0xF9, (0xC0 | encode)); 6873 } 6874 6875 void Assembler::psubd(XMMRegister dst, XMMRegister src) { 6876 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6877 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6878 emit_int16((unsigned char)0xFA, (0xC0 | encode)); 6879 } 6880 6881 void Assembler::psubq(XMMRegister dst, XMMRegister src) { 6882 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6883 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6884 attributes.set_rex_vex_w_reverted(); 6885 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6886 emit_int8((unsigned char)0xFB); 6887 emit_int8((0xC0 | encode)); 6888 } 6889 6890 void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6891 assert(UseAVX > 0, "requires some form of AVX"); 6892 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6893 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6894 emit_int16((unsigned char)0xD8, (0xC0 | encode)); 6895 } 6896 6897 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6898 assert(UseAVX > 0, "requires some form of AVX"); 6899 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6900 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6901 emit_int16((unsigned char)0xF8, (0xC0 | encode)); 6902 } 6903 6904 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6905 assert(UseAVX > 0, "requires some form of AVX"); 6906 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6907 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6908 emit_int16((unsigned char)0xF9, (0xC0 | encode)); 6909 } 6910 6911 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6912 assert(UseAVX > 0, "requires some form of AVX"); 6913 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6914 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6915 emit_int16((unsigned char)0xFA, (0xC0 | encode)); 6916 } 6917 6918 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6919 assert(UseAVX > 0, "requires some form of AVX"); 6920 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6921 attributes.set_rex_vex_w_reverted(); 6922 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6923 emit_int16((unsigned char)0xFB, (0xC0 | encode)); 6924 } 6925 6926 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6927 assert(UseAVX > 0, "requires some form of AVX"); 6928 InstructionMark im(this); 6929 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6930 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6931 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6932 emit_int8((unsigned char)0xF8); 6933 emit_operand(dst, src); 6934 } 6935 6936 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6937 assert(UseAVX > 0, "requires some form of AVX"); 6938 InstructionMark im(this); 6939 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6940 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6941 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6942 emit_int8((unsigned char)0xF9); 6943 emit_operand(dst, src); 6944 } 6945 6946 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6947 assert(UseAVX > 0, "requires some form of AVX"); 6948 InstructionMark im(this); 6949 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6950 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6951 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6952 emit_int8((unsigned char)0xFA); 6953 emit_operand(dst, src); 6954 } 6955 6956 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6957 assert(UseAVX > 0, "requires some form of AVX"); 6958 InstructionMark im(this); 6959 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6960 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6961 attributes.set_rex_vex_w_reverted(); 6962 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6963 emit_int8((unsigned char)0xFB); 6964 emit_operand(dst, src); 6965 } 6966 6967 void Assembler::pmullw(XMMRegister dst, XMMRegister src) { 6968 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6969 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6970 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6971 emit_int16((unsigned char)0xD5, (0xC0 | encode)); 6972 } 6973 6974 void Assembler::pmulld(XMMRegister dst, XMMRegister src) { 6975 assert(VM_Version::supports_sse4_1(), ""); 6976 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6977 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6978 emit_int16(0x40, (0xC0 | encode)); 6979 } 6980 6981 void Assembler::pmuludq(XMMRegister dst, XMMRegister src) { 6982 assert(VM_Version::supports_sse2(), ""); 6983 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 6984 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6985 emit_int16((unsigned char)0xF4, (0xC0 | encode)); 6986 } 6987 6988 void Assembler::vpmulhuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6989 assert((vector_len == AVX_128bit && VM_Version::supports_avx()) || 6990 (vector_len == AVX_256bit && VM_Version::supports_avx2()) || 6991 (vector_len == AVX_512bit && VM_Version::supports_avx512bw()), ""); 6992 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6993 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6994 emit_int16((unsigned char)0xE4, (0xC0 | encode)); 6995 } 6996 6997 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6998 assert(UseAVX > 0, "requires some form of AVX"); 6999 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7000 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7001 emit_int16((unsigned char)0xD5, (0xC0 | encode)); 7002 } 7003 7004 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7005 assert(UseAVX > 0, "requires some form of AVX"); 7006 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7007 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7008 emit_int16(0x40, (0xC0 | encode)); 7009 } 7010 7011 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7012 assert(UseAVX > 2, "requires some form of EVEX"); 7013 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 7014 attributes.set_is_evex_instruction(); 7015 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7016 emit_int16(0x40, (0xC0 | encode)); 7017 } 7018 7019 void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7020 assert(UseAVX > 0, "requires some form of AVX"); 7021 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7022 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7023 emit_int16((unsigned char)0xF4, (0xC0 | encode)); 7024 } 7025 7026 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7027 assert(UseAVX > 0, "requires some form of AVX"); 7028 InstructionMark im(this); 7029 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7030 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 7031 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7032 emit_int8((unsigned char)0xD5); 7033 emit_operand(dst, src); 7034 } 7035 7036 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7037 assert(UseAVX > 0, "requires some form of AVX"); 7038 InstructionMark im(this); 7039 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7040 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7041 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7042 emit_int8(0x40); 7043 emit_operand(dst, src); 7044 } 7045 7046 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7047 assert(UseAVX > 2, "requires some form of EVEX"); 7048 InstructionMark im(this); 7049 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 7050 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 7051 attributes.set_is_evex_instruction(); 7052 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7053 emit_int8(0x40); 7054 emit_operand(dst, src); 7055 } 7056 7057 // Min, max 7058 void Assembler::pminsb(XMMRegister dst, XMMRegister src) { 7059 assert(VM_Version::supports_sse4_1(), ""); 7060 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7061 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7062 emit_int16(0x38, (0xC0 | encode)); 7063 } 7064 7065 void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7066 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7067 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 7068 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7069 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7070 emit_int16(0x38, (0xC0 | encode)); 7071 } 7072 7073 void Assembler::pminsw(XMMRegister dst, XMMRegister src) { 7074 assert(VM_Version::supports_sse2(), ""); 7075 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7076 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7077 emit_int16((unsigned char)0xEA, (0xC0 | encode)); 7078 } 7079 7080 void Assembler::vpminsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7081 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7082 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 7083 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7084 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7085 emit_int16((unsigned char)0xEA, (0xC0 | encode)); 7086 } 7087 7088 void Assembler::pminsd(XMMRegister dst, XMMRegister src) { 7089 assert(VM_Version::supports_sse4_1(), ""); 7090 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7091 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7092 emit_int16(0x39, (0xC0 | encode)); 7093 } 7094 7095 void Assembler::vpminsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7096 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7097 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), ""); 7098 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7099 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7100 emit_int16(0x39, (0xC0 | encode)); 7101 } 7102 7103 void Assembler::vpminsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7104 assert(UseAVX > 2, "requires AVX512F"); 7105 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7106 attributes.set_is_evex_instruction(); 7107 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7108 emit_int16(0x39, (0xC0 | encode)); 7109 } 7110 7111 void Assembler::minps(XMMRegister dst, XMMRegister src) { 7112 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7113 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7114 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 7115 emit_int16(0x5D, (0xC0 | encode)); 7116 } 7117 void Assembler::vminps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7118 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 7119 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7120 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 7121 emit_int16(0x5D, (0xC0 | encode)); 7122 } 7123 7124 void Assembler::minpd(XMMRegister dst, XMMRegister src) { 7125 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7126 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7127 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7128 emit_int16(0x5D, (0xC0 | encode)); 7129 } 7130 void Assembler::vminpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7131 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 7132 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7133 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7134 emit_int16(0x5D, (0xC0 | encode)); 7135 } 7136 7137 void Assembler::pmaxsb(XMMRegister dst, XMMRegister src) { 7138 assert(VM_Version::supports_sse4_1(), ""); 7139 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7140 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7141 emit_int16(0x3C, (0xC0 | encode)); 7142 } 7143 7144 void Assembler::vpmaxsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7145 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7146 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 7147 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7148 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7149 emit_int16(0x3C, (0xC0 | encode)); 7150 } 7151 7152 void Assembler::pmaxsw(XMMRegister dst, XMMRegister src) { 7153 assert(VM_Version::supports_sse2(), ""); 7154 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7155 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7156 emit_int16((unsigned char)0xEE, (0xC0 | encode)); 7157 } 7158 7159 void Assembler::vpmaxsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7160 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7161 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 7162 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7163 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7164 emit_int16((unsigned char)0xEE, (0xC0 | encode)); 7165 } 7166 7167 void Assembler::pmaxsd(XMMRegister dst, XMMRegister src) { 7168 assert(VM_Version::supports_sse4_1(), ""); 7169 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7170 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7171 emit_int16(0x3D, (0xC0 | encode)); 7172 } 7173 7174 void Assembler::vpmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7175 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7176 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), ""); 7177 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7178 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7179 emit_int16(0x3D, (0xC0 | encode)); 7180 } 7181 7182 void Assembler::vpmaxsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7183 assert(UseAVX > 2, "requires AVX512F"); 7184 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7185 attributes.set_is_evex_instruction(); 7186 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7187 emit_int16(0x3D, (0xC0 | encode)); 7188 } 7189 7190 void Assembler::maxps(XMMRegister dst, XMMRegister src) { 7191 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7192 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7193 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 7194 emit_int16(0x5F, (0xC0 | encode)); 7195 } 7196 7197 void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7198 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 7199 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7200 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 7201 emit_int16(0x5F, (0xC0 | encode)); 7202 } 7203 7204 void Assembler::maxpd(XMMRegister dst, XMMRegister src) { 7205 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7206 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7207 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7208 emit_int16(0x5F, (0xC0 | encode)); 7209 } 7210 7211 void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7212 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 7213 InstructionAttr attributes(vector_len, /* vex_w */true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7214 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7215 emit_int16(0x5F, (0xC0 | encode)); 7216 } 7217 7218 // Shift packed integers left by specified number of bits. 7219 void Assembler::psllw(XMMRegister dst, int shift) { 7220 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7221 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7222 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 7223 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7224 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 7225 } 7226 7227 void Assembler::pslld(XMMRegister dst, int shift) { 7228 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7229 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7230 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 7231 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7232 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7233 } 7234 7235 void Assembler::psllq(XMMRegister dst, int shift) { 7236 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7237 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7238 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 7239 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7240 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 7241 } 7242 7243 void Assembler::psllw(XMMRegister dst, XMMRegister shift) { 7244 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7245 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7246 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7247 emit_int16((unsigned char)0xF1, (0xC0 | encode)); 7248 } 7249 7250 void Assembler::pslld(XMMRegister dst, XMMRegister shift) { 7251 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7252 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7253 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7254 emit_int16((unsigned char)0xF2, (0xC0 | encode)); 7255 } 7256 7257 void Assembler::psllq(XMMRegister dst, XMMRegister shift) { 7258 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7259 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7260 attributes.set_rex_vex_w_reverted(); 7261 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7262 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 7263 } 7264 7265 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7266 assert(UseAVX > 0, "requires some form of AVX"); 7267 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7268 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 7269 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7270 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 7271 } 7272 7273 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7274 assert(UseAVX > 0, "requires some form of AVX"); 7275 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7276 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7277 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 7278 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7279 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7280 } 7281 7282 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7283 assert(UseAVX > 0, "requires some form of AVX"); 7284 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7285 attributes.set_rex_vex_w_reverted(); 7286 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 7287 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7288 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 7289 } 7290 7291 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7292 assert(UseAVX > 0, "requires some form of AVX"); 7293 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7294 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7295 emit_int16((unsigned char)0xF1, (0xC0 | encode)); 7296 } 7297 7298 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7299 assert(UseAVX > 0, "requires some form of AVX"); 7300 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7301 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7302 emit_int16((unsigned char)0xF2, (0xC0 | encode)); 7303 } 7304 7305 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7306 assert(UseAVX > 0, "requires some form of AVX"); 7307 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7308 attributes.set_rex_vex_w_reverted(); 7309 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7310 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 7311 } 7312 7313 // Shift packed integers logically right by specified number of bits. 7314 void Assembler::psrlw(XMMRegister dst, int shift) { 7315 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7316 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7317 // XMM2 is for /2 encoding: 66 0F 71 /2 ib 7318 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7319 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 7320 } 7321 7322 void Assembler::psrld(XMMRegister dst, int shift) { 7323 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7324 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7325 // XMM2 is for /2 encoding: 66 0F 72 /2 ib 7326 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7327 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7328 } 7329 7330 void Assembler::psrlq(XMMRegister dst, int shift) { 7331 // Do not confuse it with psrldq SSE2 instruction which 7332 // shifts 128 bit value in xmm register by number of bytes. 7333 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7334 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7335 attributes.set_rex_vex_w_reverted(); 7336 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 7337 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7338 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 7339 } 7340 7341 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) { 7342 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7343 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7344 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7345 emit_int16((unsigned char)0xD1, (0xC0 | encode)); 7346 } 7347 7348 void Assembler::psrld(XMMRegister dst, XMMRegister shift) { 7349 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7350 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7351 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7352 emit_int16((unsigned char)0xD2, (0xC0 | encode)); 7353 } 7354 7355 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) { 7356 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7357 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7358 attributes.set_rex_vex_w_reverted(); 7359 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7360 emit_int16((unsigned char)0xD3, (0xC0 | encode)); 7361 } 7362 7363 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7364 assert(UseAVX > 0, "requires some form of AVX"); 7365 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7366 // XMM2 is for /2 encoding: 66 0F 71 /2 ib 7367 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7368 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 7369 } 7370 7371 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7372 assert(UseAVX > 0, "requires some form of AVX"); 7373 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7374 // XMM2 is for /2 encoding: 66 0F 72 /2 ib 7375 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7376 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7377 } 7378 7379 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7380 assert(UseAVX > 0, "requires some form of AVX"); 7381 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7382 attributes.set_rex_vex_w_reverted(); 7383 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 7384 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7385 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 7386 } 7387 7388 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7389 assert(UseAVX > 0, "requires some form of AVX"); 7390 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7391 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7392 emit_int16((unsigned char)0xD1, (0xC0 | encode)); 7393 } 7394 7395 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7396 assert(UseAVX > 0, "requires some form of AVX"); 7397 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7398 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7399 emit_int16((unsigned char)0xD2, (0xC0 | encode)); 7400 } 7401 7402 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7403 assert(UseAVX > 0, "requires some form of AVX"); 7404 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7405 attributes.set_rex_vex_w_reverted(); 7406 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7407 emit_int16((unsigned char)0xD3, (0xC0 | encode)); 7408 } 7409 7410 void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7411 assert(VM_Version::supports_avx512bw(), ""); 7412 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7413 attributes.set_is_evex_instruction(); 7414 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7415 emit_int16(0x10, (0xC0 | encode)); 7416 } 7417 7418 void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7419 assert(VM_Version::supports_avx512bw(), ""); 7420 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7421 attributes.set_is_evex_instruction(); 7422 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7423 emit_int16(0x12, (0xC0 | encode)); 7424 } 7425 7426 // Shift packed integers arithmetically right by specified number of bits. 7427 void Assembler::psraw(XMMRegister dst, int shift) { 7428 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7429 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7430 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 7431 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7432 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 7433 } 7434 7435 void Assembler::psrad(XMMRegister dst, int shift) { 7436 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7437 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7438 // XMM4 is for /4 encoding: 66 0F 72 /4 ib 7439 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7440 emit_int8(0x72); 7441 emit_int8((0xC0 | encode)); 7442 emit_int8(shift & 0xFF); 7443 } 7444 7445 void Assembler::psraw(XMMRegister dst, XMMRegister shift) { 7446 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7447 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7448 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7449 emit_int16((unsigned char)0xE1, (0xC0 | encode)); 7450 } 7451 7452 void Assembler::psrad(XMMRegister dst, XMMRegister shift) { 7453 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7454 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7455 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7456 emit_int16((unsigned char)0xE2, (0xC0 | encode)); 7457 } 7458 7459 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7460 assert(UseAVX > 0, "requires some form of AVX"); 7461 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7462 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 7463 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7464 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 7465 } 7466 7467 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7468 assert(UseAVX > 0, "requires some form of AVX"); 7469 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7470 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 7471 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7472 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7473 } 7474 7475 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7476 assert(UseAVX > 0, "requires some form of AVX"); 7477 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7478 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7479 emit_int16((unsigned char)0xE1, (0xC0 | encode)); 7480 } 7481 7482 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7483 assert(UseAVX > 0, "requires some form of AVX"); 7484 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7485 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7486 emit_int16((unsigned char)0xE2, (0xC0 | encode)); 7487 } 7488 7489 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7490 assert(UseAVX > 2, "requires AVX512"); 7491 assert ((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl"); 7492 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7493 attributes.set_is_evex_instruction(); 7494 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7495 emit_int24((unsigned char)0x72, (0xC0 | encode), shift & 0xFF); 7496 } 7497 7498 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7499 assert(UseAVX > 2, "requires AVX512"); 7500 assert ((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl"); 7501 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7502 attributes.set_is_evex_instruction(); 7503 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7504 emit_int16((unsigned char)0xE2, (0xC0 | encode)); 7505 } 7506 7507 // logical operations packed integers 7508 void Assembler::pand(XMMRegister dst, XMMRegister src) { 7509 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7510 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7511 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7512 emit_int16((unsigned char)0xDB, (0xC0 | encode)); 7513 } 7514 7515 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7516 assert(UseAVX > 0, "requires some form of AVX"); 7517 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7518 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7519 emit_int16((unsigned char)0xDB, (0xC0 | encode)); 7520 } 7521 7522 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7523 assert(UseAVX > 0, "requires some form of AVX"); 7524 InstructionMark im(this); 7525 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7526 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7527 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7528 emit_int8((unsigned char)0xDB); 7529 emit_operand(dst, src); 7530 } 7531 7532 void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7533 assert(VM_Version::supports_evex(), ""); 7534 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7535 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7536 emit_int16((unsigned char)0xDB, (0xC0 | encode)); 7537 } 7538 7539 //Variable Shift packed integers logically left. 7540 void Assembler::vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7541 assert(UseAVX > 1, "requires AVX2"); 7542 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7543 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7544 emit_int16(0x47, (0xC0 | encode)); 7545 } 7546 7547 void Assembler::vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7548 assert(UseAVX > 1, "requires AVX2"); 7549 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7550 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7551 emit_int16(0x47, (0xC0 | encode)); 7552 } 7553 7554 //Variable Shift packed integers logically right. 7555 void Assembler::vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7556 assert(UseAVX > 1, "requires AVX2"); 7557 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7558 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7559 emit_int16(0x45, (0xC0 | encode)); 7560 } 7561 7562 void Assembler::vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7563 assert(UseAVX > 1, "requires AVX2"); 7564 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7565 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7566 emit_int16(0x45, (0xC0 | encode)); 7567 } 7568 7569 //Variable right Shift arithmetic packed integers . 7570 void Assembler::vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7571 assert(UseAVX > 1, "requires AVX2"); 7572 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7573 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7574 emit_int16(0x46, (0xC0 | encode)); 7575 } 7576 7577 void Assembler::evpsravw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7578 assert(VM_Version::supports_avx512bw(), ""); 7579 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7580 attributes.set_is_evex_instruction(); 7581 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7582 emit_int16(0x11, (0xC0 | encode)); 7583 } 7584 7585 void Assembler::evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7586 assert(UseAVX > 2, "requires AVX512"); 7587 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL"); 7588 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7589 attributes.set_is_evex_instruction(); 7590 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7591 emit_int16(0x46, (0xC0 | encode)); 7592 } 7593 7594 void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7595 assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2"); 7596 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7597 attributes.set_is_evex_instruction(); 7598 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7599 emit_int16(0x71, (0xC0 | encode)); 7600 } 7601 7602 void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7603 assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2"); 7604 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7605 attributes.set_is_evex_instruction(); 7606 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7607 emit_int16(0x73, (0xC0 | encode)); 7608 } 7609 7610 void Assembler::pandn(XMMRegister dst, XMMRegister src) { 7611 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7612 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7613 attributes.set_rex_vex_w_reverted(); 7614 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7615 emit_int16((unsigned char)0xDF, (0xC0 | encode)); 7616 } 7617 7618 void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7619 assert(UseAVX > 0, "requires some form of AVX"); 7620 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7621 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7622 emit_int16((unsigned char)0xDF, (0xC0 | encode)); 7623 } 7624 7625 void Assembler::por(XMMRegister dst, XMMRegister src) { 7626 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7627 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7628 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7629 emit_int16((unsigned char)0xEB, (0xC0 | encode)); 7630 } 7631 7632 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7633 assert(UseAVX > 0, "requires some form of AVX"); 7634 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7635 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7636 emit_int16((unsigned char)0xEB, (0xC0 | encode)); 7637 } 7638 7639 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7640 assert(UseAVX > 0, "requires some form of AVX"); 7641 InstructionMark im(this); 7642 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7643 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7644 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7645 emit_int8((unsigned char)0xEB); 7646 emit_operand(dst, src); 7647 } 7648 7649 void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7650 assert(VM_Version::supports_evex(), ""); 7651 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7652 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7653 emit_int16((unsigned char)0xEB, (0xC0 | encode)); 7654 } 7655 7656 7657 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7658 assert(VM_Version::supports_evex(), ""); 7659 // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r 7660 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7661 attributes.set_is_evex_instruction(); 7662 attributes.set_embedded_opmask_register_specifier(mask); 7663 if (merge) { 7664 attributes.reset_is_clear_context(); 7665 } 7666 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7667 emit_int16((unsigned char)0xEB, (0xC0 | encode)); 7668 } 7669 7670 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7671 assert(VM_Version::supports_evex(), ""); 7672 // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r 7673 InstructionMark im(this); 7674 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7675 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit); 7676 attributes.set_is_evex_instruction(); 7677 attributes.set_embedded_opmask_register_specifier(mask); 7678 if (merge) { 7679 attributes.reset_is_clear_context(); 7680 } 7681 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7682 emit_int8((unsigned char)0xEB); 7683 emit_operand(dst, src); 7684 } 7685 7686 void Assembler::pxor(XMMRegister dst, XMMRegister src) { 7687 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7688 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7689 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7690 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 7691 } 7692 7693 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7694 assert(UseAVX > 0, "requires some form of AVX"); 7695 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7696 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 7697 vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, ""); 7698 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7699 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7700 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 7701 } 7702 7703 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7704 assert(UseAVX > 0, "requires some form of AVX"); 7705 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 7706 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 7707 vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, ""); 7708 InstructionMark im(this); 7709 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7710 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7711 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7712 emit_int8((unsigned char)0xEF); 7713 emit_operand(dst, src); 7714 } 7715 7716 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7717 assert(UseAVX > 2, "requires some form of EVEX"); 7718 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7719 attributes.set_rex_vex_w_reverted(); 7720 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7721 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 7722 } 7723 7724 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7725 // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r 7726 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7727 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7728 attributes.set_is_evex_instruction(); 7729 attributes.set_embedded_opmask_register_specifier(mask); 7730 if (merge) { 7731 attributes.reset_is_clear_context(); 7732 } 7733 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7734 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 7735 } 7736 7737 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7738 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7739 InstructionMark im(this); 7740 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 7741 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 7742 attributes.set_is_evex_instruction(); 7743 attributes.set_embedded_opmask_register_specifier(mask); 7744 if (merge) { 7745 attributes.reset_is_clear_context(); 7746 } 7747 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7748 emit_int8((unsigned char)0xEF); 7749 emit_operand(dst, src); 7750 } 7751 7752 void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7753 // Encoding: EVEX.NDS.XXX.66.0F.W1 EF /r 7754 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7755 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7756 attributes.set_is_evex_instruction(); 7757 attributes.set_embedded_opmask_register_specifier(mask); 7758 if (merge) { 7759 attributes.reset_is_clear_context(); 7760 } 7761 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7762 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 7763 } 7764 7765 void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7766 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7767 InstructionMark im(this); 7768 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 7769 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 7770 attributes.set_is_evex_instruction(); 7771 attributes.set_embedded_opmask_register_specifier(mask); 7772 if (merge) { 7773 attributes.reset_is_clear_context(); 7774 } 7775 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7776 emit_int8((unsigned char)0xEF); 7777 emit_operand(dst, src); 7778 } 7779 7780 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7781 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7782 InstructionMark im(this); 7783 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 7784 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 7785 attributes.set_is_evex_instruction(); 7786 attributes.set_embedded_opmask_register_specifier(mask); 7787 if (merge) { 7788 attributes.reset_is_clear_context(); 7789 } 7790 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7791 emit_int8((unsigned char)0xDB); 7792 emit_operand(dst, src); 7793 } 7794 7795 void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7796 assert(VM_Version::supports_evex(), ""); 7797 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7798 attributes.set_is_evex_instruction(); 7799 attributes.set_embedded_opmask_register_specifier(mask); 7800 if (merge) { 7801 attributes.reset_is_clear_context(); 7802 } 7803 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7804 emit_int16((unsigned char)0xDB, (0xC0 | encode)); 7805 } 7806 7807 void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7808 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7809 InstructionMark im(this); 7810 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 7811 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 7812 attributes.set_is_evex_instruction(); 7813 attributes.set_embedded_opmask_register_specifier(mask); 7814 if (merge) { 7815 attributes.reset_is_clear_context(); 7816 } 7817 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7818 emit_int8((unsigned char)0xDB); 7819 emit_operand(dst, src); 7820 } 7821 7822 void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7823 assert(VM_Version::supports_evex(), ""); 7824 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7825 attributes.set_is_evex_instruction(); 7826 attributes.set_embedded_opmask_register_specifier(mask); 7827 if (merge) { 7828 attributes.reset_is_clear_context(); 7829 } 7830 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7831 emit_int16((unsigned char)0xEB, (0xC0 | encode)); 7832 } 7833 7834 void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7835 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 7836 InstructionMark im(this); 7837 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 7838 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 7839 attributes.set_is_evex_instruction(); 7840 attributes.set_embedded_opmask_register_specifier(mask); 7841 if (merge) { 7842 attributes.reset_is_clear_context(); 7843 } 7844 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7845 emit_int8((unsigned char)0xEB); 7846 emit_operand(dst, src); 7847 } 7848 7849 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7850 assert(VM_Version::supports_evex(), "requires EVEX support"); 7851 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7852 attributes.set_is_evex_instruction(); 7853 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7854 emit_int16((unsigned char)0xEF, (0xC0 | encode)); 7855 } 7856 7857 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7858 assert(VM_Version::supports_evex(), "requires EVEX support"); 7859 assert(dst != xnoreg, "sanity"); 7860 InstructionMark im(this); 7861 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7862 attributes.set_is_evex_instruction(); 7863 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 7864 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7865 emit_int8((unsigned char)0xEF); 7866 emit_operand(dst, src); 7867 } 7868 7869 void Assembler::evprold(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7870 assert(VM_Version::supports_evex(), "requires EVEX support"); 7871 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7872 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7873 attributes.set_is_evex_instruction(); 7874 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7875 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7876 } 7877 7878 void Assembler::evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7879 assert(VM_Version::supports_evex(), "requires EVEX support"); 7880 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7881 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7882 attributes.set_is_evex_instruction(); 7883 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7884 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7885 } 7886 7887 // Register is a class, but it would be assigned numerical value. 7888 // "0" is assigned for xmm0. Thus we need to ignore -Wnonnull. 7889 PRAGMA_DIAG_PUSH 7890 PRAGMA_NONNULL_IGNORED 7891 void Assembler::evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7892 assert(VM_Version::supports_evex(), "requires EVEX support"); 7893 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7894 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7895 attributes.set_is_evex_instruction(); 7896 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7897 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7898 } 7899 7900 void Assembler::evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7901 assert(VM_Version::supports_evex(), "requires EVEX support"); 7902 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7903 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7904 attributes.set_is_evex_instruction(); 7905 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7906 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 7907 } 7908 PRAGMA_DIAG_POP 7909 7910 void Assembler::evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7911 assert(VM_Version::supports_evex(), "requires EVEX support"); 7912 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7913 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7914 attributes.set_is_evex_instruction(); 7915 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7916 emit_int16(0x15, (unsigned char)(0xC0 | encode)); 7917 } 7918 7919 void Assembler::evprolvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7920 assert(VM_Version::supports_evex(), "requires EVEX support"); 7921 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7922 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7923 attributes.set_is_evex_instruction(); 7924 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7925 emit_int16(0x15, (unsigned char)(0xC0 | encode)); 7926 } 7927 7928 void Assembler::evprorvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7929 assert(VM_Version::supports_evex(), "requires EVEX support"); 7930 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7931 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7932 attributes.set_is_evex_instruction(); 7933 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7934 emit_int16(0x14, (unsigned char)(0xC0 | encode)); 7935 } 7936 7937 void Assembler::evprorvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7938 assert(VM_Version::supports_evex(), "requires EVEX support"); 7939 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7940 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7941 attributes.set_is_evex_instruction(); 7942 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7943 emit_int16(0x14, (unsigned char)(0xC0 | encode)); 7944 } 7945 7946 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) { 7947 assert(VM_Version::supports_evex(), "requires EVEX support"); 7948 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7949 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7950 attributes.set_is_evex_instruction(); 7951 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7952 emit_int8(0x25); 7953 emit_int8((unsigned char)(0xC0 | encode)); 7954 emit_int8(imm8); 7955 } 7956 7957 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) { 7958 assert(VM_Version::supports_evex(), "requires EVEX support"); 7959 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7960 assert(dst != xnoreg, "sanity"); 7961 InstructionMark im(this); 7962 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7963 attributes.set_is_evex_instruction(); 7964 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 7965 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7966 emit_int8(0x25); 7967 emit_operand(dst, src3); 7968 emit_int8(imm8); 7969 } 7970 7971 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) { 7972 assert(VM_Version::supports_evex(), "requires EVEX support"); 7973 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 7974 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7975 attributes.set_is_evex_instruction(); 7976 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7977 emit_int8(0x25); 7978 emit_int8((unsigned char)(0xC0 | encode)); 7979 emit_int8(imm8); 7980 } 7981 7982 // vinserti forms 7983 7984 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7985 assert(VM_Version::supports_avx2(), ""); 7986 assert(imm8 <= 0x01, "imm8: %u", imm8); 7987 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7988 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7989 // last byte: 7990 // 0x00 - insert into lower 128 bits 7991 // 0x01 - insert into upper 128 bits 7992 emit_int24(0x38, (0xC0 | encode), imm8 & 0x01); 7993 } 7994 7995 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 7996 assert(VM_Version::supports_avx2(), ""); 7997 assert(dst != xnoreg, "sanity"); 7998 assert(imm8 <= 0x01, "imm8: %u", imm8); 7999 InstructionMark im(this); 8000 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8001 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8002 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8003 emit_int8(0x38); 8004 emit_operand(dst, src); 8005 // 0x00 - insert into lower 128 bits 8006 // 0x01 - insert into upper 128 bits 8007 emit_int8(imm8 & 0x01); 8008 } 8009 8010 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 8011 assert(VM_Version::supports_evex(), ""); 8012 assert(imm8 <= 0x03, "imm8: %u", imm8); 8013 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8014 attributes.set_is_evex_instruction(); 8015 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8016 // imm8: 8017 // 0x00 - insert into q0 128 bits (0..127) 8018 // 0x01 - insert into q1 128 bits (128..255) 8019 // 0x02 - insert into q2 128 bits (256..383) 8020 // 0x03 - insert into q3 128 bits (384..511) 8021 emit_int24(0x38, (0xC0 | encode), imm8 & 0x03); 8022 } 8023 8024 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 8025 assert(VM_Version::supports_avx(), ""); 8026 assert(dst != xnoreg, "sanity"); 8027 assert(imm8 <= 0x03, "imm8: %u", imm8); 8028 InstructionMark im(this); 8029 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8030 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8031 attributes.set_is_evex_instruction(); 8032 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8033 emit_int8(0x18); 8034 emit_operand(dst, src); 8035 // 0x00 - insert into q0 128 bits (0..127) 8036 // 0x01 - insert into q1 128 bits (128..255) 8037 // 0x02 - insert into q2 128 bits (256..383) 8038 // 0x03 - insert into q3 128 bits (384..511) 8039 emit_int8(imm8 & 0x03); 8040 } 8041 8042 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 8043 assert(VM_Version::supports_evex(), ""); 8044 assert(imm8 <= 0x01, "imm8: %u", imm8); 8045 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8046 attributes.set_is_evex_instruction(); 8047 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8048 //imm8: 8049 // 0x00 - insert into lower 256 bits 8050 // 0x01 - insert into upper 256 bits 8051 emit_int24(0x3A, (0xC0 | encode), imm8 & 0x01); 8052 } 8053 8054 8055 // vinsertf forms 8056 8057 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 8058 assert(VM_Version::supports_avx(), ""); 8059 assert(imm8 <= 0x01, "imm8: %u", imm8); 8060 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8061 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8062 // imm8: 8063 // 0x00 - insert into lower 128 bits 8064 // 0x01 - insert into upper 128 bits 8065 emit_int24(0x18, (0xC0 | encode), imm8 & 0x01); 8066 } 8067 8068 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 8069 assert(VM_Version::supports_avx(), ""); 8070 assert(dst != xnoreg, "sanity"); 8071 assert(imm8 <= 0x01, "imm8: %u", imm8); 8072 InstructionMark im(this); 8073 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8074 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8075 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8076 emit_int8(0x18); 8077 emit_operand(dst, src); 8078 // 0x00 - insert into lower 128 bits 8079 // 0x01 - insert into upper 128 bits 8080 emit_int8(imm8 & 0x01); 8081 } 8082 8083 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 8084 assert(VM_Version::supports_avx2(), ""); 8085 assert(imm8 <= 0x03, "imm8: %u", imm8); 8086 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8087 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8088 // imm8: 8089 // 0x00 - insert into q0 128 bits (0..127) 8090 // 0x01 - insert into q1 128 bits (128..255) 8091 // 0x02 - insert into q0 128 bits (256..383) 8092 // 0x03 - insert into q1 128 bits (384..512) 8093 emit_int24(0x18, (0xC0 | encode), imm8 & 0x03); 8094 } 8095 8096 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 8097 assert(VM_Version::supports_avx(), ""); 8098 assert(dst != xnoreg, "sanity"); 8099 assert(imm8 <= 0x03, "imm8: %u", imm8); 8100 InstructionMark im(this); 8101 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8102 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8103 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8104 emit_int8(0x18); 8105 emit_operand(dst, src); 8106 // 0x00 - insert into q0 128 bits (0..127) 8107 // 0x01 - insert into q1 128 bits (128..255) 8108 // 0x02 - insert into q0 128 bits (256..383) 8109 // 0x03 - insert into q1 128 bits (384..512) 8110 emit_int8(imm8 & 0x03); 8111 } 8112 8113 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 8114 assert(VM_Version::supports_evex(), ""); 8115 assert(imm8 <= 0x01, "imm8: %u", imm8); 8116 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8117 attributes.set_is_evex_instruction(); 8118 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8119 // imm8: 8120 // 0x00 - insert into lower 256 bits 8121 // 0x01 - insert into upper 256 bits 8122 emit_int24(0x1A, (0xC0 | encode), imm8 & 0x01); 8123 } 8124 8125 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 8126 assert(VM_Version::supports_evex(), ""); 8127 assert(dst != xnoreg, "sanity"); 8128 assert(imm8 <= 0x01, "imm8: %u", imm8); 8129 InstructionMark im(this); 8130 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8131 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); 8132 attributes.set_is_evex_instruction(); 8133 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8134 emit_int8(0x1A); 8135 emit_operand(dst, src); 8136 // 0x00 - insert into lower 256 bits 8137 // 0x01 - insert into upper 256 bits 8138 emit_int8(imm8 & 0x01); 8139 } 8140 8141 8142 // vextracti forms 8143 8144 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8145 assert(VM_Version::supports_avx2(), ""); 8146 assert(imm8 <= 0x01, "imm8: %u", imm8); 8147 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8148 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8149 // imm8: 8150 // 0x00 - extract from lower 128 bits 8151 // 0x01 - extract from upper 128 bits 8152 emit_int24(0x39, (0xC0 | encode), imm8 & 0x01); 8153 } 8154 8155 void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 8156 assert(VM_Version::supports_avx2(), ""); 8157 assert(src != xnoreg, "sanity"); 8158 assert(imm8 <= 0x01, "imm8: %u", imm8); 8159 InstructionMark im(this); 8160 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8161 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8162 attributes.reset_is_clear_context(); 8163 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8164 emit_int8(0x39); 8165 emit_operand(src, dst); 8166 // 0x00 - extract from lower 128 bits 8167 // 0x01 - extract from upper 128 bits 8168 emit_int8(imm8 & 0x01); 8169 } 8170 8171 void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8172 assert(VM_Version::supports_evex(), ""); 8173 assert(imm8 <= 0x03, "imm8: %u", imm8); 8174 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8175 attributes.set_is_evex_instruction(); 8176 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8177 // imm8: 8178 // 0x00 - extract from bits 127:0 8179 // 0x01 - extract from bits 255:128 8180 // 0x02 - extract from bits 383:256 8181 // 0x03 - extract from bits 511:384 8182 emit_int24(0x39, (0xC0 | encode), imm8 & 0x03); 8183 } 8184 8185 void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) { 8186 assert(VM_Version::supports_evex(), ""); 8187 assert(src != xnoreg, "sanity"); 8188 assert(imm8 <= 0x03, "imm8: %u", imm8); 8189 InstructionMark im(this); 8190 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8191 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8192 attributes.reset_is_clear_context(); 8193 attributes.set_is_evex_instruction(); 8194 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8195 emit_int8(0x39); 8196 emit_operand(src, dst); 8197 // 0x00 - extract from bits 127:0 8198 // 0x01 - extract from bits 255:128 8199 // 0x02 - extract from bits 383:256 8200 // 0x03 - extract from bits 511:384 8201 emit_int8(imm8 & 0x03); 8202 } 8203 8204 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8205 assert(VM_Version::supports_avx512dq(), ""); 8206 assert(imm8 <= 0x03, "imm8: %u", imm8); 8207 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8208 attributes.set_is_evex_instruction(); 8209 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8210 // imm8: 8211 // 0x00 - extract from bits 127:0 8212 // 0x01 - extract from bits 255:128 8213 // 0x02 - extract from bits 383:256 8214 // 0x03 - extract from bits 511:384 8215 emit_int24(0x39, (0xC0 | encode), imm8 & 0x03); 8216 } 8217 8218 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8219 assert(VM_Version::supports_evex(), ""); 8220 assert(imm8 <= 0x01, "imm8: %u", imm8); 8221 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8222 attributes.set_is_evex_instruction(); 8223 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8224 // imm8: 8225 // 0x00 - extract from lower 256 bits 8226 // 0x01 - extract from upper 256 bits 8227 emit_int24(0x3B, (0xC0 | encode), imm8 & 0x01); 8228 } 8229 8230 void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) { 8231 assert(VM_Version::supports_evex(), ""); 8232 assert(src != xnoreg, "sanity"); 8233 assert(imm8 <= 0x01, "imm8: %u", imm8); 8234 InstructionMark im(this); 8235 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8236 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); 8237 attributes.reset_is_clear_context(); 8238 attributes.set_is_evex_instruction(); 8239 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8240 emit_int8(0x38); 8241 emit_operand(src, dst); 8242 // 0x00 - extract from lower 256 bits 8243 // 0x01 - extract from upper 256 bits 8244 emit_int8(imm8 & 0x01); 8245 } 8246 // vextractf forms 8247 8248 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8249 assert(VM_Version::supports_avx(), ""); 8250 assert(imm8 <= 0x01, "imm8: %u", imm8); 8251 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8252 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8253 // imm8: 8254 // 0x00 - extract from lower 128 bits 8255 // 0x01 - extract from upper 128 bits 8256 emit_int24(0x19, (0xC0 | encode), imm8 & 0x01); 8257 } 8258 8259 void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) { 8260 assert(VM_Version::supports_avx(), ""); 8261 assert(src != xnoreg, "sanity"); 8262 assert(imm8 <= 0x01, "imm8: %u", imm8); 8263 InstructionMark im(this); 8264 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8265 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8266 attributes.reset_is_clear_context(); 8267 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8268 emit_int8(0x19); 8269 emit_operand(src, dst); 8270 // 0x00 - extract from lower 128 bits 8271 // 0x01 - extract from upper 128 bits 8272 emit_int8(imm8 & 0x01); 8273 } 8274 8275 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8276 assert(VM_Version::supports_evex(), ""); 8277 assert(imm8 <= 0x03, "imm8: %u", imm8); 8278 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8279 attributes.set_is_evex_instruction(); 8280 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8281 // imm8: 8282 // 0x00 - extract from bits 127:0 8283 // 0x01 - extract from bits 255:128 8284 // 0x02 - extract from bits 383:256 8285 // 0x03 - extract from bits 511:384 8286 emit_int24(0x19, (0xC0 | encode), imm8 & 0x03); 8287 } 8288 8289 void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) { 8290 assert(VM_Version::supports_evex(), ""); 8291 assert(src != xnoreg, "sanity"); 8292 assert(imm8 <= 0x03, "imm8: %u", imm8); 8293 InstructionMark im(this); 8294 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8295 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 8296 attributes.reset_is_clear_context(); 8297 attributes.set_is_evex_instruction(); 8298 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8299 emit_int8(0x19); 8300 emit_operand(src, dst); 8301 // 0x00 - extract from bits 127:0 8302 // 0x01 - extract from bits 255:128 8303 // 0x02 - extract from bits 383:256 8304 // 0x03 - extract from bits 511:384 8305 emit_int8(imm8 & 0x03); 8306 } 8307 8308 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8309 assert(VM_Version::supports_avx512dq(), ""); 8310 assert(imm8 <= 0x03, "imm8: %u", imm8); 8311 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8312 attributes.set_is_evex_instruction(); 8313 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8314 // imm8: 8315 // 0x00 - extract from bits 127:0 8316 // 0x01 - extract from bits 255:128 8317 // 0x02 - extract from bits 383:256 8318 // 0x03 - extract from bits 511:384 8319 emit_int24(0x19, (0xC0 | encode), imm8 & 0x03); 8320 } 8321 8322 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 8323 assert(VM_Version::supports_evex(), ""); 8324 assert(imm8 <= 0x01, "imm8: %u", imm8); 8325 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8326 attributes.set_is_evex_instruction(); 8327 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8328 // imm8: 8329 // 0x00 - extract from lower 256 bits 8330 // 0x01 - extract from upper 256 bits 8331 emit_int24(0x1B, (0xC0 | encode), imm8 & 0x01); 8332 } 8333 8334 void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) { 8335 assert(VM_Version::supports_evex(), ""); 8336 assert(src != xnoreg, "sanity"); 8337 assert(imm8 <= 0x01, "imm8: %u", imm8); 8338 InstructionMark im(this); 8339 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8340 attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit); 8341 attributes.reset_is_clear_context(); 8342 attributes.set_is_evex_instruction(); 8343 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 8344 emit_int8(0x1B); 8345 emit_operand(src, dst); 8346 // 0x00 - extract from lower 256 bits 8347 // 0x01 - extract from upper 256 bits 8348 emit_int8(imm8 & 0x01); 8349 } 8350 8351 // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL 8352 void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) { 8353 assert(VM_Version::supports_avx2(), ""); 8354 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 8355 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8356 emit_int16(0x78, (0xC0 | encode)); 8357 } 8358 8359 void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) { 8360 assert(VM_Version::supports_avx2(), ""); 8361 assert(dst != xnoreg, "sanity"); 8362 InstructionMark im(this); 8363 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 8364 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); 8365 // swap src<->dst for encoding 8366 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8367 emit_int8(0x78); 8368 emit_operand(dst, src); 8369 } 8370 8371 // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL 8372 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) { 8373 assert(VM_Version::supports_avx2(), ""); 8374 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 8375 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8376 emit_int16(0x79, (0xC0 | encode)); 8377 } 8378 8379 void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) { 8380 assert(VM_Version::supports_avx2(), ""); 8381 assert(dst != xnoreg, "sanity"); 8382 InstructionMark im(this); 8383 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 8384 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); 8385 // swap src<->dst for encoding 8386 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8387 emit_int8(0x79); 8388 emit_operand(dst, src); 8389 } 8390 8391 void Assembler::vpsadbw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 8392 assert(UseAVX > 0, "requires some form of AVX"); 8393 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 8394 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8395 emit_int16((unsigned char)0xF6, (0xC0 | encode)); 8396 } 8397 8398 void Assembler::vpunpckhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 8399 assert(UseAVX > 0, "requires some form of AVX"); 8400 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8401 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8402 emit_int16(0x6A, (0xC0 | encode)); 8403 } 8404 8405 void Assembler::vpunpckldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 8406 assert(UseAVX > 0, "requires some form of AVX"); 8407 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 8408 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8409 emit_int16(0x62, (0xC0 | encode)); 8410 } 8411 8412 // xmm/mem sourced byte/word/dword/qword replicate 8413 void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8414 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8415 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8416 attributes.set_is_evex_instruction(); 8417 attributes.set_embedded_opmask_register_specifier(mask); 8418 if (merge) { 8419 attributes.reset_is_clear_context(); 8420 } 8421 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8422 emit_int16((unsigned char)0xFC, (0xC0 | encode)); 8423 } 8424 8425 void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8426 InstructionMark im(this); 8427 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8428 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8429 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8430 attributes.set_is_evex_instruction(); 8431 attributes.set_embedded_opmask_register_specifier(mask); 8432 if (merge) { 8433 attributes.reset_is_clear_context(); 8434 } 8435 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8436 emit_int8((unsigned char)0xFC); 8437 emit_operand(dst, src); 8438 } 8439 8440 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8441 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8442 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8443 attributes.set_is_evex_instruction(); 8444 attributes.set_embedded_opmask_register_specifier(mask); 8445 if (merge) { 8446 attributes.reset_is_clear_context(); 8447 } 8448 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8449 emit_int16((unsigned char)0xFD, (0xC0 | encode)); 8450 } 8451 8452 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8453 InstructionMark im(this); 8454 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8455 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8456 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8457 attributes.set_is_evex_instruction(); 8458 attributes.set_embedded_opmask_register_specifier(mask); 8459 if (merge) { 8460 attributes.reset_is_clear_context(); 8461 } 8462 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8463 emit_int8((unsigned char)0xFD); 8464 emit_operand(dst, src); 8465 } 8466 8467 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8468 assert(VM_Version::supports_evex(), ""); 8469 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8470 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8471 attributes.set_is_evex_instruction(); 8472 attributes.set_embedded_opmask_register_specifier(mask); 8473 if (merge) { 8474 attributes.reset_is_clear_context(); 8475 } 8476 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8477 emit_int16((unsigned char)0xFE, (0xC0 | encode)); 8478 } 8479 8480 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8481 InstructionMark im(this); 8482 assert(VM_Version::supports_evex(), ""); 8483 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8484 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8485 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8486 attributes.set_is_evex_instruction(); 8487 attributes.set_embedded_opmask_register_specifier(mask); 8488 if (merge) { 8489 attributes.reset_is_clear_context(); 8490 } 8491 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8492 emit_int8((unsigned char)0xFE); 8493 emit_operand(dst, src); 8494 } 8495 8496 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8497 assert(VM_Version::supports_evex(), ""); 8498 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8499 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8500 attributes.set_is_evex_instruction(); 8501 attributes.set_embedded_opmask_register_specifier(mask); 8502 if (merge) { 8503 attributes.reset_is_clear_context(); 8504 } 8505 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8506 emit_int16((unsigned char)0xD4, (0xC0 | encode)); 8507 } 8508 8509 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8510 InstructionMark im(this); 8511 assert(VM_Version::supports_evex(), ""); 8512 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8513 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8514 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8515 attributes.set_is_evex_instruction(); 8516 attributes.set_embedded_opmask_register_specifier(mask); 8517 if (merge) { 8518 attributes.reset_is_clear_context(); 8519 } 8520 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8521 emit_int8((unsigned char)0xD4); 8522 emit_operand(dst, src); 8523 } 8524 8525 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8526 assert(VM_Version::supports_evex(), ""); 8527 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8528 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8529 attributes.set_is_evex_instruction(); 8530 attributes.set_embedded_opmask_register_specifier(mask); 8531 if (merge) { 8532 attributes.reset_is_clear_context(); 8533 } 8534 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8535 emit_int16(0x58, (0xC0 | encode)); 8536 } 8537 8538 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8539 InstructionMark im(this); 8540 assert(VM_Version::supports_evex(), ""); 8541 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8542 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8543 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8544 attributes.set_is_evex_instruction(); 8545 attributes.set_embedded_opmask_register_specifier(mask); 8546 if (merge) { 8547 attributes.reset_is_clear_context(); 8548 } 8549 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8550 emit_int8(0x58); 8551 emit_operand(dst, src); 8552 } 8553 8554 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8555 assert(VM_Version::supports_evex(), ""); 8556 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8557 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8558 attributes.set_is_evex_instruction(); 8559 attributes.set_embedded_opmask_register_specifier(mask); 8560 if (merge) { 8561 attributes.reset_is_clear_context(); 8562 } 8563 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8564 emit_int16(0x58, (0xC0 | encode)); 8565 } 8566 8567 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8568 InstructionMark im(this); 8569 assert(VM_Version::supports_evex(), ""); 8570 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8571 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8572 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8573 attributes.set_is_evex_instruction(); 8574 attributes.set_embedded_opmask_register_specifier(mask); 8575 if (merge) { 8576 attributes.reset_is_clear_context(); 8577 } 8578 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8579 emit_int8(0x58); 8580 emit_operand(dst, src); 8581 } 8582 8583 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8584 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8585 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8586 attributes.set_is_evex_instruction(); 8587 attributes.set_embedded_opmask_register_specifier(mask); 8588 if (merge) { 8589 attributes.reset_is_clear_context(); 8590 } 8591 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8592 emit_int16((unsigned char)0xF8, (0xC0 | encode)); 8593 } 8594 8595 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8596 InstructionMark im(this); 8597 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8598 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8599 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8600 attributes.set_is_evex_instruction(); 8601 attributes.set_embedded_opmask_register_specifier(mask); 8602 if (merge) { 8603 attributes.reset_is_clear_context(); 8604 } 8605 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8606 emit_int8((unsigned char)0xF8); 8607 emit_operand(dst, src); 8608 } 8609 8610 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8611 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8612 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8613 attributes.set_is_evex_instruction(); 8614 attributes.set_embedded_opmask_register_specifier(mask); 8615 if (merge) { 8616 attributes.reset_is_clear_context(); 8617 } 8618 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8619 emit_int16((unsigned char)0xF9, (0xC0 | encode)); 8620 } 8621 8622 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8623 InstructionMark im(this); 8624 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8625 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8626 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8627 attributes.set_is_evex_instruction(); 8628 attributes.set_embedded_opmask_register_specifier(mask); 8629 if (merge) { 8630 attributes.reset_is_clear_context(); 8631 } 8632 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8633 emit_int8((unsigned char)0xF9); 8634 emit_operand(dst, src); 8635 } 8636 8637 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8638 assert(VM_Version::supports_evex(), ""); 8639 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8640 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8641 attributes.set_is_evex_instruction(); 8642 attributes.set_embedded_opmask_register_specifier(mask); 8643 if (merge) { 8644 attributes.reset_is_clear_context(); 8645 } 8646 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8647 emit_int16((unsigned char)0xFA, (0xC0 | encode)); 8648 } 8649 8650 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8651 InstructionMark im(this); 8652 assert(VM_Version::supports_evex(), ""); 8653 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8654 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8655 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8656 attributes.set_is_evex_instruction(); 8657 attributes.set_embedded_opmask_register_specifier(mask); 8658 if (merge) { 8659 attributes.reset_is_clear_context(); 8660 } 8661 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8662 emit_int8((unsigned char)0xFA); 8663 emit_operand(dst, src); 8664 } 8665 8666 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8667 assert(VM_Version::supports_evex(), ""); 8668 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8669 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8670 attributes.set_is_evex_instruction(); 8671 attributes.set_embedded_opmask_register_specifier(mask); 8672 if (merge) { 8673 attributes.reset_is_clear_context(); 8674 } 8675 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8676 emit_int16((unsigned char)0xFB, (0xC0 | encode)); 8677 } 8678 8679 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8680 InstructionMark im(this); 8681 assert(VM_Version::supports_evex(), ""); 8682 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8683 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8684 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8685 attributes.set_is_evex_instruction(); 8686 attributes.set_embedded_opmask_register_specifier(mask); 8687 if (merge) { 8688 attributes.reset_is_clear_context(); 8689 } 8690 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8691 emit_int8((unsigned char)0xFB); 8692 emit_operand(dst, src); 8693 } 8694 8695 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8696 assert(VM_Version::supports_evex(), ""); 8697 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8698 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8699 attributes.set_is_evex_instruction(); 8700 attributes.set_embedded_opmask_register_specifier(mask); 8701 if (merge) { 8702 attributes.reset_is_clear_context(); 8703 } 8704 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8705 emit_int16(0x5C, (0xC0 | encode)); 8706 } 8707 8708 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8709 InstructionMark im(this); 8710 assert(VM_Version::supports_evex(), ""); 8711 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8712 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8713 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8714 attributes.set_is_evex_instruction(); 8715 attributes.set_embedded_opmask_register_specifier(mask); 8716 if (merge) { 8717 attributes.reset_is_clear_context(); 8718 } 8719 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8720 emit_int8(0x5C); 8721 emit_operand(dst, src); 8722 } 8723 8724 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8725 assert(VM_Version::supports_evex(), ""); 8726 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8727 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8728 attributes.set_is_evex_instruction(); 8729 attributes.set_embedded_opmask_register_specifier(mask); 8730 if (merge) { 8731 attributes.reset_is_clear_context(); 8732 } 8733 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8734 emit_int16(0x5C, (0xC0 | encode)); 8735 } 8736 8737 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8738 InstructionMark im(this); 8739 assert(VM_Version::supports_evex(), ""); 8740 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8741 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8742 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8743 attributes.set_is_evex_instruction(); 8744 attributes.set_embedded_opmask_register_specifier(mask); 8745 if (merge) { 8746 attributes.reset_is_clear_context(); 8747 } 8748 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8749 emit_int8(0x5C); 8750 emit_operand(dst, src); 8751 } 8752 8753 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8754 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8755 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8756 attributes.set_is_evex_instruction(); 8757 attributes.set_embedded_opmask_register_specifier(mask); 8758 if (merge) { 8759 attributes.reset_is_clear_context(); 8760 } 8761 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8762 emit_int16((unsigned char)0xD5, (0xC0 | encode)); 8763 } 8764 8765 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8766 InstructionMark im(this); 8767 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8768 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8769 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8770 attributes.set_is_evex_instruction(); 8771 attributes.set_embedded_opmask_register_specifier(mask); 8772 if (merge) { 8773 attributes.reset_is_clear_context(); 8774 } 8775 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8776 emit_int8((unsigned char)0xD5); 8777 emit_operand(dst, src); 8778 } 8779 8780 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8781 assert(VM_Version::supports_evex(), ""); 8782 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8783 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8784 attributes.set_is_evex_instruction(); 8785 attributes.set_embedded_opmask_register_specifier(mask); 8786 if (merge) { 8787 attributes.reset_is_clear_context(); 8788 } 8789 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8790 emit_int16(0x40, (0xC0 | encode)); 8791 } 8792 8793 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8794 InstructionMark im(this); 8795 assert(VM_Version::supports_evex(), ""); 8796 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8797 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8798 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8799 attributes.set_is_evex_instruction(); 8800 attributes.set_embedded_opmask_register_specifier(mask); 8801 if (merge) { 8802 attributes.reset_is_clear_context(); 8803 } 8804 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8805 emit_int8(0x40); 8806 emit_operand(dst, src); 8807 } 8808 8809 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8810 assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8811 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8812 attributes.set_is_evex_instruction(); 8813 attributes.set_embedded_opmask_register_specifier(mask); 8814 if (merge) { 8815 attributes.reset_is_clear_context(); 8816 } 8817 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8818 emit_int16(0x40, (0xC0 | encode)); 8819 } 8820 8821 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8822 InstructionMark im(this); 8823 assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 8824 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8825 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8826 attributes.set_is_evex_instruction(); 8827 attributes.set_embedded_opmask_register_specifier(mask); 8828 if (merge) { 8829 attributes.reset_is_clear_context(); 8830 } 8831 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 8832 emit_int8(0x40); 8833 emit_operand(dst, src); 8834 } 8835 8836 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8837 assert(VM_Version::supports_evex(), ""); 8838 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8839 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8840 attributes.set_is_evex_instruction(); 8841 attributes.set_embedded_opmask_register_specifier(mask); 8842 if (merge) { 8843 attributes.reset_is_clear_context(); 8844 } 8845 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8846 emit_int16(0x59, (0xC0 | encode)); 8847 } 8848 8849 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8850 InstructionMark im(this); 8851 assert(VM_Version::supports_evex(), ""); 8852 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8853 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8854 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8855 attributes.set_is_evex_instruction(); 8856 attributes.set_embedded_opmask_register_specifier(mask); 8857 if (merge) { 8858 attributes.reset_is_clear_context(); 8859 } 8860 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8861 emit_int8(0x59); 8862 emit_operand(dst, src); 8863 } 8864 8865 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8866 assert(VM_Version::supports_evex(), ""); 8867 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8868 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8869 attributes.set_is_evex_instruction(); 8870 attributes.set_embedded_opmask_register_specifier(mask); 8871 if (merge) { 8872 attributes.reset_is_clear_context(); 8873 } 8874 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8875 emit_int16(0x59, (0xC0 | encode)); 8876 } 8877 8878 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8879 InstructionMark im(this); 8880 assert(VM_Version::supports_evex(), ""); 8881 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8882 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8883 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8884 attributes.set_is_evex_instruction(); 8885 attributes.set_embedded_opmask_register_specifier(mask); 8886 if (merge) { 8887 attributes.reset_is_clear_context(); 8888 } 8889 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8890 emit_int8(0x59); 8891 emit_operand(dst, src); 8892 } 8893 8894 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8895 assert(VM_Version::supports_evex(), ""); 8896 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8897 InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8898 attributes.set_is_evex_instruction(); 8899 attributes.set_embedded_opmask_register_specifier(mask); 8900 if (merge) { 8901 attributes.reset_is_clear_context(); 8902 } 8903 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8904 emit_int16(0x51, (0xC0 | encode)); 8905 } 8906 8907 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8908 InstructionMark im(this); 8909 assert(VM_Version::supports_evex(), ""); 8910 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8911 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8912 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8913 attributes.set_is_evex_instruction(); 8914 attributes.set_embedded_opmask_register_specifier(mask); 8915 if (merge) { 8916 attributes.reset_is_clear_context(); 8917 } 8918 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8919 emit_int8(0x51); 8920 emit_operand(dst, src); 8921 } 8922 8923 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8924 assert(VM_Version::supports_evex(), ""); 8925 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8926 InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8927 attributes.set_is_evex_instruction(); 8928 attributes.set_embedded_opmask_register_specifier(mask); 8929 if (merge) { 8930 attributes.reset_is_clear_context(); 8931 } 8932 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8933 emit_int16(0x51, (0xC0 | encode)); 8934 } 8935 8936 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8937 InstructionMark im(this); 8938 assert(VM_Version::supports_evex(), ""); 8939 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8940 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8941 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8942 attributes.set_is_evex_instruction(); 8943 attributes.set_embedded_opmask_register_specifier(mask); 8944 if (merge) { 8945 attributes.reset_is_clear_context(); 8946 } 8947 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8948 emit_int8(0x51); 8949 emit_operand(dst, src); 8950 } 8951 8952 8953 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8954 assert(VM_Version::supports_evex(), ""); 8955 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8956 InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8957 attributes.set_is_evex_instruction(); 8958 attributes.set_embedded_opmask_register_specifier(mask); 8959 if (merge) { 8960 attributes.reset_is_clear_context(); 8961 } 8962 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8963 emit_int16(0x5E, (0xC0 | encode)); 8964 } 8965 8966 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8967 InstructionMark im(this); 8968 assert(VM_Version::supports_evex(), ""); 8969 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8970 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8971 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 8972 attributes.set_is_evex_instruction(); 8973 attributes.set_embedded_opmask_register_specifier(mask); 8974 if (merge) { 8975 attributes.reset_is_clear_context(); 8976 } 8977 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 8978 emit_int8(0x5E); 8979 emit_operand(dst, src); 8980 } 8981 8982 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8983 assert(VM_Version::supports_evex(), ""); 8984 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8985 InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 8986 attributes.set_is_evex_instruction(); 8987 attributes.set_embedded_opmask_register_specifier(mask); 8988 if (merge) { 8989 attributes.reset_is_clear_context(); 8990 } 8991 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 8992 emit_int16(0x5E, (0xC0 | encode)); 8993 } 8994 8995 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8996 InstructionMark im(this); 8997 assert(VM_Version::supports_evex(), ""); 8998 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 8999 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9000 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9001 attributes.set_is_evex_instruction(); 9002 attributes.set_embedded_opmask_register_specifier(mask); 9003 if (merge) { 9004 attributes.reset_is_clear_context(); 9005 } 9006 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9007 emit_int8(0x5E); 9008 emit_operand(dst, src); 9009 } 9010 9011 void Assembler::evpabsb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 9012 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9013 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9014 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9015 attributes.set_is_evex_instruction(); 9016 attributes.set_embedded_opmask_register_specifier(mask); 9017 if (merge) { 9018 attributes.reset_is_clear_context(); 9019 } 9020 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9021 emit_int16(0x1C, (0xC0 | encode)); 9022 } 9023 9024 9025 void Assembler::evpabsb(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 9026 InstructionMark im(this); 9027 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9028 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9029 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9030 attributes.set_is_evex_instruction(); 9031 attributes.set_embedded_opmask_register_specifier(mask); 9032 if (merge) { 9033 attributes.reset_is_clear_context(); 9034 } 9035 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9036 emit_int8(0x1C); 9037 emit_operand(dst, src); 9038 } 9039 9040 void Assembler::evpabsw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 9041 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9042 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9043 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9044 attributes.set_is_evex_instruction(); 9045 attributes.set_embedded_opmask_register_specifier(mask); 9046 if (merge) { 9047 attributes.reset_is_clear_context(); 9048 } 9049 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9050 emit_int16(0x1D, (0xC0 | encode)); 9051 } 9052 9053 9054 void Assembler::evpabsw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 9055 InstructionMark im(this); 9056 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9057 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9058 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9059 attributes.set_is_evex_instruction(); 9060 attributes.set_embedded_opmask_register_specifier(mask); 9061 if (merge) { 9062 attributes.reset_is_clear_context(); 9063 } 9064 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9065 emit_int8(0x1D); 9066 emit_operand(dst, src); 9067 } 9068 9069 void Assembler::evpabsd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 9070 assert(VM_Version::supports_evex(), ""); 9071 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9072 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9073 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9074 attributes.set_is_evex_instruction(); 9075 attributes.set_embedded_opmask_register_specifier(mask); 9076 if (merge) { 9077 attributes.reset_is_clear_context(); 9078 } 9079 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9080 emit_int16(0x1E, (0xC0 | encode)); 9081 } 9082 9083 9084 void Assembler::evpabsd(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 9085 InstructionMark im(this); 9086 assert(VM_Version::supports_evex(), ""); 9087 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9088 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9089 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9090 attributes.set_is_evex_instruction(); 9091 attributes.set_embedded_opmask_register_specifier(mask); 9092 if (merge) { 9093 attributes.reset_is_clear_context(); 9094 } 9095 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9096 emit_int8(0x1E); 9097 emit_operand(dst, src); 9098 } 9099 9100 void Assembler::evpabsq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 9101 assert(VM_Version::supports_evex(), ""); 9102 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9103 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9104 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9105 attributes.set_is_evex_instruction(); 9106 attributes.set_embedded_opmask_register_specifier(mask); 9107 if (merge) { 9108 attributes.reset_is_clear_context(); 9109 } 9110 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9111 emit_int16(0x1F, (0xC0 | encode)); 9112 } 9113 9114 9115 void Assembler::evpabsq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { 9116 InstructionMark im(this); 9117 assert(VM_Version::supports_evex(), ""); 9118 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9119 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9120 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9121 attributes.set_is_evex_instruction(); 9122 attributes.set_embedded_opmask_register_specifier(mask); 9123 if (merge) { 9124 attributes.reset_is_clear_context(); 9125 } 9126 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9127 emit_int8(0x1F); 9128 emit_operand(dst, src); 9129 } 9130 9131 void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9132 assert(VM_Version::supports_evex(), ""); 9133 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9134 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9135 attributes.set_is_evex_instruction(); 9136 attributes.set_embedded_opmask_register_specifier(mask); 9137 if (merge) { 9138 attributes.reset_is_clear_context(); 9139 } 9140 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9141 emit_int16((unsigned char)0xA8, (0xC0 | encode)); 9142 } 9143 9144 void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9145 InstructionMark im(this); 9146 assert(VM_Version::supports_evex(), ""); 9147 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9148 InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9149 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9150 attributes.set_is_evex_instruction(); 9151 attributes.set_embedded_opmask_register_specifier(mask); 9152 if (merge) { 9153 attributes.reset_is_clear_context(); 9154 } 9155 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9156 emit_int8((unsigned char)0xA8); 9157 emit_operand(dst, src); 9158 } 9159 9160 void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9161 assert(VM_Version::supports_evex(), ""); 9162 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9163 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9164 attributes.set_is_evex_instruction(); 9165 attributes.set_embedded_opmask_register_specifier(mask); 9166 if (merge) { 9167 attributes.reset_is_clear_context(); 9168 } 9169 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9170 emit_int16((unsigned char)0xA8, (0xC0 | encode)); 9171 } 9172 9173 void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9174 InstructionMark im(this); 9175 assert(VM_Version::supports_evex(), ""); 9176 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9177 InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true); 9178 attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit); 9179 attributes.set_is_evex_instruction(); 9180 attributes.set_embedded_opmask_register_specifier(mask); 9181 if (merge) { 9182 attributes.reset_is_clear_context(); 9183 } 9184 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9185 emit_int8((unsigned char)0xA8); 9186 emit_operand(dst, src); 9187 } 9188 9189 void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9190 assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9191 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9192 attributes.set_is_evex_instruction(); 9193 attributes.set_embedded_opmask_register_specifier(mask); 9194 if (merge) { 9195 attributes.reset_is_clear_context(); 9196 } 9197 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9198 emit_int16((unsigned char)0x8D, (0xC0 | encode)); 9199 } 9200 9201 void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9202 assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9203 InstructionMark im(this); 9204 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9205 attributes.set_is_evex_instruction(); 9206 attributes.set_embedded_opmask_register_specifier(mask); 9207 if (merge) { 9208 attributes.reset_is_clear_context(); 9209 } 9210 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9211 emit_int8((unsigned char)0x8D); 9212 emit_operand(dst, src); 9213 } 9214 9215 void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9216 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9217 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9218 attributes.set_is_evex_instruction(); 9219 attributes.set_embedded_opmask_register_specifier(mask); 9220 if (merge) { 9221 attributes.reset_is_clear_context(); 9222 } 9223 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9224 emit_int16((unsigned char)0x8D, (0xC0 | encode)); 9225 } 9226 9227 void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9228 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9229 InstructionMark im(this); 9230 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9231 attributes.set_is_evex_instruction(); 9232 attributes.set_embedded_opmask_register_specifier(mask); 9233 if (merge) { 9234 attributes.reset_is_clear_context(); 9235 } 9236 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9237 emit_int8((unsigned char)0x8D); 9238 emit_operand(dst, src); 9239 } 9240 9241 void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9242 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, ""); 9243 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9244 attributes.set_is_evex_instruction(); 9245 attributes.set_embedded_opmask_register_specifier(mask); 9246 if (merge) { 9247 attributes.reset_is_clear_context(); 9248 } 9249 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9250 emit_int16(0x36, (0xC0 | encode)); 9251 } 9252 9253 void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9254 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, ""); 9255 InstructionMark im(this); 9256 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9257 attributes.set_is_evex_instruction(); 9258 attributes.set_embedded_opmask_register_specifier(mask); 9259 if (merge) { 9260 attributes.reset_is_clear_context(); 9261 } 9262 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9263 emit_int8(0x36); 9264 emit_operand(dst, src); 9265 } 9266 9267 void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9268 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, ""); 9269 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9270 attributes.set_is_evex_instruction(); 9271 attributes.set_embedded_opmask_register_specifier(mask); 9272 if (merge) { 9273 attributes.reset_is_clear_context(); 9274 } 9275 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9276 emit_int16(0x36, (0xC0 | encode)); 9277 } 9278 9279 void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9280 assert(VM_Version::supports_evex() && vector_len > AVX_128bit, ""); 9281 InstructionMark im(this); 9282 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9283 attributes.set_is_evex_instruction(); 9284 attributes.set_embedded_opmask_register_specifier(mask); 9285 if (merge) { 9286 attributes.reset_is_clear_context(); 9287 } 9288 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9289 emit_int8(0x36); 9290 emit_operand(dst, src); 9291 } 9292 9293 void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9294 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9295 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9296 attributes.set_is_evex_instruction(); 9297 attributes.set_embedded_opmask_register_specifier(mask); 9298 if (merge) { 9299 attributes.reset_is_clear_context(); 9300 } 9301 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9302 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 9303 } 9304 9305 void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9306 assert(VM_Version::supports_evex(), ""); 9307 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9308 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9309 attributes.set_is_evex_instruction(); 9310 attributes.set_embedded_opmask_register_specifier(mask); 9311 if (merge) { 9312 attributes.reset_is_clear_context(); 9313 } 9314 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9315 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 9316 } 9317 9318 void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9319 assert(VM_Version::supports_evex(), ""); 9320 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9321 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9322 attributes.set_is_evex_instruction(); 9323 attributes.set_embedded_opmask_register_specifier(mask); 9324 if (merge) { 9325 attributes.reset_is_clear_context(); 9326 } 9327 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9328 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 9329 } 9330 9331 void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9332 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9333 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9334 attributes.set_is_evex_instruction(); 9335 attributes.set_embedded_opmask_register_specifier(mask); 9336 if (merge) { 9337 attributes.reset_is_clear_context(); 9338 } 9339 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9340 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 9341 } 9342 9343 void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9344 assert(VM_Version::supports_evex(), ""); 9345 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9346 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9347 attributes.set_is_evex_instruction(); 9348 attributes.set_embedded_opmask_register_specifier(mask); 9349 if (merge) { 9350 attributes.reset_is_clear_context(); 9351 } 9352 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9353 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 9354 } 9355 9356 void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9357 assert(VM_Version::supports_evex(), ""); 9358 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9359 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9360 attributes.set_is_evex_instruction(); 9361 attributes.set_embedded_opmask_register_specifier(mask); 9362 if (merge) { 9363 attributes.reset_is_clear_context(); 9364 } 9365 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9366 emit_int24(0x73, (0xC0 | encode), shift & 0xFF); 9367 } 9368 9369 void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9370 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9371 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9372 attributes.set_is_evex_instruction(); 9373 attributes.set_embedded_opmask_register_specifier(mask); 9374 if (merge) { 9375 attributes.reset_is_clear_context(); 9376 } 9377 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9378 emit_int24(0x71, (0xC0 | encode), shift & 0xFF); 9379 } 9380 9381 void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9382 assert(VM_Version::supports_evex(), ""); 9383 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9384 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9385 attributes.set_is_evex_instruction(); 9386 attributes.set_embedded_opmask_register_specifier(mask); 9387 if (merge) { 9388 attributes.reset_is_clear_context(); 9389 } 9390 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9391 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 9392 } 9393 9394 void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 9395 assert(VM_Version::supports_evex(), ""); 9396 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9397 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9398 attributes.set_is_evex_instruction(); 9399 attributes.set_embedded_opmask_register_specifier(mask); 9400 if (merge) { 9401 attributes.reset_is_clear_context(); 9402 } 9403 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9404 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 9405 } 9406 9407 void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9408 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9409 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9410 attributes.set_is_evex_instruction(); 9411 attributes.set_embedded_opmask_register_specifier(mask); 9412 if (merge) { 9413 attributes.reset_is_clear_context(); 9414 } 9415 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9416 emit_int16((unsigned char)0xF1, (0xC0 | encode)); 9417 } 9418 9419 void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9420 assert(VM_Version::supports_evex(), ""); 9421 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9422 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9423 attributes.set_is_evex_instruction(); 9424 attributes.set_embedded_opmask_register_specifier(mask); 9425 if (merge) { 9426 attributes.reset_is_clear_context(); 9427 } 9428 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9429 emit_int16((unsigned char)0xF2, (0xC0 | encode)); 9430 } 9431 9432 void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9433 assert(VM_Version::supports_evex(), ""); 9434 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9435 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9436 attributes.set_is_evex_instruction(); 9437 attributes.set_embedded_opmask_register_specifier(mask); 9438 if (merge) { 9439 attributes.reset_is_clear_context(); 9440 } 9441 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9442 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 9443 } 9444 9445 void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9446 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9447 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9448 attributes.set_is_evex_instruction(); 9449 attributes.set_embedded_opmask_register_specifier(mask); 9450 if (merge) { 9451 attributes.reset_is_clear_context(); 9452 } 9453 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9454 emit_int16((unsigned char)0xD1, (0xC0 | encode)); 9455 } 9456 9457 void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9458 assert(VM_Version::supports_evex(), ""); 9459 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9460 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9461 attributes.set_is_evex_instruction(); 9462 attributes.set_embedded_opmask_register_specifier(mask); 9463 if (merge) { 9464 attributes.reset_is_clear_context(); 9465 } 9466 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9467 emit_int16((unsigned char)0xD2, (0xC0 | encode)); 9468 } 9469 9470 void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9471 assert(VM_Version::supports_evex(), ""); 9472 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9473 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9474 attributes.set_is_evex_instruction(); 9475 attributes.set_embedded_opmask_register_specifier(mask); 9476 if (merge) { 9477 attributes.reset_is_clear_context(); 9478 } 9479 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9480 emit_int16((unsigned char)0xD3, (0xC0 | encode)); 9481 } 9482 9483 void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9484 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9485 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9486 attributes.set_is_evex_instruction(); 9487 attributes.set_embedded_opmask_register_specifier(mask); 9488 if (merge) { 9489 attributes.reset_is_clear_context(); 9490 } 9491 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9492 emit_int16((unsigned char)0xE1, (0xC0 | encode)); 9493 } 9494 9495 void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9496 assert(VM_Version::supports_evex(), ""); 9497 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9498 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9499 attributes.set_is_evex_instruction(); 9500 attributes.set_embedded_opmask_register_specifier(mask); 9501 if (merge) { 9502 attributes.reset_is_clear_context(); 9503 } 9504 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9505 emit_int16((unsigned char)0xE2, (0xC0 | encode)); 9506 } 9507 9508 void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9509 assert(VM_Version::supports_evex(), ""); 9510 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9511 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9512 attributes.set_is_evex_instruction(); 9513 attributes.set_embedded_opmask_register_specifier(mask); 9514 if (merge) { 9515 attributes.reset_is_clear_context(); 9516 } 9517 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9518 emit_int16((unsigned char)0xE2, (0xC0 | encode)); 9519 } 9520 9521 void Assembler::evpsllvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9522 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9523 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9524 attributes.set_is_evex_instruction(); 9525 attributes.set_embedded_opmask_register_specifier(mask); 9526 if (merge) { 9527 attributes.reset_is_clear_context(); 9528 } 9529 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9530 emit_int16(0x12, (0xC0 | encode)); 9531 } 9532 9533 void Assembler::evpsllvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9534 assert(VM_Version::supports_evex(), ""); 9535 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9536 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9537 attributes.set_is_evex_instruction(); 9538 attributes.set_embedded_opmask_register_specifier(mask); 9539 if (merge) { 9540 attributes.reset_is_clear_context(); 9541 } 9542 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9543 emit_int16(0x47, (0xC0 | encode)); 9544 } 9545 9546 void Assembler::evpsllvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9547 assert(VM_Version::supports_evex(), ""); 9548 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9549 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9550 attributes.set_is_evex_instruction(); 9551 attributes.set_embedded_opmask_register_specifier(mask); 9552 if (merge) { 9553 attributes.reset_is_clear_context(); 9554 } 9555 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9556 emit_int16(0x47, (0xC0 | encode)); 9557 } 9558 9559 void Assembler::evpsrlvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9560 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9561 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9562 attributes.set_is_evex_instruction(); 9563 attributes.set_embedded_opmask_register_specifier(mask); 9564 if (merge) { 9565 attributes.reset_is_clear_context(); 9566 } 9567 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9568 emit_int16(0x10, (0xC0 | encode)); 9569 } 9570 9571 void Assembler::evpsrlvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9572 assert(VM_Version::supports_evex(), ""); 9573 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9574 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9575 attributes.set_is_evex_instruction(); 9576 attributes.set_embedded_opmask_register_specifier(mask); 9577 if (merge) { 9578 attributes.reset_is_clear_context(); 9579 } 9580 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9581 emit_int16(0x45, (0xC0 | encode)); 9582 } 9583 9584 void Assembler::evpsrlvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9585 assert(VM_Version::supports_evex(), ""); 9586 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9587 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9588 attributes.set_is_evex_instruction(); 9589 attributes.set_embedded_opmask_register_specifier(mask); 9590 if (merge) { 9591 attributes.reset_is_clear_context(); 9592 } 9593 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9594 emit_int16(0x45, (0xC0 | encode)); 9595 } 9596 9597 void Assembler::evpsravw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9598 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9599 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9600 attributes.set_is_evex_instruction(); 9601 attributes.set_embedded_opmask_register_specifier(mask); 9602 if (merge) { 9603 attributes.reset_is_clear_context(); 9604 } 9605 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9606 emit_int16(0x11, (0xC0 | encode)); 9607 } 9608 9609 void Assembler::evpsravd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9610 assert(VM_Version::supports_evex(), ""); 9611 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9612 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9613 attributes.set_is_evex_instruction(); 9614 attributes.set_embedded_opmask_register_specifier(mask); 9615 if (merge) { 9616 attributes.reset_is_clear_context(); 9617 } 9618 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9619 emit_int16(0x46, (0xC0 | encode)); 9620 } 9621 9622 void Assembler::evpsravq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9623 assert(VM_Version::supports_evex(), ""); 9624 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9625 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9626 attributes.set_is_evex_instruction(); 9627 attributes.set_embedded_opmask_register_specifier(mask); 9628 if (merge) { 9629 attributes.reset_is_clear_context(); 9630 } 9631 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9632 emit_int16(0x46, (0xC0 | encode)); 9633 } 9634 9635 void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9636 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9637 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9638 attributes.set_is_evex_instruction(); 9639 attributes.set_embedded_opmask_register_specifier(mask); 9640 if (merge) { 9641 attributes.reset_is_clear_context(); 9642 } 9643 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9644 emit_int16(0x38, (0xC0 | encode)); 9645 } 9646 9647 void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9648 assert(VM_Version::supports_avx512bw(), ""); 9649 InstructionMark im(this); 9650 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9651 attributes.set_is_evex_instruction(); 9652 attributes.set_embedded_opmask_register_specifier(mask); 9653 if (merge) { 9654 attributes.reset_is_clear_context(); 9655 } 9656 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9657 emit_int8(0x38); 9658 emit_operand(dst, src); 9659 } 9660 9661 void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9662 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9663 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9664 attributes.set_is_evex_instruction(); 9665 attributes.set_embedded_opmask_register_specifier(mask); 9666 if (merge) { 9667 attributes.reset_is_clear_context(); 9668 } 9669 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9670 emit_int16((unsigned char)0xEA, (0xC0 | encode)); 9671 } 9672 9673 void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9674 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9675 InstructionMark im(this); 9676 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9677 attributes.set_is_evex_instruction(); 9678 attributes.set_embedded_opmask_register_specifier(mask); 9679 if (merge) { 9680 attributes.reset_is_clear_context(); 9681 } 9682 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9683 emit_int8((unsigned char)0xEA); 9684 emit_operand(dst, src); 9685 } 9686 9687 void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9688 assert(VM_Version::supports_evex(), ""); 9689 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9690 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9691 attributes.set_is_evex_instruction(); 9692 attributes.set_embedded_opmask_register_specifier(mask); 9693 if (merge) { 9694 attributes.reset_is_clear_context(); 9695 } 9696 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9697 emit_int16(0x39, (0xC0 | encode)); 9698 } 9699 9700 void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9701 assert(VM_Version::supports_evex(), ""); 9702 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9703 InstructionMark im(this); 9704 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9705 attributes.set_is_evex_instruction(); 9706 attributes.set_embedded_opmask_register_specifier(mask); 9707 if (merge) { 9708 attributes.reset_is_clear_context(); 9709 } 9710 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9711 emit_int8(0x39); 9712 emit_operand(dst, src); 9713 } 9714 9715 void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9716 assert(VM_Version::supports_evex(), ""); 9717 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9718 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9719 attributes.set_is_evex_instruction(); 9720 attributes.set_embedded_opmask_register_specifier(mask); 9721 if (merge) { 9722 attributes.reset_is_clear_context(); 9723 } 9724 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9725 emit_int16(0x39, (0xC0 | encode)); 9726 } 9727 9728 void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9729 assert(VM_Version::supports_evex(), ""); 9730 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9731 InstructionMark im(this); 9732 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9733 attributes.set_is_evex_instruction(); 9734 attributes.set_embedded_opmask_register_specifier(mask); 9735 if (merge) { 9736 attributes.reset_is_clear_context(); 9737 } 9738 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9739 emit_int8(0x39); 9740 emit_operand(dst, src); 9741 } 9742 9743 9744 void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9745 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9746 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9747 attributes.set_is_evex_instruction(); 9748 attributes.set_embedded_opmask_register_specifier(mask); 9749 if (merge) { 9750 attributes.reset_is_clear_context(); 9751 } 9752 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9753 emit_int16(0x3C, (0xC0 | encode)); 9754 } 9755 9756 void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9757 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9758 InstructionMark im(this); 9759 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9760 attributes.set_is_evex_instruction(); 9761 attributes.set_embedded_opmask_register_specifier(mask); 9762 if (merge) { 9763 attributes.reset_is_clear_context(); 9764 } 9765 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9766 emit_int8(0x3C); 9767 emit_operand(dst, src); 9768 } 9769 9770 void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9771 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9772 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9773 attributes.set_is_evex_instruction(); 9774 attributes.set_embedded_opmask_register_specifier(mask); 9775 if (merge) { 9776 attributes.reset_is_clear_context(); 9777 } 9778 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9779 emit_int16((unsigned char)0xEE, (0xC0 | encode)); 9780 } 9781 9782 void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9783 assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), ""); 9784 InstructionMark im(this); 9785 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9786 attributes.set_is_evex_instruction(); 9787 attributes.set_embedded_opmask_register_specifier(mask); 9788 if (merge) { 9789 attributes.reset_is_clear_context(); 9790 } 9791 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 9792 emit_int8((unsigned char)0xEE); 9793 emit_operand(dst, src); 9794 } 9795 9796 void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9797 assert(VM_Version::supports_evex(), ""); 9798 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9799 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9800 attributes.set_is_evex_instruction(); 9801 attributes.set_embedded_opmask_register_specifier(mask); 9802 if (merge) { 9803 attributes.reset_is_clear_context(); 9804 } 9805 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9806 emit_int16(0x3D, (0xC0 | encode)); 9807 } 9808 9809 void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9810 assert(VM_Version::supports_evex(), ""); 9811 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9812 InstructionMark im(this); 9813 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9814 attributes.set_is_evex_instruction(); 9815 attributes.set_embedded_opmask_register_specifier(mask); 9816 if (merge) { 9817 attributes.reset_is_clear_context(); 9818 } 9819 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9820 emit_int8(0x3D); 9821 emit_operand(dst, src); 9822 } 9823 9824 void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9825 assert(VM_Version::supports_evex(), ""); 9826 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9827 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9828 attributes.set_is_evex_instruction(); 9829 attributes.set_embedded_opmask_register_specifier(mask); 9830 if (merge) { 9831 attributes.reset_is_clear_context(); 9832 } 9833 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9834 emit_int16(0x3D, (0xC0 | encode)); 9835 } 9836 9837 void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9838 assert(VM_Version::supports_evex(), ""); 9839 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 9840 InstructionMark im(this); 9841 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9842 attributes.set_is_evex_instruction(); 9843 attributes.set_embedded_opmask_register_specifier(mask); 9844 if (merge) { 9845 attributes.reset_is_clear_context(); 9846 } 9847 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9848 emit_int8(0x3D); 9849 emit_operand(dst, src); 9850 } 9851 9852 void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) { 9853 assert(VM_Version::supports_evex(), "requires EVEX support"); 9854 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 9855 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9856 attributes.set_is_evex_instruction(); 9857 attributes.set_embedded_opmask_register_specifier(mask); 9858 if (merge) { 9859 attributes.reset_is_clear_context(); 9860 } 9861 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 9862 emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8); 9863 } 9864 9865 void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) { 9866 assert(VM_Version::supports_evex(), "requires EVEX support"); 9867 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 9868 assert(dst != xnoreg, "sanity"); 9869 InstructionMark im(this); 9870 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9871 attributes.set_is_evex_instruction(); 9872 attributes.set_embedded_opmask_register_specifier(mask); 9873 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 9874 if (merge) { 9875 attributes.reset_is_clear_context(); 9876 } 9877 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 9878 emit_int8(0x25); 9879 emit_operand(dst, src3); 9880 emit_int8(imm8); 9881 } 9882 9883 void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) { 9884 assert(VM_Version::supports_evex(), "requires EVEX support"); 9885 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 9886 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9887 attributes.set_is_evex_instruction(); 9888 attributes.set_embedded_opmask_register_specifier(mask); 9889 if (merge) { 9890 attributes.reset_is_clear_context(); 9891 } 9892 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 9893 emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8); 9894 } 9895 9896 void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) { 9897 assert(VM_Version::supports_evex(), "requires EVEX support"); 9898 assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); 9899 assert(dst != xnoreg, "sanity"); 9900 InstructionMark im(this); 9901 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 9902 attributes.set_is_evex_instruction(); 9903 attributes.set_embedded_opmask_register_specifier(mask); 9904 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 9905 if (merge) { 9906 attributes.reset_is_clear_context(); 9907 } 9908 vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 9909 emit_int8(0x25); 9910 emit_operand(dst, src3); 9911 emit_int8(imm8); 9912 } 9913 9914 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL 9915 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { 9916 assert(UseAVX >= 2, ""); 9917 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9918 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9919 emit_int16(0x58, (0xC0 | encode)); 9920 } 9921 9922 void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) { 9923 assert(VM_Version::supports_avx2(), ""); 9924 assert(dst != xnoreg, "sanity"); 9925 InstructionMark im(this); 9926 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9927 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 9928 // swap src<->dst for encoding 9929 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9930 emit_int8(0x58); 9931 emit_operand(dst, src); 9932 } 9933 9934 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL 9935 void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { 9936 assert(VM_Version::supports_avx2(), ""); 9937 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9938 attributes.set_rex_vex_w_reverted(); 9939 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9940 emit_int16(0x59, (0xC0 | encode)); 9941 } 9942 9943 void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) { 9944 assert(VM_Version::supports_avx2(), ""); 9945 assert(dst != xnoreg, "sanity"); 9946 InstructionMark im(this); 9947 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9948 attributes.set_rex_vex_w_reverted(); 9949 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 9950 // swap src<->dst for encoding 9951 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9952 emit_int8(0x59); 9953 emit_operand(dst, src); 9954 } 9955 9956 void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) { 9957 assert(vector_len != Assembler::AVX_128bit, ""); 9958 assert(VM_Version::supports_evex(), ""); 9959 assert(dst != xnoreg, "sanity"); 9960 InstructionMark im(this); 9961 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9962 attributes.set_rex_vex_w_reverted(); 9963 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 9964 // swap src<->dst for encoding 9965 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9966 emit_int8(0x5A); 9967 emit_operand(dst, src); 9968 } 9969 9970 void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) { 9971 assert(vector_len != Assembler::AVX_128bit, ""); 9972 assert(VM_Version::supports_avx512dq(), ""); 9973 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9974 attributes.set_rex_vex_w_reverted(); 9975 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9976 emit_int16(0x5A, (0xC0 | encode)); 9977 } 9978 9979 void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) { 9980 assert(vector_len != Assembler::AVX_128bit, ""); 9981 assert(VM_Version::supports_avx512dq(), ""); 9982 assert(dst != xnoreg, "sanity"); 9983 InstructionMark im(this); 9984 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9985 attributes.set_rex_vex_w_reverted(); 9986 attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit); 9987 // swap src<->dst for encoding 9988 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 9989 emit_int8(0x5A); 9990 emit_operand(dst, src); 9991 } 9992 9993 // scalar single/double precision replicate 9994 9995 // duplicate single precision data from src into programmed locations in dest : requires AVX512VL 9996 void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { 9997 assert(VM_Version::supports_avx2(), ""); 9998 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 9999 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10000 emit_int16(0x18, (0xC0 | encode)); 10001 } 10002 10003 void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) { 10004 assert(VM_Version::supports_avx(), ""); 10005 assert(dst != xnoreg, "sanity"); 10006 InstructionMark im(this); 10007 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10008 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10009 // swap src<->dst for encoding 10010 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10011 emit_int8(0x18); 10012 emit_operand(dst, src); 10013 } 10014 10015 // duplicate double precision data from src into programmed locations in dest : requires AVX512VL 10016 void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) { 10017 assert(VM_Version::supports_avx2(), ""); 10018 assert(vector_len == AVX_256bit || vector_len == AVX_512bit, ""); 10019 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10020 attributes.set_rex_vex_w_reverted(); 10021 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10022 emit_int16(0x19, (0xC0 | encode)); 10023 } 10024 10025 void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) { 10026 assert(VM_Version::supports_avx(), ""); 10027 assert(vector_len == AVX_256bit || vector_len == AVX_512bit, ""); 10028 assert(dst != xnoreg, "sanity"); 10029 InstructionMark im(this); 10030 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10031 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 10032 attributes.set_rex_vex_w_reverted(); 10033 // swap src<->dst for encoding 10034 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10035 emit_int8(0x19); 10036 emit_operand(dst, src); 10037 } 10038 10039 void Assembler::vbroadcastf128(XMMRegister dst, Address src, int vector_len) { 10040 assert(VM_Version::supports_avx(), ""); 10041 assert(vector_len == AVX_256bit, ""); 10042 assert(dst != xnoreg, "sanity"); 10043 InstructionMark im(this); 10044 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10045 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 10046 // swap src<->dst for encoding 10047 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10048 emit_int8(0x1A); 10049 emit_operand(dst, src); 10050 } 10051 10052 // gpr source broadcast forms 10053 10054 // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL 10055 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { 10056 assert(VM_Version::supports_avx512bw(), ""); 10057 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 10058 attributes.set_is_evex_instruction(); 10059 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10060 emit_int16(0x7A, (0xC0 | encode)); 10061 } 10062 10063 // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL 10064 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { 10065 assert(VM_Version::supports_avx512bw(), ""); 10066 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 10067 attributes.set_is_evex_instruction(); 10068 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10069 emit_int16(0x7B, (0xC0 | encode)); 10070 } 10071 10072 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL 10073 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { 10074 assert(VM_Version::supports_evex(), ""); 10075 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10076 attributes.set_is_evex_instruction(); 10077 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10078 emit_int16(0x7C, (0xC0 | encode)); 10079 } 10080 10081 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL 10082 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { 10083 assert(VM_Version::supports_evex(), ""); 10084 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10085 attributes.set_is_evex_instruction(); 10086 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10087 emit_int16(0x7C, (0xC0 | encode)); 10088 } 10089 10090 void Assembler::vpgatherdd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) { 10091 assert(VM_Version::supports_avx2(), ""); 10092 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, ""); 10093 assert(dst != xnoreg, "sanity"); 10094 assert(src.isxmmindex(),"expected to be xmm index"); 10095 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10096 InstructionMark im(this); 10097 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 10098 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10099 emit_int8((unsigned char)0x90); 10100 emit_operand(dst, src); 10101 } 10102 10103 void Assembler::vpgatherdq(XMMRegister dst, Address src, XMMRegister mask, int vector_len) { 10104 assert(VM_Version::supports_avx2(), ""); 10105 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, ""); 10106 assert(dst != xnoreg, "sanity"); 10107 assert(src.isxmmindex(),"expected to be xmm index"); 10108 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10109 InstructionMark im(this); 10110 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 10111 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10112 emit_int8((unsigned char)0x90); 10113 emit_operand(dst, src); 10114 } 10115 10116 void Assembler::vgatherdpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) { 10117 assert(VM_Version::supports_avx2(), ""); 10118 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, ""); 10119 assert(dst != xnoreg, "sanity"); 10120 assert(src.isxmmindex(),"expected to be xmm index"); 10121 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10122 InstructionMark im(this); 10123 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 10124 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10125 emit_int8((unsigned char)0x92); 10126 emit_operand(dst, src); 10127 } 10128 10129 void Assembler::vgatherdps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) { 10130 assert(VM_Version::supports_avx2(), ""); 10131 assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, ""); 10132 assert(dst != xnoreg, "sanity"); 10133 assert(src.isxmmindex(),"expected to be xmm index"); 10134 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10135 InstructionMark im(this); 10136 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true); 10137 vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10138 emit_int8((unsigned char)0x92); 10139 emit_operand(dst, src); 10140 } 10141 void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) { 10142 assert(VM_Version::supports_evex(), ""); 10143 assert(dst != xnoreg, "sanity"); 10144 assert(src.isxmmindex(),"expected to be xmm index"); 10145 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10146 assert(mask != k0, "instruction will #UD if mask is in k0"); 10147 InstructionMark im(this); 10148 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10149 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10150 attributes.reset_is_clear_context(); 10151 attributes.set_embedded_opmask_register_specifier(mask); 10152 attributes.set_is_evex_instruction(); 10153 // swap src<->dst for encoding 10154 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10155 emit_int8((unsigned char)0x90); 10156 emit_operand(dst, src); 10157 } 10158 10159 void Assembler::evpgatherdq(XMMRegister dst, KRegister mask, Address src, int vector_len) { 10160 assert(VM_Version::supports_evex(), ""); 10161 assert(dst != xnoreg, "sanity"); 10162 assert(src.isxmmindex(),"expected to be xmm index"); 10163 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10164 assert(mask != k0, "instruction will #UD if mask is in k0"); 10165 InstructionMark im(this); 10166 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10167 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10168 attributes.reset_is_clear_context(); 10169 attributes.set_embedded_opmask_register_specifier(mask); 10170 attributes.set_is_evex_instruction(); 10171 // swap src<->dst for encoding 10172 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10173 emit_int8((unsigned char)0x90); 10174 emit_operand(dst, src); 10175 } 10176 10177 void Assembler::evgatherdpd(XMMRegister dst, KRegister mask, Address src, int vector_len) { 10178 assert(VM_Version::supports_evex(), ""); 10179 assert(dst != xnoreg, "sanity"); 10180 assert(src.isxmmindex(),"expected to be xmm index"); 10181 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10182 assert(mask != k0, "instruction will #UD if mask is in k0"); 10183 InstructionMark im(this); 10184 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10185 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10186 attributes.reset_is_clear_context(); 10187 attributes.set_embedded_opmask_register_specifier(mask); 10188 attributes.set_is_evex_instruction(); 10189 // swap src<->dst for encoding 10190 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10191 emit_int8((unsigned char)0x92); 10192 emit_operand(dst, src); 10193 } 10194 10195 void Assembler::evgatherdps(XMMRegister dst, KRegister mask, Address src, int vector_len) { 10196 assert(VM_Version::supports_evex(), ""); 10197 assert(dst != xnoreg, "sanity"); 10198 assert(src.isxmmindex(),"expected to be xmm index"); 10199 assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same"); 10200 assert(mask != k0, "instruction will #UD if mask is in k0"); 10201 InstructionMark im(this); 10202 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10203 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10204 attributes.reset_is_clear_context(); 10205 attributes.set_embedded_opmask_register_specifier(mask); 10206 attributes.set_is_evex_instruction(); 10207 // swap src<->dst for encoding 10208 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10209 emit_int8((unsigned char)0x92); 10210 emit_operand(dst, src); 10211 } 10212 10213 void Assembler::evpscatterdd(Address dst, KRegister mask, XMMRegister src, int vector_len) { 10214 assert(VM_Version::supports_evex(), ""); 10215 assert(mask != k0, "instruction will #UD if mask is in k0"); 10216 InstructionMark im(this); 10217 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10218 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10219 attributes.reset_is_clear_context(); 10220 attributes.set_embedded_opmask_register_specifier(mask); 10221 attributes.set_is_evex_instruction(); 10222 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10223 emit_int8((unsigned char)0xA0); 10224 emit_operand(src, dst); 10225 } 10226 10227 void Assembler::evpscatterdq(Address dst, KRegister mask, XMMRegister src, int vector_len) { 10228 assert(VM_Version::supports_evex(), ""); 10229 assert(mask != k0, "instruction will #UD if mask is in k0"); 10230 InstructionMark im(this); 10231 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10232 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10233 attributes.reset_is_clear_context(); 10234 attributes.set_embedded_opmask_register_specifier(mask); 10235 attributes.set_is_evex_instruction(); 10236 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10237 emit_int8((unsigned char)0xA0); 10238 emit_operand(src, dst); 10239 } 10240 10241 void Assembler::evscatterdps(Address dst, KRegister mask, XMMRegister src, int vector_len) { 10242 assert(VM_Version::supports_evex(), ""); 10243 assert(mask != k0, "instruction will #UD if mask is in k0"); 10244 InstructionMark im(this); 10245 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10246 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10247 attributes.reset_is_clear_context(); 10248 attributes.set_embedded_opmask_register_specifier(mask); 10249 attributes.set_is_evex_instruction(); 10250 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10251 emit_int8((unsigned char)0xA2); 10252 emit_operand(src, dst); 10253 } 10254 10255 void Assembler::evscatterdpd(Address dst, KRegister mask, XMMRegister src, int vector_len) { 10256 assert(VM_Version::supports_evex(), ""); 10257 assert(mask != k0, "instruction will #UD if mask is in k0"); 10258 InstructionMark im(this); 10259 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 10260 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 10261 attributes.reset_is_clear_context(); 10262 attributes.set_embedded_opmask_register_specifier(mask); 10263 attributes.set_is_evex_instruction(); 10264 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 10265 emit_int8((unsigned char)0xA2); 10266 emit_operand(src, dst); 10267 } 10268 // Carry-Less Multiplication Quadword 10269 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { 10270 assert(VM_Version::supports_clmul(), ""); 10271 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 10272 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 10273 emit_int24(0x44, (0xC0 | encode), (unsigned char)mask); 10274 } 10275 10276 // Carry-Less Multiplication Quadword 10277 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { 10278 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); 10279 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 10280 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 10281 emit_int24(0x44, (0xC0 | encode), (unsigned char)mask); 10282 } 10283 10284 void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) { 10285 assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support"); 10286 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 10287 attributes.set_is_evex_instruction(); 10288 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 10289 emit_int24(0x44, (0xC0 | encode), (unsigned char)mask); 10290 } 10291 10292 void Assembler::vzeroupper_uncached() { 10293 if (VM_Version::supports_vzeroupper()) { 10294 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 10295 (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 10296 emit_int8(0x77); 10297 } 10298 } 10299 10300 void Assembler::fld_x(Address adr) { 10301 InstructionMark im(this); 10302 emit_int8((unsigned char)0xDB); 10303 emit_operand32(rbp, adr); 10304 } 10305 10306 void Assembler::fstp_x(Address adr) { 10307 InstructionMark im(this); 10308 emit_int8((unsigned char)0xDB); 10309 emit_operand32(rdi, adr); 10310 } 10311 10312 void Assembler::emit_operand32(Register reg, Address adr) { 10313 assert(reg->encoding() < 8, "no extended registers"); 10314 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 10315 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 10316 adr._rspec); 10317 } 10318 10319 #ifndef _LP64 10320 // 32bit only pieces of the assembler 10321 10322 void Assembler::emms() { 10323 NOT_LP64(assert(VM_Version::supports_mmx(), "")); 10324 emit_int16(0x0F, 0x77); 10325 } 10326 10327 void Assembler::vzeroupper() { 10328 vzeroupper_uncached(); 10329 } 10330 10331 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { 10332 // NO PREFIX AS NEVER 64BIT 10333 InstructionMark im(this); 10334 emit_int16((unsigned char)0x81, (0xF8 | src1->encoding())); 10335 emit_data(imm32, rspec, 0); 10336 } 10337 10338 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { 10339 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs 10340 InstructionMark im(this); 10341 emit_int8((unsigned char)0x81); 10342 emit_operand(rdi, src1); 10343 emit_data(imm32, rspec, 0); 10344 } 10345 10346 // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, 10347 // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded 10348 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. 10349 void Assembler::cmpxchg8(Address adr) { 10350 InstructionMark im(this); 10351 emit_int16(0x0F, (unsigned char)0xC7); 10352 emit_operand(rcx, adr); 10353 } 10354 10355 void Assembler::decl(Register dst) { 10356 // Don't use it directly. Use MacroAssembler::decrementl() instead. 10357 emit_int8(0x48 | dst->encoding()); 10358 } 10359 10360 // 64bit doesn't use the x87 10361 10362 void Assembler::emit_farith(int b1, int b2, int i) { 10363 assert(isByte(b1) && isByte(b2), "wrong opcode"); 10364 assert(0 <= i && i < 8, "illegal stack offset"); 10365 emit_int16(b1, b2 + i); 10366 } 10367 10368 void Assembler::fabs() { 10369 emit_int16((unsigned char)0xD9, (unsigned char)0xE1); 10370 } 10371 10372 void Assembler::fadd(int i) { 10373 emit_farith(0xD8, 0xC0, i); 10374 } 10375 10376 void Assembler::fadd_d(Address src) { 10377 InstructionMark im(this); 10378 emit_int8((unsigned char)0xDC); 10379 emit_operand32(rax, src); 10380 } 10381 10382 void Assembler::fadd_s(Address src) { 10383 InstructionMark im(this); 10384 emit_int8((unsigned char)0xD8); 10385 emit_operand32(rax, src); 10386 } 10387 10388 void Assembler::fadda(int i) { 10389 emit_farith(0xDC, 0xC0, i); 10390 } 10391 10392 void Assembler::faddp(int i) { 10393 emit_farith(0xDE, 0xC0, i); 10394 } 10395 10396 void Assembler::fchs() { 10397 emit_int16((unsigned char)0xD9, (unsigned char)0xE0); 10398 } 10399 10400 void Assembler::fcom(int i) { 10401 emit_farith(0xD8, 0xD0, i); 10402 } 10403 10404 void Assembler::fcomp(int i) { 10405 emit_farith(0xD8, 0xD8, i); 10406 } 10407 10408 void Assembler::fcomp_d(Address src) { 10409 InstructionMark im(this); 10410 emit_int8((unsigned char)0xDC); 10411 emit_operand32(rbx, src); 10412 } 10413 10414 void Assembler::fcomp_s(Address src) { 10415 InstructionMark im(this); 10416 emit_int8((unsigned char)0xD8); 10417 emit_operand32(rbx, src); 10418 } 10419 10420 void Assembler::fcompp() { 10421 emit_int16((unsigned char)0xDE, (unsigned char)0xD9); 10422 } 10423 10424 void Assembler::fcos() { 10425 emit_int16((unsigned char)0xD9, (unsigned char)0xFF); 10426 } 10427 10428 void Assembler::fdecstp() { 10429 emit_int16((unsigned char)0xD9, (unsigned char)0xF6); 10430 } 10431 10432 void Assembler::fdiv(int i) { 10433 emit_farith(0xD8, 0xF0, i); 10434 } 10435 10436 void Assembler::fdiv_d(Address src) { 10437 InstructionMark im(this); 10438 emit_int8((unsigned char)0xDC); 10439 emit_operand32(rsi, src); 10440 } 10441 10442 void Assembler::fdiv_s(Address src) { 10443 InstructionMark im(this); 10444 emit_int8((unsigned char)0xD8); 10445 emit_operand32(rsi, src); 10446 } 10447 10448 void Assembler::fdiva(int i) { 10449 emit_farith(0xDC, 0xF8, i); 10450 } 10451 10452 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994) 10453 // is erroneous for some of the floating-point instructions below. 10454 10455 void Assembler::fdivp(int i) { 10456 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong) 10457 } 10458 10459 void Assembler::fdivr(int i) { 10460 emit_farith(0xD8, 0xF8, i); 10461 } 10462 10463 void Assembler::fdivr_d(Address src) { 10464 InstructionMark im(this); 10465 emit_int8((unsigned char)0xDC); 10466 emit_operand32(rdi, src); 10467 } 10468 10469 void Assembler::fdivr_s(Address src) { 10470 InstructionMark im(this); 10471 emit_int8((unsigned char)0xD8); 10472 emit_operand32(rdi, src); 10473 } 10474 10475 void Assembler::fdivra(int i) { 10476 emit_farith(0xDC, 0xF0, i); 10477 } 10478 10479 void Assembler::fdivrp(int i) { 10480 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong) 10481 } 10482 10483 void Assembler::ffree(int i) { 10484 emit_farith(0xDD, 0xC0, i); 10485 } 10486 10487 void Assembler::fild_d(Address adr) { 10488 InstructionMark im(this); 10489 emit_int8((unsigned char)0xDF); 10490 emit_operand32(rbp, adr); 10491 } 10492 10493 void Assembler::fild_s(Address adr) { 10494 InstructionMark im(this); 10495 emit_int8((unsigned char)0xDB); 10496 emit_operand32(rax, adr); 10497 } 10498 10499 void Assembler::fincstp() { 10500 emit_int16((unsigned char)0xD9, (unsigned char)0xF7); 10501 } 10502 10503 void Assembler::finit() { 10504 emit_int24((unsigned char)0x9B, (unsigned char)0xDB, (unsigned char)0xE3); 10505 } 10506 10507 void Assembler::fist_s(Address adr) { 10508 InstructionMark im(this); 10509 emit_int8((unsigned char)0xDB); 10510 emit_operand32(rdx, adr); 10511 } 10512 10513 void Assembler::fistp_d(Address adr) { 10514 InstructionMark im(this); 10515 emit_int8((unsigned char)0xDF); 10516 emit_operand32(rdi, adr); 10517 } 10518 10519 void Assembler::fistp_s(Address adr) { 10520 InstructionMark im(this); 10521 emit_int8((unsigned char)0xDB); 10522 emit_operand32(rbx, adr); 10523 } 10524 10525 void Assembler::fld1() { 10526 emit_int16((unsigned char)0xD9, (unsigned char)0xE8); 10527 } 10528 10529 void Assembler::fld_d(Address adr) { 10530 InstructionMark im(this); 10531 emit_int8((unsigned char)0xDD); 10532 emit_operand32(rax, adr); 10533 } 10534 10535 void Assembler::fld_s(Address adr) { 10536 InstructionMark im(this); 10537 emit_int8((unsigned char)0xD9); 10538 emit_operand32(rax, adr); 10539 } 10540 10541 10542 void Assembler::fld_s(int index) { 10543 emit_farith(0xD9, 0xC0, index); 10544 } 10545 10546 void Assembler::fldcw(Address src) { 10547 InstructionMark im(this); 10548 emit_int8((unsigned char)0xD9); 10549 emit_operand32(rbp, src); 10550 } 10551 10552 void Assembler::fldenv(Address src) { 10553 InstructionMark im(this); 10554 emit_int8((unsigned char)0xD9); 10555 emit_operand32(rsp, src); 10556 } 10557 10558 void Assembler::fldlg2() { 10559 emit_int16((unsigned char)0xD9, (unsigned char)0xEC); 10560 } 10561 10562 void Assembler::fldln2() { 10563 emit_int16((unsigned char)0xD9, (unsigned char)0xED); 10564 } 10565 10566 void Assembler::fldz() { 10567 emit_int16((unsigned char)0xD9, (unsigned char)0xEE); 10568 } 10569 10570 void Assembler::flog() { 10571 fldln2(); 10572 fxch(); 10573 fyl2x(); 10574 } 10575 10576 void Assembler::flog10() { 10577 fldlg2(); 10578 fxch(); 10579 fyl2x(); 10580 } 10581 10582 void Assembler::fmul(int i) { 10583 emit_farith(0xD8, 0xC8, i); 10584 } 10585 10586 void Assembler::fmul_d(Address src) { 10587 InstructionMark im(this); 10588 emit_int8((unsigned char)0xDC); 10589 emit_operand32(rcx, src); 10590 } 10591 10592 void Assembler::fmul_s(Address src) { 10593 InstructionMark im(this); 10594 emit_int8((unsigned char)0xD8); 10595 emit_operand32(rcx, src); 10596 } 10597 10598 void Assembler::fmula(int i) { 10599 emit_farith(0xDC, 0xC8, i); 10600 } 10601 10602 void Assembler::fmulp(int i) { 10603 emit_farith(0xDE, 0xC8, i); 10604 } 10605 10606 void Assembler::fnsave(Address dst) { 10607 InstructionMark im(this); 10608 emit_int8((unsigned char)0xDD); 10609 emit_operand32(rsi, dst); 10610 } 10611 10612 void Assembler::fnstcw(Address src) { 10613 InstructionMark im(this); 10614 emit_int16((unsigned char)0x9B, (unsigned char)0xD9); 10615 emit_operand32(rdi, src); 10616 } 10617 10618 void Assembler::fnstsw_ax() { 10619 emit_int16((unsigned char)0xDF, (unsigned char)0xE0); 10620 } 10621 10622 void Assembler::fprem() { 10623 emit_int16((unsigned char)0xD9, (unsigned char)0xF8); 10624 } 10625 10626 void Assembler::fprem1() { 10627 emit_int16((unsigned char)0xD9, (unsigned char)0xF5); 10628 } 10629 10630 void Assembler::frstor(Address src) { 10631 InstructionMark im(this); 10632 emit_int8((unsigned char)0xDD); 10633 emit_operand32(rsp, src); 10634 } 10635 10636 void Assembler::fsin() { 10637 emit_int16((unsigned char)0xD9, (unsigned char)0xFE); 10638 } 10639 10640 void Assembler::fsqrt() { 10641 emit_int16((unsigned char)0xD9, (unsigned char)0xFA); 10642 } 10643 10644 void Assembler::fst_d(Address adr) { 10645 InstructionMark im(this); 10646 emit_int8((unsigned char)0xDD); 10647 emit_operand32(rdx, adr); 10648 } 10649 10650 void Assembler::fst_s(Address adr) { 10651 InstructionMark im(this); 10652 emit_int8((unsigned char)0xD9); 10653 emit_operand32(rdx, adr); 10654 } 10655 10656 void Assembler::fstp_d(Address adr) { 10657 InstructionMark im(this); 10658 emit_int8((unsigned char)0xDD); 10659 emit_operand32(rbx, adr); 10660 } 10661 10662 void Assembler::fstp_d(int index) { 10663 emit_farith(0xDD, 0xD8, index); 10664 } 10665 10666 void Assembler::fstp_s(Address adr) { 10667 InstructionMark im(this); 10668 emit_int8((unsigned char)0xD9); 10669 emit_operand32(rbx, adr); 10670 } 10671 10672 void Assembler::fsub(int i) { 10673 emit_farith(0xD8, 0xE0, i); 10674 } 10675 10676 void Assembler::fsub_d(Address src) { 10677 InstructionMark im(this); 10678 emit_int8((unsigned char)0xDC); 10679 emit_operand32(rsp, src); 10680 } 10681 10682 void Assembler::fsub_s(Address src) { 10683 InstructionMark im(this); 10684 emit_int8((unsigned char)0xD8); 10685 emit_operand32(rsp, src); 10686 } 10687 10688 void Assembler::fsuba(int i) { 10689 emit_farith(0xDC, 0xE8, i); 10690 } 10691 10692 void Assembler::fsubp(int i) { 10693 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong) 10694 } 10695 10696 void Assembler::fsubr(int i) { 10697 emit_farith(0xD8, 0xE8, i); 10698 } 10699 10700 void Assembler::fsubr_d(Address src) { 10701 InstructionMark im(this); 10702 emit_int8((unsigned char)0xDC); 10703 emit_operand32(rbp, src); 10704 } 10705 10706 void Assembler::fsubr_s(Address src) { 10707 InstructionMark im(this); 10708 emit_int8((unsigned char)0xD8); 10709 emit_operand32(rbp, src); 10710 } 10711 10712 void Assembler::fsubra(int i) { 10713 emit_farith(0xDC, 0xE0, i); 10714 } 10715 10716 void Assembler::fsubrp(int i) { 10717 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) 10718 } 10719 10720 void Assembler::ftan() { 10721 emit_int32((unsigned char)0xD9, (unsigned char)0xF2, (unsigned char)0xDD, (unsigned char)0xD8); 10722 } 10723 10724 void Assembler::ftst() { 10725 emit_int16((unsigned char)0xD9, (unsigned char)0xE4); 10726 } 10727 10728 void Assembler::fucomi(int i) { 10729 // make sure the instruction is supported (introduced for P6, together with cmov) 10730 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 10731 emit_farith(0xDB, 0xE8, i); 10732 } 10733 10734 void Assembler::fucomip(int i) { 10735 // make sure the instruction is supported (introduced for P6, together with cmov) 10736 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 10737 emit_farith(0xDF, 0xE8, i); 10738 } 10739 10740 void Assembler::fwait() { 10741 emit_int8((unsigned char)0x9B); 10742 } 10743 10744 void Assembler::fxch(int i) { 10745 emit_farith(0xD9, 0xC8, i); 10746 } 10747 10748 void Assembler::fyl2x() { 10749 emit_int16((unsigned char)0xD9, (unsigned char)0xF1); 10750 } 10751 10752 void Assembler::frndint() { 10753 emit_int16((unsigned char)0xD9, (unsigned char)0xFC); 10754 } 10755 10756 void Assembler::f2xm1() { 10757 emit_int16((unsigned char)0xD9, (unsigned char)0xF0); 10758 } 10759 10760 void Assembler::fldl2e() { 10761 emit_int16((unsigned char)0xD9, (unsigned char)0xEA); 10762 } 10763 #endif // !_LP64 10764 10765 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding. 10766 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 }; 10767 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding. 10768 static int simd_opc[4] = { 0, 0, 0x38, 0x3A }; 10769 10770 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding. 10771 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 10772 if (pre > 0) { 10773 emit_int8(simd_pre[pre]); 10774 } 10775 if (rex_w) { 10776 prefixq(adr, xreg); 10777 } else { 10778 prefix(adr, xreg); 10779 } 10780 if (opc > 0) { 10781 emit_int8(0x0F); 10782 int opc2 = simd_opc[opc]; 10783 if (opc2 > 0) { 10784 emit_int8(opc2); 10785 } 10786 } 10787 } 10788 10789 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 10790 if (pre > 0) { 10791 emit_int8(simd_pre[pre]); 10792 } 10793 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc); 10794 if (opc > 0) { 10795 emit_int8(0x0F); 10796 int opc2 = simd_opc[opc]; 10797 if (opc2 > 0) { 10798 emit_int8(opc2); 10799 } 10800 } 10801 return encode; 10802 } 10803 10804 10805 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) { 10806 int vector_len = _attributes->get_vector_len(); 10807 bool vex_w = _attributes->is_rex_vex_w(); 10808 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) { 10809 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0); 10810 byte1 = (~byte1) & 0xE0; 10811 byte1 |= opc; 10812 10813 int byte2 = ((~nds_enc) & 0xf) << 3; 10814 byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre; 10815 10816 emit_int24((unsigned char)VEX_3bytes, byte1, byte2); 10817 } else { 10818 int byte1 = vex_r ? VEX_R : 0; 10819 byte1 = (~byte1) & 0x80; 10820 byte1 |= ((~nds_enc) & 0xf) << 3; 10821 byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre; 10822 emit_int16((unsigned char)VEX_2bytes, byte1); 10823 } 10824 } 10825 10826 // This is a 4 byte encoding 10827 void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){ 10828 // EVEX 0x62 prefix 10829 // byte1 = EVEX_4bytes; 10830 10831 bool vex_w = _attributes->is_rex_vex_w(); 10832 int evex_encoding = (vex_w ? VEX_W : 0); 10833 // EVEX.b is not currently used for broadcast of single element or data rounding modes 10834 _attributes->set_evex_encoding(evex_encoding); 10835 10836 // P0: byte 2, initialized to RXBR`00mm 10837 // instead of not'd 10838 int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0); 10839 byte2 = (~byte2) & 0xF0; 10840 // confine opc opcode extensions in mm bits to lower two bits 10841 // of form {0F, 0F_38, 0F_3A} 10842 byte2 |= opc; 10843 10844 // P1: byte 3 as Wvvvv1pp 10845 int byte3 = ((~nds_enc) & 0xf) << 3; 10846 // p[10] is always 1 10847 byte3 |= EVEX_F; 10848 byte3 |= (vex_w & 1) << 7; 10849 // confine pre opcode extensions in pp bits to lower two bits 10850 // of form {66, F3, F2} 10851 byte3 |= pre; 10852 10853 // P2: byte 4 as zL'Lbv'aaa 10854 // kregs are implemented in the low 3 bits as aaa 10855 int byte4 = (_attributes->is_no_reg_mask()) ? 10856 0 : 10857 _attributes->get_embedded_opmask_register_specifier(); 10858 // EVEX.v` for extending EVEX.vvvv or VIDX 10859 byte4 |= (evex_v ? 0: EVEX_V); 10860 // third EXEC.b for broadcast actions 10861 byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0); 10862 // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024 10863 byte4 |= ((_attributes->get_vector_len())& 0x3) << 5; 10864 // last is EVEX.z for zero/merge actions 10865 if (_attributes->is_no_reg_mask() == false && 10866 _attributes->get_embedded_opmask_register_specifier() != 0) { 10867 byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0); 10868 } 10869 10870 emit_int32(EVEX_4bytes, byte2, byte3, byte4); 10871 } 10872 10873 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { 10874 bool vex_r = (xreg_enc & 8) == 8; 10875 bool vex_b = adr.base_needs_rex(); 10876 bool vex_x; 10877 if (adr.isxmmindex()) { 10878 vex_x = adr.xmmindex_needs_rex(); 10879 } else { 10880 vex_x = adr.index_needs_rex(); 10881 } 10882 set_attributes(attributes); 10883 attributes->set_current_assembler(this); 10884 10885 // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction 10886 // is allowed in legacy mode and has resources which will fit in it. 10887 // Pure EVEX instructions will have is_evex_instruction set in their definition. 10888 if (!attributes->is_legacy_mode()) { 10889 if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) { 10890 if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) { 10891 attributes->set_is_legacy_mode(); 10892 } 10893 } 10894 } 10895 10896 if (UseAVX > 2) { 10897 assert(((!attributes->uses_vl()) || 10898 (attributes->get_vector_len() == AVX_512bit) || 10899 (!_legacy_mode_vl) || 10900 (attributes->is_legacy_mode())),"XMM register should be 0-15"); 10901 assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15"); 10902 } 10903 10904 clear_managed(); 10905 if (UseAVX > 2 && !attributes->is_legacy_mode()) 10906 { 10907 bool evex_r = (xreg_enc >= 16); 10908 bool evex_v; 10909 // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31) 10910 if (adr.isxmmindex()) { 10911 evex_v = ((adr._xmmindex->encoding() > 15) ? true : false); 10912 } else { 10913 evex_v = (nds_enc >= 16); 10914 } 10915 attributes->set_is_evex_instruction(); 10916 evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc); 10917 } else { 10918 if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) { 10919 attributes->set_rex_vex_w(false); 10920 } 10921 vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc); 10922 } 10923 } 10924 10925 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { 10926 bool vex_r = (dst_enc & 8) == 8; 10927 bool vex_b = (src_enc & 8) == 8; 10928 bool vex_x = false; 10929 set_attributes(attributes); 10930 attributes->set_current_assembler(this); 10931 10932 // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction 10933 // is allowed in legacy mode and has resources which will fit in it. 10934 // Pure EVEX instructions will have is_evex_instruction set in their definition. 10935 if (!attributes->is_legacy_mode()) { 10936 if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) { 10937 if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) && 10938 (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) { 10939 attributes->set_is_legacy_mode(); 10940 } 10941 } 10942 } 10943 10944 if (UseAVX > 2) { 10945 // All the scalar fp instructions (with uses_vl as false) can have legacy_mode as false 10946 // Instruction with uses_vl true are vector instructions 10947 // All the vector instructions with AVX_512bit length can have legacy_mode as false 10948 // All the vector instructions with < AVX_512bit length can have legacy_mode as false if AVX512vl() is supported 10949 // Rest all should have legacy_mode set as true 10950 assert(((!attributes->uses_vl()) || 10951 (attributes->get_vector_len() == AVX_512bit) || 10952 (!_legacy_mode_vl) || 10953 (attributes->is_legacy_mode())),"XMM register should be 0-15"); 10954 // Instruction with legacy_mode true should have dst, nds and src < 15 10955 assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15"); 10956 } 10957 10958 clear_managed(); 10959 if (UseAVX > 2 && !attributes->is_legacy_mode()) 10960 { 10961 bool evex_r = (dst_enc >= 16); 10962 bool evex_v = (nds_enc >= 16); 10963 // can use vex_x as bank extender on rm encoding 10964 vex_x = (src_enc >= 16); 10965 attributes->set_is_evex_instruction(); 10966 evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc); 10967 } else { 10968 if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) { 10969 attributes->set_rex_vex_w(false); 10970 } 10971 vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc); 10972 } 10973 10974 // return modrm byte components for operands 10975 return (((dst_enc & 7) << 3) | (src_enc & 7)); 10976 } 10977 10978 10979 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, 10980 VexOpcode opc, InstructionAttr *attributes) { 10981 if (UseAVX > 0) { 10982 int xreg_enc = xreg->encoding(); 10983 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 10984 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes); 10985 } else { 10986 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding"); 10987 rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w()); 10988 } 10989 } 10990 10991 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, 10992 VexOpcode opc, InstructionAttr *attributes) { 10993 int dst_enc = dst->encoding(); 10994 int src_enc = src->encoding(); 10995 if (UseAVX > 0) { 10996 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 10997 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes); 10998 } else { 10999 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding"); 11000 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w()); 11001 } 11002 } 11003 11004 void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 11005 assert(VM_Version::supports_avx(), ""); 11006 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 11007 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 11008 emit_int16(0x5F, (0xC0 | encode)); 11009 } 11010 11011 void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 11012 assert(VM_Version::supports_avx(), ""); 11013 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 11014 attributes.set_rex_vex_w_reverted(); 11015 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 11016 emit_int16(0x5F, (0xC0 | encode)); 11017 } 11018 11019 void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 11020 assert(VM_Version::supports_avx(), ""); 11021 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 11022 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 11023 emit_int16(0x5D, (0xC0 | encode)); 11024 } 11025 11026 void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 11027 assert(VM_Version::supports_avx(), ""); 11028 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 11029 attributes.set_rex_vex_w_reverted(); 11030 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 11031 emit_int16(0x5D, (0xC0 | encode)); 11032 } 11033 11034 void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { 11035 assert(VM_Version::supports_avx(), ""); 11036 assert(vector_len <= AVX_256bit, ""); 11037 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11038 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11039 emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop)); 11040 } 11041 11042 void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { 11043 assert(VM_Version::supports_avx(), ""); 11044 assert(vector_len <= AVX_256bit, ""); 11045 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11046 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11047 int src2_enc = src2->encoding(); 11048 emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4)); 11049 } 11050 11051 void Assembler::vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { 11052 assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), ""); 11053 assert(vector_len <= AVX_256bit, ""); 11054 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11055 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11056 int src2_enc = src2->encoding(); 11057 emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4)); 11058 } 11059 11060 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 11061 assert(VM_Version::supports_avx2(), ""); 11062 assert(vector_len <= AVX_256bit, ""); 11063 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11064 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11065 emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8); 11066 } 11067 11068 void Assembler::vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len) { 11069 assert(VM_Version::supports_avx(), ""); 11070 assert(vector_len <= AVX_256bit, ""); 11071 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11072 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 11073 emit_int24((unsigned char)0xC2, (0xC0 | encode), (unsigned char)comparison); 11074 } 11075 11076 void Assembler::evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11077 ComparisonPredicateFP comparison, int vector_len) { 11078 assert(VM_Version::supports_evex(), ""); 11079 // Encoding: EVEX.NDS.XXX.0F.W0 C2 /r ib 11080 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11081 attributes.set_is_evex_instruction(); 11082 attributes.set_embedded_opmask_register_specifier(mask); 11083 attributes.reset_is_clear_context(); 11084 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 11085 emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison); 11086 } 11087 11088 void Assembler::evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11089 ComparisonPredicateFP comparison, int vector_len) { 11090 assert(VM_Version::supports_evex(), ""); 11091 // Encoding: EVEX.NDS.XXX.66.0F.W1 C2 /r ib 11092 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11093 attributes.set_is_evex_instruction(); 11094 attributes.set_embedded_opmask_register_specifier(mask); 11095 attributes.reset_is_clear_context(); 11096 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11097 emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison); 11098 } 11099 11100 void Assembler::blendvps(XMMRegister dst, XMMRegister src) { 11101 assert(VM_Version::supports_sse4_1(), ""); 11102 assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding"); 11103 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11104 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11105 emit_int16(0x14, (0xC0 | encode)); 11106 } 11107 11108 void Assembler::blendvpd(XMMRegister dst, XMMRegister src) { 11109 assert(VM_Version::supports_sse4_1(), ""); 11110 assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding"); 11111 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11112 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11113 emit_int16(0x15, (0xC0 | encode)); 11114 } 11115 11116 void Assembler::pblendvb(XMMRegister dst, XMMRegister src) { 11117 assert(VM_Version::supports_sse4_1(), ""); 11118 assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding"); 11119 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11120 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11121 emit_int16(0x10, (0xC0 | encode)); 11122 } 11123 11124 void Assembler::vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { 11125 assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), ""); 11126 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11127 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11128 int src2_enc = src2->encoding(); 11129 emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4)); 11130 } 11131 11132 void Assembler::vblendps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 11133 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11134 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11135 emit_int24(0x0C, (0xC0 | encode), imm8); 11136 } 11137 11138 void Assembler::vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 11139 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 11140 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 11141 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11142 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11143 emit_int16(0x64, (0xC0 | encode)); 11144 } 11145 11146 void Assembler::vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 11147 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 11148 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 11149 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11150 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11151 emit_int16(0x65, (0xC0 | encode)); 11152 } 11153 11154 void Assembler::vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 11155 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 11156 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 11157 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11158 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11159 emit_int16(0x66, (0xC0 | encode)); 11160 } 11161 11162 void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 11163 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), ""); 11164 assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest"); 11165 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11166 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11167 emit_int16(0x37, (0xC0 | encode)); 11168 } 11169 11170 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11171 int comparison, bool is_signed, int vector_len) { 11172 assert(VM_Version::supports_evex(), ""); 11173 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11174 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib 11175 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11176 attributes.set_is_evex_instruction(); 11177 attributes.set_embedded_opmask_register_specifier(mask); 11178 attributes.reset_is_clear_context(); 11179 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11180 int opcode = is_signed ? 0x1F : 0x1E; 11181 emit_int24(opcode, (0xC0 | encode), comparison); 11182 } 11183 11184 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11185 int comparison, bool is_signed, int vector_len) { 11186 assert(VM_Version::supports_evex(), ""); 11187 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11188 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib 11189 InstructionMark im(this); 11190 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11191 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit); 11192 attributes.set_is_evex_instruction(); 11193 attributes.set_embedded_opmask_register_specifier(mask); 11194 attributes.reset_is_clear_context(); 11195 int dst_enc = kdst->encoding(); 11196 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11197 int opcode = is_signed ? 0x1F : 0x1E; 11198 emit_int8((unsigned char)opcode); 11199 emit_operand(as_Register(dst_enc), src); 11200 emit_int8((unsigned char)comparison); 11201 } 11202 11203 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11204 int comparison, bool is_signed, int vector_len) { 11205 assert(VM_Version::supports_evex(), ""); 11206 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11207 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib 11208 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11209 attributes.set_is_evex_instruction(); 11210 attributes.set_embedded_opmask_register_specifier(mask); 11211 attributes.reset_is_clear_context(); 11212 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11213 int opcode = is_signed ? 0x1F : 0x1E; 11214 emit_int24(opcode, (0xC0 | encode), comparison); 11215 } 11216 11217 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11218 int comparison, bool is_signed, int vector_len) { 11219 assert(VM_Version::supports_evex(), ""); 11220 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11221 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib 11222 InstructionMark im(this); 11223 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11224 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit); 11225 attributes.set_is_evex_instruction(); 11226 attributes.set_embedded_opmask_register_specifier(mask); 11227 attributes.reset_is_clear_context(); 11228 int dst_enc = kdst->encoding(); 11229 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11230 int opcode = is_signed ? 0x1F : 0x1E; 11231 emit_int8((unsigned char)opcode); 11232 emit_operand(as_Register(dst_enc), src); 11233 emit_int8((unsigned char)comparison); 11234 } 11235 11236 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11237 int comparison, bool is_signed, int vector_len) { 11238 assert(VM_Version::supports_evex(), ""); 11239 assert(VM_Version::supports_avx512bw(), ""); 11240 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11241 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib 11242 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11243 attributes.set_is_evex_instruction(); 11244 attributes.set_embedded_opmask_register_specifier(mask); 11245 attributes.reset_is_clear_context(); 11246 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11247 int opcode = is_signed ? 0x3F : 0x3E; 11248 emit_int24(opcode, (0xC0 | encode), comparison); 11249 } 11250 11251 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11252 int comparison, bool is_signed, int vector_len) { 11253 assert(VM_Version::supports_evex(), ""); 11254 assert(VM_Version::supports_avx512bw(), ""); 11255 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11256 // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib 11257 InstructionMark im(this); 11258 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11259 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 11260 attributes.set_is_evex_instruction(); 11261 attributes.set_embedded_opmask_register_specifier(mask); 11262 attributes.reset_is_clear_context(); 11263 int dst_enc = kdst->encoding(); 11264 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11265 int opcode = is_signed ? 0x3F : 0x3E; 11266 emit_int8((unsigned char)opcode); 11267 emit_operand(as_Register(dst_enc), src); 11268 emit_int8((unsigned char)comparison); 11269 } 11270 11271 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 11272 int comparison, bool is_signed, int vector_len) { 11273 assert(VM_Version::supports_evex(), ""); 11274 assert(VM_Version::supports_avx512bw(), ""); 11275 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11276 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib 11277 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11278 attributes.set_is_evex_instruction(); 11279 attributes.set_embedded_opmask_register_specifier(mask); 11280 attributes.reset_is_clear_context(); 11281 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11282 int opcode = is_signed ? 0x3F : 0x3E; 11283 emit_int24(opcode, (0xC0 | encode), comparison); 11284 } 11285 11286 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src, 11287 int comparison, bool is_signed, int vector_len) { 11288 assert(VM_Version::supports_evex(), ""); 11289 assert(VM_Version::supports_avx512bw(), ""); 11290 assert(comparison >= Assembler::eq && comparison <= Assembler::_true, ""); 11291 // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib 11292 InstructionMark im(this); 11293 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11294 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 11295 attributes.set_is_evex_instruction(); 11296 attributes.set_embedded_opmask_register_specifier(mask); 11297 attributes.reset_is_clear_context(); 11298 int dst_enc = kdst->encoding(); 11299 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11300 int opcode = is_signed ? 0x3F : 0x3E; 11301 emit_int8((unsigned char)opcode); 11302 emit_operand(as_Register(dst_enc), src); 11303 emit_int8((unsigned char)comparison); 11304 } 11305 11306 // Register is a class, but it would be assigned numerical value. 11307 // "0" is assigned for xmm0. Thus we need to ignore -Wnonnull. 11308 PRAGMA_DIAG_PUSH 11309 PRAGMA_NONNULL_IGNORED 11310 void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 11311 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11312 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11313 attributes.set_is_evex_instruction(); 11314 attributes.set_embedded_opmask_register_specifier(mask); 11315 if (merge) { 11316 attributes.reset_is_clear_context(); 11317 } 11318 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11319 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 11320 } 11321 11322 void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 11323 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11324 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11325 attributes.set_is_evex_instruction(); 11326 attributes.set_embedded_opmask_register_specifier(mask); 11327 if (merge) { 11328 attributes.reset_is_clear_context(); 11329 } 11330 int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11331 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 11332 } 11333 PRAGMA_DIAG_POP 11334 11335 void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11336 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11337 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11338 attributes.set_is_evex_instruction(); 11339 attributes.set_embedded_opmask_register_specifier(mask); 11340 if (merge) { 11341 attributes.reset_is_clear_context(); 11342 } 11343 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11344 emit_int16(0x14, (0xC0 | encode)); 11345 } 11346 11347 void Assembler::evprorvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11348 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11349 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11350 attributes.set_is_evex_instruction(); 11351 attributes.set_embedded_opmask_register_specifier(mask); 11352 if (merge) { 11353 attributes.reset_is_clear_context(); 11354 } 11355 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11356 emit_int16(0x14, (0xC0 | encode)); 11357 } 11358 11359 void Assembler::evprold(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 11360 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11361 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11362 attributes.set_is_evex_instruction(); 11363 attributes.set_embedded_opmask_register_specifier(mask); 11364 if (merge) { 11365 attributes.reset_is_clear_context(); 11366 } 11367 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11368 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 11369 } 11370 11371 void Assembler::evprolq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) { 11372 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11373 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11374 attributes.set_is_evex_instruction(); 11375 attributes.set_embedded_opmask_register_specifier(mask); 11376 if (merge) { 11377 attributes.reset_is_clear_context(); 11378 } 11379 int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 11380 emit_int24(0x72, (0xC0 | encode), shift & 0xFF); 11381 } 11382 11383 void Assembler::evprolvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11384 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11385 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11386 attributes.set_is_evex_instruction(); 11387 attributes.set_embedded_opmask_register_specifier(mask); 11388 if (merge) { 11389 attributes.reset_is_clear_context(); 11390 } 11391 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11392 emit_int16(0x15, (0xC0 | encode)); 11393 } 11394 11395 void Assembler::evprolvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11396 assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), ""); 11397 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11398 attributes.set_is_evex_instruction(); 11399 attributes.set_embedded_opmask_register_specifier(mask); 11400 if (merge) { 11401 attributes.reset_is_clear_context(); 11402 } 11403 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11404 emit_int16(0x15, (0xC0 | encode)); 11405 } 11406 11407 void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) { 11408 assert(VM_Version::supports_avx(), ""); 11409 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11410 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 11411 int mask_enc = mask->encoding(); 11412 emit_int24(0x4C, (0xC0 | encode), 0xF0 & mask_enc << 4); 11413 } 11414 11415 void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11416 assert(VM_Version::supports_evex(), ""); 11417 // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r 11418 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11419 attributes.set_is_evex_instruction(); 11420 attributes.set_embedded_opmask_register_specifier(mask); 11421 if (merge) { 11422 attributes.reset_is_clear_context(); 11423 } 11424 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11425 emit_int16(0x65, (0xC0 | encode)); 11426 } 11427 11428 void Assembler::evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11429 assert(VM_Version::supports_evex(), ""); 11430 // Encoding: EVEX.NDS.XXX.66.0F38.W0 65 /r 11431 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11432 attributes.set_is_evex_instruction(); 11433 attributes.set_embedded_opmask_register_specifier(mask); 11434 if (merge) { 11435 attributes.reset_is_clear_context(); 11436 } 11437 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11438 emit_int16(0x65, (0xC0 | encode)); 11439 } 11440 11441 void Assembler::evpblendmb (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11442 assert(VM_Version::supports_evex(), ""); 11443 assert(VM_Version::supports_avx512bw(), ""); 11444 // Encoding: EVEX.NDS.512.66.0F38.W0 66 /r 11445 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11446 attributes.set_is_evex_instruction(); 11447 attributes.set_embedded_opmask_register_specifier(mask); 11448 if (merge) { 11449 attributes.reset_is_clear_context(); 11450 } 11451 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11452 emit_int16(0x66, (0xC0 | encode)); 11453 } 11454 11455 void Assembler::evpblendmw (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11456 assert(VM_Version::supports_evex(), ""); 11457 assert(VM_Version::supports_avx512bw(), ""); 11458 // Encoding: EVEX.NDS.512.66.0F38.W1 66 /r 11459 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 11460 attributes.set_is_evex_instruction(); 11461 attributes.set_embedded_opmask_register_specifier(mask); 11462 if (merge) { 11463 attributes.reset_is_clear_context(); 11464 } 11465 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11466 emit_int16(0x66, (0xC0 | encode)); 11467 } 11468 11469 void Assembler::evpblendmd (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11470 assert(VM_Version::supports_evex(), ""); 11471 //Encoding: EVEX.NDS.512.66.0F38.W0 64 /r 11472 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11473 attributes.set_is_evex_instruction(); 11474 attributes.set_embedded_opmask_register_specifier(mask); 11475 if (merge) { 11476 attributes.reset_is_clear_context(); 11477 } 11478 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11479 emit_int16(0x64, (0xC0 | encode)); 11480 } 11481 11482 void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 11483 assert(VM_Version::supports_evex(), ""); 11484 //Encoding: EVEX.NDS.512.66.0F38.W1 64 /r 11485 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 11486 attributes.set_is_evex_instruction(); 11487 attributes.set_embedded_opmask_register_specifier(mask); 11488 if (merge) { 11489 attributes.reset_is_clear_context(); 11490 } 11491 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11492 emit_int16(0x64, (0xC0 | encode)); 11493 } 11494 11495 void Assembler::bzhiq(Register dst, Register src1, Register src2) { 11496 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 11497 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11498 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 11499 emit_int16((unsigned char)0xF5, (0xC0 | encode)); 11500 } 11501 11502 void Assembler::pext(Register dst, Register src1, Register src2) { 11503 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 11504 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11505 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11506 emit_int16((unsigned char)0xF5, (0xC0 | encode)); 11507 } 11508 11509 void Assembler::pdep(Register dst, Register src1, Register src2) { 11510 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 11511 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 11512 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); 11513 emit_int16((unsigned char)0xF5, (0xC0 | encode)); 11514 } 11515 11516 void Assembler::shlxl(Register dst, Register src1, Register src2) { 11517 assert(VM_Version::supports_bmi2(), ""); 11518 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11519 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11520 emit_int16((unsigned char)0xF7, (0xC0 | encode)); 11521 } 11522 11523 void Assembler::shlxq(Register dst, Register src1, Register src2) { 11524 assert(VM_Version::supports_bmi2(), ""); 11525 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11526 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 11527 emit_int16((unsigned char)0xF7, (0xC0 | encode)); 11528 } 11529 11530 void Assembler::shrxl(Register dst, Register src1, Register src2) { 11531 assert(VM_Version::supports_bmi2(), ""); 11532 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11533 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); 11534 emit_int16((unsigned char)0xF7, (0xC0 | encode)); 11535 } 11536 11537 void Assembler::shrxq(Register dst, Register src1, Register src2) { 11538 assert(VM_Version::supports_bmi2(), ""); 11539 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 11540 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); 11541 emit_int16((unsigned char)0xF7, (0xC0 | encode)); 11542 } 11543 11544 void Assembler::evpmovq2m(KRegister dst, XMMRegister src, int vector_len) { 11545 assert(VM_Version::supports_avx512vldq(), ""); 11546 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11547 attributes.set_is_evex_instruction(); 11548 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11549 emit_int16(0x39, (0xC0 | encode)); 11550 } 11551 11552 void Assembler::evpmovd2m(KRegister dst, XMMRegister src, int vector_len) { 11553 assert(VM_Version::supports_avx512vldq(), ""); 11554 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11555 attributes.set_is_evex_instruction(); 11556 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11557 emit_int16(0x39, (0xC0 | encode)); 11558 } 11559 11560 void Assembler::evpmovw2m(KRegister dst, XMMRegister src, int vector_len) { 11561 assert(VM_Version::supports_avx512vlbw(), ""); 11562 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11563 attributes.set_is_evex_instruction(); 11564 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11565 emit_int16(0x29, (0xC0 | encode)); 11566 } 11567 11568 void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) { 11569 assert(VM_Version::supports_avx512vlbw(), ""); 11570 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11571 attributes.set_is_evex_instruction(); 11572 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11573 emit_int16(0x29, (0xC0 | encode)); 11574 } 11575 11576 void Assembler::evpmovm2q(XMMRegister dst, KRegister src, int vector_len) { 11577 assert(VM_Version::supports_avx512vldq(), ""); 11578 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11579 attributes.set_is_evex_instruction(); 11580 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11581 emit_int16(0x38, (0xC0 | encode)); 11582 } 11583 11584 void Assembler::evpmovm2d(XMMRegister dst, KRegister src, int vector_len) { 11585 assert(VM_Version::supports_avx512vldq(), ""); 11586 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11587 attributes.set_is_evex_instruction(); 11588 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11589 emit_int16(0x38, (0xC0 | encode)); 11590 } 11591 11592 void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) { 11593 assert(VM_Version::supports_avx512vlbw(), ""); 11594 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11595 attributes.set_is_evex_instruction(); 11596 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11597 emit_int16(0x28, (0xC0 | encode)); 11598 } 11599 11600 void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) { 11601 assert(VM_Version::supports_avx512vlbw(), ""); 11602 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 11603 attributes.set_is_evex_instruction(); 11604 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 11605 emit_int16(0x28, (0xC0 | encode)); 11606 } 11607 #ifndef _LP64 11608 11609 void Assembler::incl(Register dst) { 11610 // Don't use it directly. Use MacroAssembler::incrementl() instead. 11611 emit_int8(0x40 | dst->encoding()); 11612 } 11613 11614 void Assembler::lea(Register dst, Address src) { 11615 leal(dst, src); 11616 } 11617 11618 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) { 11619 InstructionMark im(this); 11620 emit_int8((unsigned char)0xC7); 11621 emit_operand(rax, dst); 11622 emit_data((int)imm32, rspec, 0); 11623 } 11624 11625 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) { 11626 InstructionMark im(this); 11627 int encode = prefix_and_encode(dst->encoding()); 11628 emit_int8((0xB8 | encode)); 11629 emit_data((int)imm32, rspec, 0); 11630 } 11631 11632 void Assembler::popa() { // 32bit 11633 emit_int8(0x61); 11634 } 11635 11636 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) { 11637 InstructionMark im(this); 11638 emit_int8(0x68); 11639 emit_data(imm32, rspec, 0); 11640 } 11641 11642 void Assembler::pusha() { // 32bit 11643 emit_int8(0x60); 11644 } 11645 11646 void Assembler::set_byte_if_not_zero(Register dst) { 11647 emit_int24(0x0F, (unsigned char)0x95, (0xC0 | dst->encoding())); 11648 } 11649 11650 #else // LP64 11651 11652 void Assembler::set_byte_if_not_zero(Register dst) { 11653 int enc = prefix_and_encode(dst->encoding(), true); 11654 emit_int24(0x0F, (unsigned char)0x95, (0xC0 | enc)); 11655 } 11656 11657 // 64bit only pieces of the assembler 11658 // This should only be used by 64bit instructions that can use rip-relative 11659 // it cannot be used by instructions that want an immediate value. 11660 11661 bool Assembler::reachable(AddressLiteral adr) { 11662 int64_t disp; 11663 relocInfo::relocType relocType = adr.reloc(); 11664 11665 // None will force a 64bit literal to the code stream. Likely a placeholder 11666 // for something that will be patched later and we need to certain it will 11667 // always be reachable. 11668 if (relocType == relocInfo::none) { 11669 return false; 11670 } 11671 if (relocType == relocInfo::internal_word_type) { 11672 // This should be rip relative and easily reachable. 11673 return true; 11674 } 11675 if (relocType == relocInfo::virtual_call_type || 11676 relocType == relocInfo::opt_virtual_call_type || 11677 relocType == relocInfo::static_call_type || 11678 relocType == relocInfo::static_stub_type ) { 11679 // This should be rip relative within the code cache and easily 11680 // reachable until we get huge code caches. (At which point 11681 // ic code is going to have issues). 11682 return true; 11683 } 11684 if (relocType != relocInfo::external_word_type && 11685 relocType != relocInfo::poll_return_type && // these are really external_word but need special 11686 relocType != relocInfo::poll_type && // relocs to identify them 11687 relocType != relocInfo::runtime_call_type ) { 11688 return false; 11689 } 11690 11691 // Stress the correction code 11692 if (ForceUnreachable) { 11693 // Must be runtimecall reloc, see if it is in the codecache 11694 // Flipping stuff in the codecache to be unreachable causes issues 11695 // with things like inline caches where the additional instructions 11696 // are not handled. 11697 if (CodeCache::find_blob(adr._target) == NULL) { 11698 return false; 11699 } 11700 } 11701 // For external_word_type/runtime_call_type if it is reachable from where we 11702 // are now (possibly a temp buffer) and where we might end up 11703 // anywhere in the codeCache then we are always reachable. 11704 // This would have to change if we ever save/restore shared code 11705 // to be more pessimistic. 11706 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); 11707 if (!is_simm32(disp)) return false; 11708 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); 11709 if (!is_simm32(disp)) return false; 11710 11711 disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int)); 11712 11713 // Because rip relative is a disp + address_of_next_instruction and we 11714 // don't know the value of address_of_next_instruction we apply a fudge factor 11715 // to make sure we will be ok no matter the size of the instruction we get placed into. 11716 // We don't have to fudge the checks above here because they are already worst case. 11717 11718 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal 11719 // + 4 because better safe than sorry. 11720 const int fudge = 12 + 4; 11721 if (disp < 0) { 11722 disp -= fudge; 11723 } else { 11724 disp += fudge; 11725 } 11726 return is_simm32(disp); 11727 } 11728 11729 void Assembler::emit_data64(jlong data, 11730 relocInfo::relocType rtype, 11731 int format) { 11732 if (rtype == relocInfo::none) { 11733 emit_int64(data); 11734 } else { 11735 emit_data64(data, Relocation::spec_simple(rtype), format); 11736 } 11737 } 11738 11739 void Assembler::emit_data64(jlong data, 11740 RelocationHolder const& rspec, 11741 int format) { 11742 assert(imm_operand == 0, "default format must be immediate in this file"); 11743 assert(imm_operand == format, "must be immediate"); 11744 assert(inst_mark() != NULL, "must be inside InstructionMark"); 11745 // Do not use AbstractAssembler::relocate, which is not intended for 11746 // embedded words. Instead, relocate to the enclosing instruction. 11747 code_section()->relocate(inst_mark(), rspec, format); 11748 #ifdef ASSERT 11749 check_relocation(rspec, format); 11750 #endif 11751 emit_int64(data); 11752 } 11753 11754 void Assembler::prefix(Register reg) { 11755 if (reg->encoding() >= 8) { 11756 prefix(REX_B); 11757 } 11758 } 11759 11760 void Assembler::prefix(Register dst, Register src, Prefix p) { 11761 if (src->encoding() >= 8) { 11762 p = (Prefix)(p | REX_B); 11763 } 11764 if (dst->encoding() >= 8) { 11765 p = (Prefix)(p | REX_R); 11766 } 11767 if (p != Prefix_EMPTY) { 11768 // do not generate an empty prefix 11769 prefix(p); 11770 } 11771 } 11772 11773 void Assembler::prefix(Register dst, Address adr, Prefix p) { 11774 if (adr.base_needs_rex()) { 11775 if (adr.index_needs_rex()) { 11776 assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X"); 11777 } else { 11778 prefix(REX_B); 11779 } 11780 } else { 11781 if (adr.index_needs_rex()) { 11782 assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X"); 11783 } 11784 } 11785 if (dst->encoding() >= 8) { 11786 p = (Prefix)(p | REX_R); 11787 } 11788 if (p != Prefix_EMPTY) { 11789 // do not generate an empty prefix 11790 prefix(p); 11791 } 11792 } 11793 11794 void Assembler::prefix(Address adr) { 11795 if (adr.base_needs_rex()) { 11796 if (adr.index_needs_rex()) { 11797 prefix(REX_XB); 11798 } else { 11799 prefix(REX_B); 11800 } 11801 } else { 11802 if (adr.index_needs_rex()) { 11803 prefix(REX_X); 11804 } 11805 } 11806 } 11807 11808 void Assembler::prefix(Address adr, Register reg, bool byteinst) { 11809 if (reg->encoding() < 8) { 11810 if (adr.base_needs_rex()) { 11811 if (adr.index_needs_rex()) { 11812 prefix(REX_XB); 11813 } else { 11814 prefix(REX_B); 11815 } 11816 } else { 11817 if (adr.index_needs_rex()) { 11818 prefix(REX_X); 11819 } else if (byteinst && reg->encoding() >= 4) { 11820 prefix(REX); 11821 } 11822 } 11823 } else { 11824 if (adr.base_needs_rex()) { 11825 if (adr.index_needs_rex()) { 11826 prefix(REX_RXB); 11827 } else { 11828 prefix(REX_RB); 11829 } 11830 } else { 11831 if (adr.index_needs_rex()) { 11832 prefix(REX_RX); 11833 } else { 11834 prefix(REX_R); 11835 } 11836 } 11837 } 11838 } 11839 11840 void Assembler::prefix(Address adr, XMMRegister reg) { 11841 if (reg->encoding() < 8) { 11842 if (adr.base_needs_rex()) { 11843 if (adr.index_needs_rex()) { 11844 prefix(REX_XB); 11845 } else { 11846 prefix(REX_B); 11847 } 11848 } else { 11849 if (adr.index_needs_rex()) { 11850 prefix(REX_X); 11851 } 11852 } 11853 } else { 11854 if (adr.base_needs_rex()) { 11855 if (adr.index_needs_rex()) { 11856 prefix(REX_RXB); 11857 } else { 11858 prefix(REX_RB); 11859 } 11860 } else { 11861 if (adr.index_needs_rex()) { 11862 prefix(REX_RX); 11863 } else { 11864 prefix(REX_R); 11865 } 11866 } 11867 } 11868 } 11869 11870 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { 11871 if (reg_enc >= 8) { 11872 prefix(REX_B); 11873 reg_enc -= 8; 11874 } else if (byteinst && reg_enc >= 4) { 11875 prefix(REX); 11876 } 11877 return reg_enc; 11878 } 11879 11880 int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) { 11881 if (dst_enc < 8) { 11882 if (src_enc >= 8) { 11883 prefix(REX_B); 11884 src_enc -= 8; 11885 } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) { 11886 prefix(REX); 11887 } 11888 } else { 11889 if (src_enc < 8) { 11890 prefix(REX_R); 11891 } else { 11892 prefix(REX_RB); 11893 src_enc -= 8; 11894 } 11895 dst_enc -= 8; 11896 } 11897 return dst_enc << 3 | src_enc; 11898 } 11899 11900 int8_t Assembler::get_prefixq(Address adr) { 11901 int8_t prfx = get_prefixq(adr, rax); 11902 assert(REX_W <= prfx && prfx <= REX_WXB, "must be"); 11903 return prfx; 11904 } 11905 11906 int8_t Assembler::get_prefixq(Address adr, Register src) { 11907 int8_t prfx = (int8_t)(REX_W + 11908 ((int)adr.base_needs_rex()) + 11909 ((int)adr.index_needs_rex() << 1) + 11910 ((int)(src->encoding() >= 8) << 2)); 11911 #ifdef ASSERT 11912 if (src->encoding() < 8) { 11913 if (adr.base_needs_rex()) { 11914 if (adr.index_needs_rex()) { 11915 assert(prfx == REX_WXB, "must be"); 11916 } else { 11917 assert(prfx == REX_WB, "must be"); 11918 } 11919 } else { 11920 if (adr.index_needs_rex()) { 11921 assert(prfx == REX_WX, "must be"); 11922 } else { 11923 assert(prfx == REX_W, "must be"); 11924 } 11925 } 11926 } else { 11927 if (adr.base_needs_rex()) { 11928 if (adr.index_needs_rex()) { 11929 assert(prfx == REX_WRXB, "must be"); 11930 } else { 11931 assert(prfx == REX_WRB, "must be"); 11932 } 11933 } else { 11934 if (adr.index_needs_rex()) { 11935 assert(prfx == REX_WRX, "must be"); 11936 } else { 11937 assert(prfx == REX_WR, "must be"); 11938 } 11939 } 11940 } 11941 #endif 11942 return prfx; 11943 } 11944 11945 void Assembler::prefixq(Address adr) { 11946 emit_int8(get_prefixq(adr)); 11947 } 11948 11949 void Assembler::prefixq(Address adr, Register src) { 11950 emit_int8(get_prefixq(adr, src)); 11951 } 11952 11953 void Assembler::prefixq(Address adr, XMMRegister src) { 11954 if (src->encoding() < 8) { 11955 if (adr.base_needs_rex()) { 11956 if (adr.index_needs_rex()) { 11957 prefix(REX_WXB); 11958 } else { 11959 prefix(REX_WB); 11960 } 11961 } else { 11962 if (adr.index_needs_rex()) { 11963 prefix(REX_WX); 11964 } else { 11965 prefix(REX_W); 11966 } 11967 } 11968 } else { 11969 if (adr.base_needs_rex()) { 11970 if (adr.index_needs_rex()) { 11971 prefix(REX_WRXB); 11972 } else { 11973 prefix(REX_WRB); 11974 } 11975 } else { 11976 if (adr.index_needs_rex()) { 11977 prefix(REX_WRX); 11978 } else { 11979 prefix(REX_WR); 11980 } 11981 } 11982 } 11983 } 11984 11985 int Assembler::prefixq_and_encode(int reg_enc) { 11986 if (reg_enc < 8) { 11987 prefix(REX_W); 11988 } else { 11989 prefix(REX_WB); 11990 reg_enc -= 8; 11991 } 11992 return reg_enc; 11993 } 11994 11995 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { 11996 if (dst_enc < 8) { 11997 if (src_enc < 8) { 11998 prefix(REX_W); 11999 } else { 12000 prefix(REX_WB); 12001 src_enc -= 8; 12002 } 12003 } else { 12004 if (src_enc < 8) { 12005 prefix(REX_WR); 12006 } else { 12007 prefix(REX_WRB); 12008 src_enc -= 8; 12009 } 12010 dst_enc -= 8; 12011 } 12012 return dst_enc << 3 | src_enc; 12013 } 12014 12015 void Assembler::adcq(Register dst, int32_t imm32) { 12016 (void) prefixq_and_encode(dst->encoding()); 12017 emit_arith(0x81, 0xD0, dst, imm32); 12018 } 12019 12020 void Assembler::adcq(Register dst, Address src) { 12021 InstructionMark im(this); 12022 emit_int16(get_prefixq(src, dst), 0x13); 12023 emit_operand(dst, src); 12024 } 12025 12026 void Assembler::adcq(Register dst, Register src) { 12027 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 12028 emit_arith(0x13, 0xC0, dst, src); 12029 } 12030 12031 void Assembler::addq(Address dst, int32_t imm32) { 12032 InstructionMark im(this); 12033 prefixq(dst); 12034 emit_arith_operand(0x81, rax, dst, imm32); 12035 } 12036 12037 void Assembler::addq(Address dst, Register src) { 12038 InstructionMark im(this); 12039 emit_int16(get_prefixq(dst, src), 0x01); 12040 emit_operand(src, dst); 12041 } 12042 12043 void Assembler::addq(Register dst, int32_t imm32) { 12044 (void) prefixq_and_encode(dst->encoding()); 12045 emit_arith(0x81, 0xC0, dst, imm32); 12046 } 12047 12048 void Assembler::addq(Register dst, Address src) { 12049 InstructionMark im(this); 12050 emit_int16(get_prefixq(src, dst), 0x03); 12051 emit_operand(dst, src); 12052 } 12053 12054 void Assembler::addq(Register dst, Register src) { 12055 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 12056 emit_arith(0x03, 0xC0, dst, src); 12057 } 12058 12059 void Assembler::adcxq(Register dst, Register src) { 12060 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 12061 emit_int8(0x66); 12062 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12063 emit_int32(0x0F, 12064 0x38, 12065 (unsigned char)0xF6, 12066 (0xC0 | encode)); 12067 } 12068 12069 void Assembler::adoxq(Register dst, Register src) { 12070 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 12071 emit_int8((unsigned char)0xF3); 12072 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12073 emit_int32(0x0F, 12074 0x38, 12075 (unsigned char)0xF6, 12076 (0xC0 | encode)); 12077 } 12078 12079 void Assembler::andq(Address dst, int32_t imm32) { 12080 InstructionMark im(this); 12081 prefixq(dst); 12082 emit_arith_operand(0x81, as_Register(4), dst, imm32); 12083 } 12084 12085 void Assembler::andq(Register dst, int32_t imm32) { 12086 (void) prefixq_and_encode(dst->encoding()); 12087 emit_arith(0x81, 0xE0, dst, imm32); 12088 } 12089 12090 void Assembler::andq(Register dst, Address src) { 12091 InstructionMark im(this); 12092 emit_int16(get_prefixq(src, dst), 0x23); 12093 emit_operand(dst, src); 12094 } 12095 12096 void Assembler::andq(Register dst, Register src) { 12097 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 12098 emit_arith(0x23, 0xC0, dst, src); 12099 } 12100 12101 void Assembler::andq(Address dst, Register src) { 12102 InstructionMark im(this); 12103 emit_int16(get_prefixq(dst, src), 0x21); 12104 emit_operand(src, dst); 12105 } 12106 12107 void Assembler::andnq(Register dst, Register src1, Register src2) { 12108 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12109 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12110 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12111 emit_int16((unsigned char)0xF2, (0xC0 | encode)); 12112 } 12113 12114 void Assembler::andnq(Register dst, Register src1, Address src2) { 12115 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12116 InstructionMark im(this); 12117 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12118 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12119 emit_int8((unsigned char)0xF2); 12120 emit_operand(dst, src2); 12121 } 12122 12123 void Assembler::bsfq(Register dst, Register src) { 12124 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12125 emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode)); 12126 } 12127 12128 void Assembler::bsrq(Register dst, Register src) { 12129 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12130 emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode)); 12131 } 12132 12133 void Assembler::bswapq(Register reg) { 12134 int encode = prefixq_and_encode(reg->encoding()); 12135 emit_int16(0x0F, (0xC8 | encode)); 12136 } 12137 12138 void Assembler::blsiq(Register dst, Register src) { 12139 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12140 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12141 int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12142 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 12143 } 12144 12145 void Assembler::blsiq(Register dst, Address src) { 12146 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12147 InstructionMark im(this); 12148 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12149 vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12150 emit_int8((unsigned char)0xF3); 12151 emit_operand(rbx, src); 12152 } 12153 12154 void Assembler::blsmskq(Register dst, Register src) { 12155 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12156 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12157 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12158 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 12159 } 12160 12161 void Assembler::blsmskq(Register dst, Address src) { 12162 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12163 InstructionMark im(this); 12164 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12165 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12166 emit_int8((unsigned char)0xF3); 12167 emit_operand(rdx, src); 12168 } 12169 12170 void Assembler::blsrq(Register dst, Register src) { 12171 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12172 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12173 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12174 emit_int16((unsigned char)0xF3, (0xC0 | encode)); 12175 } 12176 12177 void Assembler::blsrq(Register dst, Address src) { 12178 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 12179 InstructionMark im(this); 12180 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12181 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 12182 emit_int8((unsigned char)0xF3); 12183 emit_operand(rcx, src); 12184 } 12185 12186 void Assembler::cdqq() { 12187 emit_int16(REX_W, (unsigned char)0x99); 12188 } 12189 12190 void Assembler::clflush(Address adr) { 12191 assert(VM_Version::supports_clflush(), "should do"); 12192 prefix(adr); 12193 emit_int16(0x0F, (unsigned char)0xAE); 12194 emit_operand(rdi, adr); 12195 } 12196 12197 void Assembler::clflushopt(Address adr) { 12198 assert(VM_Version::supports_clflushopt(), "should do!"); 12199 // adr should be base reg only with no index or offset 12200 assert(adr.index() == noreg, "index should be noreg"); 12201 assert(adr.scale() == Address::no_scale, "scale should be no_scale"); 12202 assert(adr.disp() == 0, "displacement should be 0"); 12203 // instruction prefix is 0x66 12204 emit_int8(0x66); 12205 prefix(adr); 12206 // opcode family is 0x0F 0xAE 12207 emit_int16(0x0F, (unsigned char)0xAE); 12208 // extended opcode byte is 7 == rdi 12209 emit_operand(rdi, adr); 12210 } 12211 12212 void Assembler::clwb(Address adr) { 12213 assert(VM_Version::supports_clwb(), "should do!"); 12214 // adr should be base reg only with no index or offset 12215 assert(adr.index() == noreg, "index should be noreg"); 12216 assert(adr.scale() == Address::no_scale, "scale should be no_scale"); 12217 assert(adr.disp() == 0, "displacement should be 0"); 12218 // instruction prefix is 0x66 12219 emit_int8(0x66); 12220 prefix(adr); 12221 // opcode family is 0x0f 0xAE 12222 emit_int16(0x0F, (unsigned char)0xAE); 12223 // extended opcode byte is 6 == rsi 12224 emit_operand(rsi, adr); 12225 } 12226 12227 void Assembler::cmovq(Condition cc, Register dst, Register src) { 12228 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12229 emit_int24(0x0F, (0x40 | cc), (0xC0 | encode)); 12230 } 12231 12232 void Assembler::cmovq(Condition cc, Register dst, Address src) { 12233 InstructionMark im(this); 12234 emit_int24(get_prefixq(src, dst), 0x0F, (0x40 | cc)); 12235 emit_operand(dst, src); 12236 } 12237 12238 void Assembler::cmpq(Address dst, int32_t imm32) { 12239 InstructionMark im(this); 12240 emit_int16(get_prefixq(dst), (unsigned char)0x81); 12241 emit_operand(rdi, dst, 4); 12242 emit_int32(imm32); 12243 } 12244 12245 void Assembler::cmpq(Register dst, int32_t imm32) { 12246 (void) prefixq_and_encode(dst->encoding()); 12247 emit_arith(0x81, 0xF8, dst, imm32); 12248 } 12249 12250 void Assembler::cmpq(Address dst, Register src) { 12251 InstructionMark im(this); 12252 emit_int16(get_prefixq(dst, src), 0x39); 12253 emit_operand(src, dst); 12254 } 12255 12256 void Assembler::cmpq(Register dst, Register src) { 12257 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 12258 emit_arith(0x3B, 0xC0, dst, src); 12259 } 12260 12261 void Assembler::cmpq(Register dst, Address src) { 12262 InstructionMark im(this); 12263 emit_int16(get_prefixq(src, dst), 0x3B); 12264 emit_operand(dst, src); 12265 } 12266 12267 void Assembler::cmpxchgq(Register reg, Address adr) { 12268 InstructionMark im(this); 12269 emit_int24(get_prefixq(adr, reg), 0x0F, (unsigned char)0xB1); 12270 emit_operand(reg, adr); 12271 } 12272 12273 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { 12274 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12275 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12276 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 12277 emit_int16(0x2A, (0xC0 | encode)); 12278 } 12279 12280 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { 12281 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12282 InstructionMark im(this); 12283 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12284 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 12285 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 12286 emit_int8(0x2A); 12287 emit_operand(dst, src); 12288 } 12289 12290 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) { 12291 NOT_LP64(assert(VM_Version::supports_sse(), "")); 12292 InstructionMark im(this); 12293 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12294 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 12295 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 12296 emit_int8(0x2A); 12297 emit_operand(dst, src); 12298 } 12299 12300 void Assembler::cvttsd2siq(Register dst, Address src) { 12301 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12302 // F2 REX.W 0F 2C /r 12303 // CVTTSD2SI r64, xmm1/m64 12304 InstructionMark im(this); 12305 emit_int32((unsigned char)0xF2, REX_W, 0x0F, 0x2C); 12306 emit_operand(dst, src); 12307 } 12308 12309 void Assembler::cvttsd2siq(Register dst, XMMRegister src) { 12310 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12311 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12312 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 12313 emit_int16(0x2C, (0xC0 | encode)); 12314 } 12315 12316 void Assembler::cvtsd2siq(Register dst, XMMRegister src) { 12317 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12318 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12319 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 12320 emit_int16(0x2D, (0xC0 | encode)); 12321 } 12322 12323 void Assembler::cvttss2siq(Register dst, XMMRegister src) { 12324 NOT_LP64(assert(VM_Version::supports_sse(), "")); 12325 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12326 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 12327 emit_int16(0x2C, (0xC0 | encode)); 12328 } 12329 12330 void Assembler::decl(Register dst) { 12331 // Don't use it directly. Use MacroAssembler::decrementl() instead. 12332 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 12333 int encode = prefix_and_encode(dst->encoding()); 12334 emit_int16((unsigned char)0xFF, (0xC8 | encode)); 12335 } 12336 12337 void Assembler::decq(Register dst) { 12338 // Don't use it directly. Use MacroAssembler::decrementq() instead. 12339 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) 12340 int encode = prefixq_and_encode(dst->encoding()); 12341 emit_int16((unsigned char)0xFF, 0xC8 | encode); 12342 } 12343 12344 void Assembler::decq(Address dst) { 12345 // Don't use it directly. Use MacroAssembler::decrementq() instead. 12346 InstructionMark im(this); 12347 emit_int16(get_prefixq(dst), (unsigned char)0xFF); 12348 emit_operand(rcx, dst); 12349 } 12350 12351 void Assembler::fxrstor(Address src) { 12352 emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE); 12353 emit_operand(as_Register(1), src); 12354 } 12355 12356 void Assembler::xrstor(Address src) { 12357 emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE); 12358 emit_operand(as_Register(5), src); 12359 } 12360 12361 void Assembler::fxsave(Address dst) { 12362 emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE); 12363 emit_operand(as_Register(0), dst); 12364 } 12365 12366 void Assembler::xsave(Address dst) { 12367 emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE); 12368 emit_operand(as_Register(4), dst); 12369 } 12370 12371 void Assembler::idivq(Register src) { 12372 int encode = prefixq_and_encode(src->encoding()); 12373 emit_int16((unsigned char)0xF7, (0xF8 | encode)); 12374 } 12375 12376 void Assembler::idivq(Address src) { 12377 InstructionMark im(this); 12378 prefixq(src); 12379 emit_int8((unsigned char)0xF7); 12380 emit_operand(as_Register(7), src); 12381 } 12382 12383 void Assembler::divq(Register src) { 12384 int encode = prefixq_and_encode(src->encoding()); 12385 emit_int16((unsigned char)0xF7, (0xF0 | encode)); 12386 } 12387 12388 void Assembler::imulq(Register dst, Register src) { 12389 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12390 emit_int24(0x0F, (unsigned char)0xAF, (0xC0 | encode)); 12391 } 12392 12393 void Assembler::imulq(Register src) { 12394 int encode = prefixq_and_encode(src->encoding()); 12395 emit_int16((unsigned char)0xF7, (0xE8 | encode)); 12396 } 12397 12398 void Assembler::imulq(Register dst, Address src, int32_t value) { 12399 InstructionMark im(this); 12400 prefixq(src, dst); 12401 if (is8bit(value)) { 12402 emit_int8((unsigned char)0x6B); 12403 emit_operand(dst, src); 12404 emit_int8(value); 12405 } else { 12406 emit_int8((unsigned char)0x69); 12407 emit_operand(dst, src); 12408 emit_int32(value); 12409 } 12410 } 12411 12412 void Assembler::imulq(Register dst, Register src, int value) { 12413 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12414 if (is8bit(value)) { 12415 emit_int24(0x6B, (0xC0 | encode), (value & 0xFF)); 12416 } else { 12417 emit_int16(0x69, (0xC0 | encode)); 12418 emit_int32(value); 12419 } 12420 } 12421 12422 void Assembler::imulq(Register dst, Address src) { 12423 InstructionMark im(this); 12424 emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xAF); 12425 emit_operand(dst, src); 12426 } 12427 12428 void Assembler::incl(Register dst) { 12429 // Don't use it directly. Use MacroAssembler::incrementl() instead. 12430 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) 12431 int encode = prefix_and_encode(dst->encoding()); 12432 emit_int16((unsigned char)0xFF, (0xC0 | encode)); 12433 } 12434 12435 void Assembler::incq(Register dst) { 12436 // Don't use it directly. Use MacroAssembler::incrementq() instead. 12437 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) 12438 int encode = prefixq_and_encode(dst->encoding()); 12439 emit_int16((unsigned char)0xFF, (0xC0 | encode)); 12440 } 12441 12442 void Assembler::incq(Address dst) { 12443 // Don't use it directly. Use MacroAssembler::incrementq() instead. 12444 InstructionMark im(this); 12445 emit_int16(get_prefixq(dst), (unsigned char)0xFF); 12446 emit_operand(rax, dst); 12447 } 12448 12449 void Assembler::lea(Register dst, Address src) { 12450 leaq(dst, src); 12451 } 12452 12453 void Assembler::leaq(Register dst, Address src) { 12454 InstructionMark im(this); 12455 emit_int16(get_prefixq(src, dst), (unsigned char)0x8D); 12456 emit_operand(dst, src); 12457 } 12458 12459 void Assembler::mov64(Register dst, int64_t imm64) { 12460 InstructionMark im(this); 12461 int encode = prefixq_and_encode(dst->encoding()); 12462 emit_int8(0xB8 | encode); 12463 emit_int64(imm64); 12464 } 12465 12466 void Assembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) { 12467 InstructionMark im(this); 12468 int encode = prefixq_and_encode(dst->encoding()); 12469 emit_int8(0xB8 | encode); 12470 emit_data64(imm64, rtype, format); 12471 } 12472 12473 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { 12474 InstructionMark im(this); 12475 int encode = prefixq_and_encode(dst->encoding()); 12476 emit_int8(0xB8 | encode); 12477 emit_data64(imm64, rspec); 12478 } 12479 12480 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) { 12481 InstructionMark im(this); 12482 int encode = prefix_and_encode(dst->encoding()); 12483 emit_int8(0xB8 | encode); 12484 emit_data((int)imm32, rspec, narrow_oop_operand); 12485 } 12486 12487 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) { 12488 InstructionMark im(this); 12489 prefix(dst); 12490 emit_int8((unsigned char)0xC7); 12491 emit_operand(rax, dst, 4); 12492 emit_data((int)imm32, rspec, narrow_oop_operand); 12493 } 12494 12495 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { 12496 InstructionMark im(this); 12497 int encode = prefix_and_encode(src1->encoding()); 12498 emit_int16((unsigned char)0x81, (0xF8 | encode)); 12499 emit_data((int)imm32, rspec, narrow_oop_operand); 12500 } 12501 12502 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { 12503 InstructionMark im(this); 12504 prefix(src1); 12505 emit_int8((unsigned char)0x81); 12506 emit_operand(rax, src1, 4); 12507 emit_data((int)imm32, rspec, narrow_oop_operand); 12508 } 12509 12510 void Assembler::lzcntq(Register dst, Register src) { 12511 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 12512 emit_int8((unsigned char)0xF3); 12513 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12514 emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode)); 12515 } 12516 12517 void Assembler::movdq(XMMRegister dst, Register src) { 12518 // table D-1 says MMX/SSE2 12519 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12520 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12521 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 12522 emit_int16(0x6E, (0xC0 | encode)); 12523 } 12524 12525 void Assembler::movdq(Register dst, XMMRegister src) { 12526 // table D-1 says MMX/SSE2 12527 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 12528 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 12529 // swap src/dst to get correct prefix 12530 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 12531 emit_int16(0x7E, 12532 (0xC0 | encode)); 12533 } 12534 12535 void Assembler::movq(Register dst, Register src) { 12536 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12537 emit_int16((unsigned char)0x8B, 12538 (0xC0 | encode)); 12539 } 12540 12541 void Assembler::movq(Register dst, Address src) { 12542 InstructionMark im(this); 12543 emit_int16(get_prefixq(src, dst), (unsigned char)0x8B); 12544 emit_operand(dst, src); 12545 } 12546 12547 void Assembler::movq(Address dst, Register src) { 12548 InstructionMark im(this); 12549 emit_int16(get_prefixq(dst, src), (unsigned char)0x89); 12550 emit_operand(src, dst); 12551 } 12552 12553 void Assembler::movq(Address dst, int32_t imm32) { 12554 InstructionMark im(this); 12555 emit_int16(get_prefixq(dst), (unsigned char)0xC7); 12556 emit_operand(as_Register(0), dst); 12557 emit_int32(imm32); 12558 } 12559 12560 void Assembler::movq(Register dst, int32_t imm32) { 12561 int encode = prefixq_and_encode(dst->encoding()); 12562 emit_int16((unsigned char)0xC7, (0xC0 | encode)); 12563 emit_int32(imm32); 12564 } 12565 12566 void Assembler::movsbq(Register dst, Address src) { 12567 InstructionMark im(this); 12568 emit_int24(get_prefixq(src, dst), 12569 0x0F, 12570 (unsigned char)0xBE); 12571 emit_operand(dst, src); 12572 } 12573 12574 void Assembler::movsbq(Register dst, Register src) { 12575 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12576 emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode)); 12577 } 12578 12579 void Assembler::movslq(Register dst, int32_t imm32) { 12580 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) 12581 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx) 12582 // as a result we shouldn't use until tested at runtime... 12583 ShouldNotReachHere(); 12584 InstructionMark im(this); 12585 int encode = prefixq_and_encode(dst->encoding()); 12586 emit_int8(0xC7 | encode); 12587 emit_int32(imm32); 12588 } 12589 12590 void Assembler::movslq(Address dst, int32_t imm32) { 12591 assert(is_simm32(imm32), "lost bits"); 12592 InstructionMark im(this); 12593 emit_int16(get_prefixq(dst), (unsigned char)0xC7); 12594 emit_operand(rax, dst, 4); 12595 emit_int32(imm32); 12596 } 12597 12598 void Assembler::movslq(Register dst, Address src) { 12599 InstructionMark im(this); 12600 emit_int16(get_prefixq(src, dst), 0x63); 12601 emit_operand(dst, src); 12602 } 12603 12604 void Assembler::movslq(Register dst, Register src) { 12605 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12606 emit_int16(0x63, (0xC0 | encode)); 12607 } 12608 12609 void Assembler::movswq(Register dst, Address src) { 12610 InstructionMark im(this); 12611 emit_int24(get_prefixq(src, dst), 12612 0x0F, 12613 (unsigned char)0xBF); 12614 emit_operand(dst, src); 12615 } 12616 12617 void Assembler::movswq(Register dst, Register src) { 12618 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12619 emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode)); 12620 } 12621 12622 void Assembler::movzbq(Register dst, Address src) { 12623 InstructionMark im(this); 12624 emit_int24(get_prefixq(src, dst), 12625 0x0F, 12626 (unsigned char)0xB6); 12627 emit_operand(dst, src); 12628 } 12629 12630 void Assembler::movzbq(Register dst, Register src) { 12631 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12632 emit_int24(0x0F, (unsigned char)0xB6, (0xC0 | encode)); 12633 } 12634 12635 void Assembler::movzwq(Register dst, Address src) { 12636 InstructionMark im(this); 12637 emit_int24(get_prefixq(src, dst), 12638 0x0F, 12639 (unsigned char)0xB7); 12640 emit_operand(dst, src); 12641 } 12642 12643 void Assembler::movzwq(Register dst, Register src) { 12644 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12645 emit_int24(0x0F, (unsigned char)0xB7, (0xC0 | encode)); 12646 } 12647 12648 void Assembler::mulq(Address src) { 12649 InstructionMark im(this); 12650 emit_int16(get_prefixq(src), (unsigned char)0xF7); 12651 emit_operand(rsp, src); 12652 } 12653 12654 void Assembler::mulq(Register src) { 12655 int encode = prefixq_and_encode(src->encoding()); 12656 emit_int16((unsigned char)0xF7, (0xE0 | encode)); 12657 } 12658 12659 void Assembler::mulxq(Register dst1, Register dst2, Register src) { 12660 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 12661 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12662 int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); 12663 emit_int16((unsigned char)0xF6, (0xC0 | encode)); 12664 } 12665 12666 void Assembler::negq(Register dst) { 12667 int encode = prefixq_and_encode(dst->encoding()); 12668 emit_int16((unsigned char)0xF7, (0xD8 | encode)); 12669 } 12670 12671 void Assembler::negq(Address dst) { 12672 InstructionMark im(this); 12673 emit_int16(get_prefixq(dst), (unsigned char)0xF7); 12674 emit_operand(as_Register(3), dst); 12675 } 12676 12677 void Assembler::notq(Register dst) { 12678 int encode = prefixq_and_encode(dst->encoding()); 12679 emit_int16((unsigned char)0xF7, (0xD0 | encode)); 12680 } 12681 12682 void Assembler::btsq(Address dst, int imm8) { 12683 assert(isByte(imm8), "not a byte"); 12684 InstructionMark im(this); 12685 emit_int24(get_prefixq(dst), 12686 0x0F, 12687 (unsigned char)0xBA); 12688 emit_operand(rbp /* 5 */, dst, 1); 12689 emit_int8(imm8); 12690 } 12691 12692 void Assembler::btrq(Address dst, int imm8) { 12693 assert(isByte(imm8), "not a byte"); 12694 InstructionMark im(this); 12695 emit_int24(get_prefixq(dst), 12696 0x0F, 12697 (unsigned char)0xBA); 12698 emit_operand(rsi /* 6 */, dst, 1); 12699 emit_int8(imm8); 12700 } 12701 12702 void Assembler::orq(Address dst, int32_t imm32) { 12703 InstructionMark im(this); 12704 prefixq(dst); 12705 emit_arith_operand(0x81, as_Register(1), dst, imm32); 12706 } 12707 12708 void Assembler::orq(Address dst, Register src) { 12709 InstructionMark im(this); 12710 emit_int16(get_prefixq(dst, src), (unsigned char)0x09); 12711 emit_operand(src, dst); 12712 } 12713 12714 void Assembler::orq(Register dst, int32_t imm32) { 12715 (void) prefixq_and_encode(dst->encoding()); 12716 emit_arith(0x81, 0xC8, dst, imm32); 12717 } 12718 12719 void Assembler::orq(Register dst, Address src) { 12720 InstructionMark im(this); 12721 emit_int16(get_prefixq(src, dst), 0x0B); 12722 emit_operand(dst, src); 12723 } 12724 12725 void Assembler::orq(Register dst, Register src) { 12726 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 12727 emit_arith(0x0B, 0xC0, dst, src); 12728 } 12729 12730 void Assembler::popcntq(Register dst, Address src) { 12731 assert(VM_Version::supports_popcnt(), "must support"); 12732 InstructionMark im(this); 12733 emit_int32((unsigned char)0xF3, 12734 get_prefixq(src, dst), 12735 0x0F, 12736 (unsigned char)0xB8); 12737 emit_operand(dst, src); 12738 } 12739 12740 void Assembler::popcntq(Register dst, Register src) { 12741 assert(VM_Version::supports_popcnt(), "must support"); 12742 emit_int8((unsigned char)0xF3); 12743 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 12744 emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode)); 12745 } 12746 12747 void Assembler::popq(Address dst) { 12748 InstructionMark im(this); 12749 emit_int16(get_prefixq(dst), (unsigned char)0x8F); 12750 emit_operand(rax, dst); 12751 } 12752 12753 void Assembler::popq(Register dst) { 12754 emit_int8((unsigned char)0x58 | dst->encoding()); 12755 } 12756 12757 // Precomputable: popa, pusha, vzeroupper 12758 12759 // The result of these routines are invariant from one invocation to another 12760 // invocation for the duration of a run. Caching the result on bootstrap 12761 // and copying it out on subsequent invocations can thus be beneficial 12762 static bool precomputed = false; 12763 12764 static u_char* popa_code = NULL; 12765 static int popa_len = 0; 12766 12767 static u_char* pusha_code = NULL; 12768 static int pusha_len = 0; 12769 12770 static u_char* vzup_code = NULL; 12771 static int vzup_len = 0; 12772 12773 void Assembler::precompute_instructions() { 12774 assert(!Universe::is_fully_initialized(), "must still be single threaded"); 12775 guarantee(!precomputed, "only once"); 12776 precomputed = true; 12777 ResourceMark rm; 12778 12779 // Make a temporary buffer big enough for the routines we're capturing 12780 int size = 256; 12781 char* tmp_code = NEW_RESOURCE_ARRAY(char, size); 12782 CodeBuffer buffer((address)tmp_code, size); 12783 MacroAssembler masm(&buffer); 12784 12785 address begin_popa = masm.code_section()->end(); 12786 masm.popa_uncached(); 12787 address end_popa = masm.code_section()->end(); 12788 masm.pusha_uncached(); 12789 address end_pusha = masm.code_section()->end(); 12790 masm.vzeroupper_uncached(); 12791 address end_vzup = masm.code_section()->end(); 12792 12793 // Save the instructions to permanent buffers. 12794 popa_len = (int)(end_popa - begin_popa); 12795 popa_code = NEW_C_HEAP_ARRAY(u_char, popa_len, mtInternal); 12796 memcpy(popa_code, begin_popa, popa_len); 12797 12798 pusha_len = (int)(end_pusha - end_popa); 12799 pusha_code = NEW_C_HEAP_ARRAY(u_char, pusha_len, mtInternal); 12800 memcpy(pusha_code, end_popa, pusha_len); 12801 12802 vzup_len = (int)(end_vzup - end_pusha); 12803 if (vzup_len > 0) { 12804 vzup_code = NEW_C_HEAP_ARRAY(u_char, vzup_len, mtInternal); 12805 memcpy(vzup_code, end_pusha, vzup_len); 12806 } else { 12807 vzup_code = pusha_code; // dummy 12808 } 12809 12810 assert(masm.code()->total_oop_size() == 0 && 12811 masm.code()->total_metadata_size() == 0 && 12812 masm.code()->total_relocation_size() == 0, 12813 "pre-computed code can't reference oops, metadata or contain relocations"); 12814 } 12815 12816 static void emit_copy(CodeSection* code_section, u_char* src, int src_len) { 12817 assert(src != NULL, "code to copy must have been pre-computed"); 12818 assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough"); 12819 address end = code_section->end(); 12820 memcpy(end, src, src_len); 12821 code_section->set_end(end + src_len); 12822 } 12823 12824 void Assembler::popa() { // 64bit 12825 emit_copy(code_section(), popa_code, popa_len); 12826 } 12827 12828 void Assembler::popa_uncached() { // 64bit 12829 movq(r15, Address(rsp, 0)); 12830 movq(r14, Address(rsp, wordSize)); 12831 movq(r13, Address(rsp, 2 * wordSize)); 12832 movq(r12, Address(rsp, 3 * wordSize)); 12833 movq(r11, Address(rsp, 4 * wordSize)); 12834 movq(r10, Address(rsp, 5 * wordSize)); 12835 movq(r9, Address(rsp, 6 * wordSize)); 12836 movq(r8, Address(rsp, 7 * wordSize)); 12837 movq(rdi, Address(rsp, 8 * wordSize)); 12838 movq(rsi, Address(rsp, 9 * wordSize)); 12839 movq(rbp, Address(rsp, 10 * wordSize)); 12840 // Skip rsp as it is restored automatically to the value 12841 // before the corresponding pusha when popa is done. 12842 movq(rbx, Address(rsp, 12 * wordSize)); 12843 movq(rdx, Address(rsp, 13 * wordSize)); 12844 movq(rcx, Address(rsp, 14 * wordSize)); 12845 movq(rax, Address(rsp, 15 * wordSize)); 12846 12847 addq(rsp, 16 * wordSize); 12848 } 12849 12850 // Does not actually store the value of rsp on the stack. 12851 // The slot for rsp just contains an arbitrary value. 12852 void Assembler::pusha() { // 64bit 12853 emit_copy(code_section(), pusha_code, pusha_len); 12854 } 12855 12856 // Does not actually store the value of rsp on the stack. 12857 // The slot for rsp just contains an arbitrary value. 12858 void Assembler::pusha_uncached() { // 64bit 12859 subq(rsp, 16 * wordSize); 12860 12861 movq(Address(rsp, 15 * wordSize), rax); 12862 movq(Address(rsp, 14 * wordSize), rcx); 12863 movq(Address(rsp, 13 * wordSize), rdx); 12864 movq(Address(rsp, 12 * wordSize), rbx); 12865 // Skip rsp as the value is normally not used. There are a few places where 12866 // the original value of rsp needs to be known but that can be computed 12867 // from the value of rsp immediately after pusha (rsp + 16 * wordSize). 12868 movq(Address(rsp, 10 * wordSize), rbp); 12869 movq(Address(rsp, 9 * wordSize), rsi); 12870 movq(Address(rsp, 8 * wordSize), rdi); 12871 movq(Address(rsp, 7 * wordSize), r8); 12872 movq(Address(rsp, 6 * wordSize), r9); 12873 movq(Address(rsp, 5 * wordSize), r10); 12874 movq(Address(rsp, 4 * wordSize), r11); 12875 movq(Address(rsp, 3 * wordSize), r12); 12876 movq(Address(rsp, 2 * wordSize), r13); 12877 movq(Address(rsp, wordSize), r14); 12878 movq(Address(rsp, 0), r15); 12879 } 12880 12881 void Assembler::vzeroupper() { 12882 emit_copy(code_section(), vzup_code, vzup_len); 12883 } 12884 12885 void Assembler::pushq(Address src) { 12886 InstructionMark im(this); 12887 emit_int16(get_prefixq(src), (unsigned char)0xFF); 12888 emit_operand(rsi, src); 12889 } 12890 12891 void Assembler::rclq(Register dst, int imm8) { 12892 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 12893 int encode = prefixq_and_encode(dst->encoding()); 12894 if (imm8 == 1) { 12895 emit_int16((unsigned char)0xD1, (0xD0 | encode)); 12896 } else { 12897 emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8); 12898 } 12899 } 12900 12901 void Assembler::rcrq(Register dst, int imm8) { 12902 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 12903 int encode = prefixq_and_encode(dst->encoding()); 12904 if (imm8 == 1) { 12905 emit_int16((unsigned char)0xD1, (0xD8 | encode)); 12906 } else { 12907 emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8); 12908 } 12909 } 12910 12911 12912 void Assembler::rorxq(Register dst, Register src, int imm8) { 12913 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 12914 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12915 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); 12916 emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8); 12917 } 12918 12919 void Assembler::rorxd(Register dst, Register src, int imm8) { 12920 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 12921 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 12922 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); 12923 emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8); 12924 } 12925 12926 #ifdef _LP64 12927 void Assembler::salq(Address dst, int imm8) { 12928 InstructionMark im(this); 12929 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 12930 if (imm8 == 1) { 12931 emit_int16(get_prefixq(dst), (unsigned char)0xD1); 12932 emit_operand(as_Register(4), dst); 12933 } 12934 else { 12935 emit_int16(get_prefixq(dst), (unsigned char)0xC1); 12936 emit_operand(as_Register(4), dst); 12937 emit_int8(imm8); 12938 } 12939 } 12940 12941 void Assembler::salq(Address dst) { 12942 InstructionMark im(this); 12943 emit_int16(get_prefixq(dst), (unsigned char)0xD3); 12944 emit_operand(as_Register(4), dst); 12945 } 12946 12947 void Assembler::salq(Register dst, int imm8) { 12948 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 12949 int encode = prefixq_and_encode(dst->encoding()); 12950 if (imm8 == 1) { 12951 emit_int16((unsigned char)0xD1, (0xE0 | encode)); 12952 } else { 12953 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8); 12954 } 12955 } 12956 12957 void Assembler::salq(Register dst) { 12958 int encode = prefixq_and_encode(dst->encoding()); 12959 emit_int16((unsigned char)0xD3, (0xE0 | encode)); 12960 } 12961 12962 void Assembler::sarq(Address dst, int imm8) { 12963 InstructionMark im(this); 12964 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 12965 if (imm8 == 1) { 12966 emit_int16(get_prefixq(dst), (unsigned char)0xD1); 12967 emit_operand(as_Register(7), dst); 12968 } 12969 else { 12970 emit_int16(get_prefixq(dst), (unsigned char)0xC1); 12971 emit_operand(as_Register(7), dst); 12972 emit_int8(imm8); 12973 } 12974 } 12975 12976 void Assembler::sarq(Address dst) { 12977 InstructionMark im(this); 12978 emit_int16(get_prefixq(dst), (unsigned char)0xD3); 12979 emit_operand(as_Register(7), dst); 12980 } 12981 12982 void Assembler::sarq(Register dst, int imm8) { 12983 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 12984 int encode = prefixq_and_encode(dst->encoding()); 12985 if (imm8 == 1) { 12986 emit_int16((unsigned char)0xD1, (0xF8 | encode)); 12987 } else { 12988 emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8); 12989 } 12990 } 12991 12992 void Assembler::sarq(Register dst) { 12993 int encode = prefixq_and_encode(dst->encoding()); 12994 emit_int16((unsigned char)0xD3, (0xF8 | encode)); 12995 } 12996 #endif 12997 12998 void Assembler::sbbq(Address dst, int32_t imm32) { 12999 InstructionMark im(this); 13000 prefixq(dst); 13001 emit_arith_operand(0x81, rbx, dst, imm32); 13002 } 13003 13004 void Assembler::sbbq(Register dst, int32_t imm32) { 13005 (void) prefixq_and_encode(dst->encoding()); 13006 emit_arith(0x81, 0xD8, dst, imm32); 13007 } 13008 13009 void Assembler::sbbq(Register dst, Address src) { 13010 InstructionMark im(this); 13011 emit_int16(get_prefixq(src, dst), 0x1B); 13012 emit_operand(dst, src); 13013 } 13014 13015 void Assembler::sbbq(Register dst, Register src) { 13016 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 13017 emit_arith(0x1B, 0xC0, dst, src); 13018 } 13019 13020 void Assembler::shlq(Register dst, int imm8) { 13021 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 13022 int encode = prefixq_and_encode(dst->encoding()); 13023 if (imm8 == 1) { 13024 emit_int16((unsigned char)0xD1, (0xE0 | encode)); 13025 } else { 13026 emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8); 13027 } 13028 } 13029 13030 void Assembler::shlq(Register dst) { 13031 int encode = prefixq_and_encode(dst->encoding()); 13032 emit_int16((unsigned char)0xD3, (0xE0 | encode)); 13033 } 13034 13035 void Assembler::shrq(Register dst, int imm8) { 13036 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 13037 int encode = prefixq_and_encode(dst->encoding()); 13038 if (imm8 == 1) { 13039 emit_int16((unsigned char)0xD1, (0xE8 | encode)); 13040 } 13041 else { 13042 emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8); 13043 } 13044 } 13045 13046 void Assembler::shrq(Register dst) { 13047 int encode = prefixq_and_encode(dst->encoding()); 13048 emit_int16((unsigned char)0xD3, 0xE8 | encode); 13049 } 13050 13051 void Assembler::shrq(Address dst) { 13052 InstructionMark im(this); 13053 emit_int16(get_prefixq(dst), (unsigned char)0xD3); 13054 emit_operand(as_Register(5), dst); 13055 } 13056 13057 void Assembler::shrq(Address dst, int imm8) { 13058 InstructionMark im(this); 13059 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 13060 if (imm8 == 1) { 13061 emit_int16(get_prefixq(dst), (unsigned char)0xD1); 13062 emit_operand(as_Register(5), dst); 13063 } 13064 else { 13065 emit_int16(get_prefixq(dst), (unsigned char)0xC1); 13066 emit_operand(as_Register(5), dst); 13067 emit_int8(imm8); 13068 } 13069 } 13070 13071 void Assembler::subq(Address dst, int32_t imm32) { 13072 InstructionMark im(this); 13073 prefixq(dst); 13074 emit_arith_operand(0x81, rbp, dst, imm32); 13075 } 13076 13077 void Assembler::subq(Address dst, Register src) { 13078 InstructionMark im(this); 13079 emit_int16(get_prefixq(dst, src), 0x29); 13080 emit_operand(src, dst); 13081 } 13082 13083 void Assembler::subq(Register dst, int32_t imm32) { 13084 (void) prefixq_and_encode(dst->encoding()); 13085 emit_arith(0x81, 0xE8, dst, imm32); 13086 } 13087 13088 // Force generation of a 4 byte immediate value even if it fits into 8bit 13089 void Assembler::subq_imm32(Register dst, int32_t imm32) { 13090 (void) prefixq_and_encode(dst->encoding()); 13091 emit_arith_imm32(0x81, 0xE8, dst, imm32); 13092 } 13093 13094 void Assembler::subq(Register dst, Address src) { 13095 InstructionMark im(this); 13096 emit_int16(get_prefixq(src, dst), 0x2B); 13097 emit_operand(dst, src); 13098 } 13099 13100 void Assembler::subq(Register dst, Register src) { 13101 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 13102 emit_arith(0x2B, 0xC0, dst, src); 13103 } 13104 13105 void Assembler::testq(Address dst, int32_t imm32) { 13106 if (imm32 >= 0) { 13107 testl(dst, imm32); 13108 return; 13109 } 13110 InstructionMark im(this); 13111 emit_int16(get_prefixq(dst), (unsigned char)0xF7); 13112 emit_operand(as_Register(0), dst); 13113 emit_int32(imm32); 13114 } 13115 13116 void Assembler::testq(Register dst, int32_t imm32) { 13117 if (imm32 >= 0) { 13118 testl(dst, imm32); 13119 return; 13120 } 13121 // not using emit_arith because test 13122 // doesn't support sign-extension of 13123 // 8bit operands 13124 if (dst == rax) { 13125 prefix(REX_W); 13126 emit_int8((unsigned char)0xA9); 13127 emit_int32(imm32); 13128 } else { 13129 int encode = dst->encoding(); 13130 encode = prefixq_and_encode(encode); 13131 emit_int16((unsigned char)0xF7, (0xC0 | encode)); 13132 emit_int32(imm32); 13133 } 13134 } 13135 13136 void Assembler::testq(Register dst, Register src) { 13137 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 13138 emit_arith(0x85, 0xC0, dst, src); 13139 } 13140 13141 void Assembler::testq(Register dst, Address src) { 13142 InstructionMark im(this); 13143 emit_int16(get_prefixq(src, dst), (unsigned char)0x85); 13144 emit_operand(dst, src); 13145 } 13146 13147 void Assembler::xaddq(Address dst, Register src) { 13148 InstructionMark im(this); 13149 emit_int24(get_prefixq(dst, src), 0x0F, (unsigned char)0xC1); 13150 emit_operand(src, dst); 13151 } 13152 13153 void Assembler::xchgq(Register dst, Address src) { 13154 InstructionMark im(this); 13155 emit_int16(get_prefixq(src, dst), (unsigned char)0x87); 13156 emit_operand(dst, src); 13157 } 13158 13159 void Assembler::xchgq(Register dst, Register src) { 13160 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 13161 emit_int16((unsigned char)0x87, (0xc0 | encode)); 13162 } 13163 13164 void Assembler::xorq(Register dst, Register src) { 13165 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 13166 emit_arith(0x33, 0xC0, dst, src); 13167 } 13168 13169 void Assembler::xorq(Register dst, Address src) { 13170 InstructionMark im(this); 13171 emit_int16(get_prefixq(src, dst), 0x33); 13172 emit_operand(dst, src); 13173 } 13174 13175 void Assembler::xorq(Register dst, int32_t imm32) { 13176 (void) prefixq_and_encode(dst->encoding()); 13177 emit_arith(0x81, 0xF0, dst, imm32); 13178 } 13179 13180 void Assembler::xorq(Address dst, int32_t imm32) { 13181 InstructionMark im(this); 13182 prefixq(dst); 13183 emit_arith_operand(0x81, as_Register(6), dst, imm32); 13184 } 13185 13186 void Assembler::xorq(Address dst, Register src) { 13187 InstructionMark im(this); 13188 emit_int16(get_prefixq(dst, src), 0x31); 13189 emit_operand(src, dst); 13190 } 13191 13192 #endif // !LP64 13193 13194 void InstructionAttr::set_address_attributes(int tuple_type, int input_size_in_bits) { 13195 if (VM_Version::supports_evex()) { 13196 _tuple_type = tuple_type; 13197 _input_size_in_bits = input_size_in_bits; 13198 } 13199 }