/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement (disp8) on EVEX enabled platforms.
static const unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  1,  1,  1,   // EVEX_NOSCALE(0)
  0,  0,  0    // EVEX_ETUP
};
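
// A minimal worked sketch of the disp8*N compression this table drives: for a
// full-vector operand (row EVEX_FV(0)) at 512-bit vector length the scale
// factor N is 64. A displacement of 256 is 4 * 64, so it compresses to a
// disp8 of 4; a displacement of 260 is not a multiple of 64 and must fall
// back to the full disp32 encoding. See emit_compressed_disp_byte() below.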

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  _xmmindex = xnoreg;
  _isxmmindex = false;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
  _xmmindex = xnoreg;
  _isxmmindex = false;
}

#endif // _LP64


// Convert the raw encoding form into the form expected by the constructor for
// Address.  An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec = RelocationHolder::none;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

void Assembler::init_attributes(void) {
  _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
  _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
  _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
  _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
  NOT_LP64(_is_managed = false;)
  _attributes = nullptr;
}

void Assembler::set_attributes(InstructionAttr* attributes) {
  // Record the assembler in the attributes, so the attributes destructor can
  // clear the assembler's attributes, cleaning up the otherwise dangling
  // pointer.  gcc13 has a false positive warning, because it doesn't tie that
  // cleanup to the assignment of _attributes here.
  attributes->set_current_assembler(this);
  PRAGMA_DIAG_PUSH
  PRAGMA_DANGLING_POINTER_IGNORED
  _attributes = attributes;
  PRAGMA_DIAG_POP
}

void Assembler::membar(Membar_mask_bits order_constraint) {
  // We only have to handle StoreLoad
  if (order_constraint & StoreLoad) {
    // All usable chips support "locked" instructions which suffice
    // as barriers, and are much faster than the alternative of
    // using the cpuid instruction. We use here a locked add [esp-C],0.
    // This is conveniently otherwise a no-op except for blowing
    // flags, and introducing a false dependency on target memory
    // location. We can't do anything with flags, but we can avoid
    // memory dependencies in the current method by locked-adding
    // somewhere else on the stack. Doing [esp+C] will collide with
    // something on stack in the current method, hence we go for [esp-C].
    // It is convenient since it is almost always in data cache, for
    // any small C.  We need to step back from SP to avoid data
    // dependencies with other things below SP (callee-saves, for
    // example). Without a clear way to figure out the minimal safe
    // distance from SP, it makes sense to step back a complete
    // cache line, as this will also avoid possible second-order effects
    // with locked ops against the cache line. Our choice of offset
    // is bounded by x86 operand encoding, which should stay within
    // [-128; +127] to have the 8-bit displacement encoding.
    //
    // Any change to this code may need to revisit other places in
    // the code where this idiom is used, in particular the
    // orderAccess code.

    int offset = -VM_Version::L1_line_size();
    if (offset < -128) {
      offset = -128;
    }

    lock();
    addl(Address(rsp, offset), 0); // Assert the lock# signal here
  }
}
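
// For reference, a sketch of what the StoreLoad fence above encodes to,
// assuming a 64-byte L1 line (so offset == -64); the exact offset is
// CPU-dependent:
//
//   lock addl $0, -64(%rsp)   =>   F0 83 44 24 C0 00
//
// F0 is the lock prefix, 83 /0 is the "add r/m32, imm8" opcode, 44 24 is
// ModRM+SIB for [rsp+disp8], C0 is the disp8 (-64), and 00 is the immediate.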

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != nullptr, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
    #ifdef ASSERT
      check_relocation(rspec, format);
    #endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  return r->encoding() & 7;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int24(op1, (op2 | encode(dst)), imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(op1 == 0x81, "Unexpected opcode");
  if (is8bit(imm32)) {
    emit_int24(op1 | 0x02,        // set sign bit
               op2 | encode(dst),
               imm32 & 0xFF);
  } else if (dst == rax) {
    switch (op2) {
      case 0xD0: emit_int8(0x15); break; // adc
      case 0xC0: emit_int8(0x05); break; // add
      case 0xE0: emit_int8(0x25); break; // and
      case 0xF8: emit_int8(0x3D); break; // cmp
      case 0xC8: emit_int8(0x0D); break; // or
      case 0xD8: emit_int8(0x1D); break; // sbb
      case 0xE8: emit_int8(0x2D); break; // sub
      case 0xF0: emit_int8(0x35); break; // xor
      default: ShouldNotReachHere();
    }
    emit_int32(imm32);
  } else {
    emit_int16(op1, (op2 | encode(dst)));
    emit_int32(imm32);
  }
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int16(op1, (op2 | encode(dst)));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}
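
// A small sketch of the sign-extension bit used above: for the group-1 ALU
// opcodes, op1 == 0x81 takes an imm32 while op1 | 0x02 == 0x83 takes a
// sign-extended imm8. So "addl $5, (%rax)" can be emitted either as
// 83 00 05 (compact) or as 81 00 05 00 00 00 (forced imm32, see
// emit_arith_operand_imm32 below).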

void Assembler::emit_arith_operand_imm32(int op1, Register rm, Address adr, int32_t imm32) {
  assert(op1 == 0x81, "unexpected opcode");
  emit_int8(op1);
  emit_operand(rm, adr, 4);
  emit_int32(imm32);
}

void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int16(op1, (op2 | encode(dst) << 3 | encode(src)));
}


bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result fits in 8 bits.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    case EVEX_NOSCALE:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (-0x80 <= new_disp && new_disp < 0x80) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}


bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result fits in 8 bits.
  if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    case EVEX_NOSCALE:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}
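
// A worked sketch of the compression above: a scalar double memory operand
// (for example addsd(XMMRegister, Address) later in this file) sets tuple
// EVEX_T1S with a 64-bit input size, so the table row EVEX_T1S(3) gives a
// factor of 8 at every vector length. A displacement of 256 becomes
// 256 / 8 == 32 and is emitted as a disp8 of 32; a displacement of 1024
// divides to 128, which no longer fits a signed byte, so the full disp32
// form is used instead.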

bool Assembler::needs_rex2(Register reg1, Register reg2, Register reg3) {
  bool rex2 = (reg1->is_valid() && reg1->encoding() >= 16) ||
              (reg2->is_valid() && reg2->encoding() >= 16) ||
              (reg3->is_valid() && reg3->encoding() >= 16);
  assert(!rex2 || UseAPX, "extended gpr use requires UseAPX");
  return rex2;
}

bool Assembler::needs_eevex(Register reg1, Register reg2, Register reg3) {
  return needs_rex2(reg1, reg2, reg3);
}

bool Assembler::needs_eevex(int enc1, int enc2, int enc3) {
  bool eevex = enc1 >= 16 || enc2 >= 16 || enc3 >= 16;
  assert(!eevex || UseAPX, "extended gpr use requires UseAPX");
  return eevex;
}

static bool is_valid_encoding(int reg_enc) {
  return reg_enc >= 0;
}

static int raw_encode(Register reg) {
  assert(reg == noreg || reg->is_valid(), "sanity");
  int reg_enc = reg->raw_encoding();
  assert(reg_enc == -1 || is_valid_encoding(reg_enc), "sanity");
  return reg_enc;
}

static int raw_encode(XMMRegister xmmreg) {
  assert(xmmreg == xnoreg || xmmreg->is_valid(), "sanity");
  int xmmreg_enc = xmmreg->raw_encoding();
  assert(xmmreg_enc == -1 || is_valid_encoding(xmmreg_enc), "sanity");
  return xmmreg_enc;
}

static int raw_encode(KRegister kreg) {
  assert(kreg == knoreg || kreg->is_valid(), "sanity");
  int kreg_enc = kreg->raw_encoding();
  assert(kreg_enc == -1 || is_valid_encoding(kreg_enc), "sanity");
  return kreg_enc;
}

static int modrm_encoding(int mod, int dst_enc, int src_enc) {
  return (mod & 3) << 6 | (dst_enc & 7) << 3 | (src_enc & 7);
}

static int sib_encoding(Address::ScaleFactor scale, int index_enc, int base_enc) {
  return (scale & 3) << 6 | (index_enc & 7) << 3 | (base_enc & 7);
}

inline void Assembler::emit_modrm(int mod, int dst_enc, int src_enc) {
  assert((mod & 3) != 0b11, "forbidden");
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int8(modrm);
}

inline void Assembler::emit_modrm_disp8(int mod, int dst_enc, int src_enc,
                                        int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int16(modrm, disp & 0xFF);
}

inline void Assembler::emit_modrm_sib(int mod, int dst_enc, int src_enc,
                                      Address::ScaleFactor scale, int index_enc, int base_enc) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int16(modrm, sib);
}

inline void Assembler::emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc,
                                            Address::ScaleFactor scale, int index_enc, int base_enc,
                                            int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int24(modrm, sib, disp & 0xFF);
}
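
// A byte-level sketch of the two helpers above: addressing rdx through
// [rax + rbx*4 + disp8] uses mod == 0b01 (disp8 follows), reg == rdx (2),
// rm == 0b100 (SIB follows), giving a ModRM of 0x54; the SIB packs
// scale == times_4 (0b10), index == rbx (3), base == rax (0), giving 0x98.
// The emitted sequence is therefore 54 98 <disp8>.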

void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
                                    Address::ScaleFactor scale, int disp,
                                    RelocationHolder const& rspec,
                                    int post_addr_length) {
  bool no_relocation = (rspec.type() == relocInfo::none);

  if (is_valid_encoding(base_enc)) {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && no_relocation && ((base_enc & 0x7) != 5)) {
        // [base + index*scale]
        // !(rbp | r13 | r21 | r29)
        // [00 reg 100][ss index base]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       scale, index_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             scale, index_enc, base_enc,
                             disp);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       scale, index_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if ((base_enc & 0x7) == 4) {
      // rsp | r12 | r20 | r28
      // [rsp + disp]
      if (disp == 0 && no_relocation) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             Address::times_1, 0b100, 0b100,
                             disp);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      // !(rsp | r12 | r20 | r28) were handled above
      assert(((base_enc & 0x7) != 4), "illegal addressing mode");
      if (disp == 0 && no_relocation && ((base_enc & 0x7) != 5)) {
        // [base]
        // !(rbp | r13 | r21 | r29)
        // [00 reg base]
        emit_modrm(0, reg_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_modrm_disp8(0b01, reg_enc, base_enc,
                         disp);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_modrm(0b10, reg_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // base == noreg
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100,
                     scale, index_enc, 0b101 /* no base */);
      emit_data(disp, rspec, disp32_operand);
    } else if (!no_relocation) {
      // base == noreg, index == noreg
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 reg 101] disp32

      emit_modrm(0b00, reg_enc, 0b101 /* no base */);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != nullptr, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + post_addr_length;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // base == noreg, index == noreg, no_relocation == true
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100 /* no base */,
                     Address::times_1, 0b100, 0b101);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
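
// A worked sketch of the RIP-relative correction above, with made-up
// addresses: suppose the instruction starts at 0x1000 and the incoming disp
// was computed as target - 0x1000 == 0x1000 (target 0x2000). For an
// opcode + ModRM + disp32 instruction with no trailing immediate, next_ip
// is 0x1006, so the emitted disp32 is 0x1000 - 6 == 0x0FFA and the CPU
// reconstructs 0x1006 + 0x0FFA == 0x2000 at run time.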

void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(reg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, XMMRegister xmmindex,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  assert(xmmindex->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(xmmindex),
                      scale, disp, rspec, post_addr_length);
}

void Assembler::emit_operand(KRegister kreg, Address adr,
                             int post_addr_length) {
  emit_operand(kreg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               post_addr_length);
}

void Assembler::emit_operand(KRegister kreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int post_addr_length) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(kreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, post_addr_length);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX2:
    NOT_LP64(assert(false, "64bit prefixes"));
    if ((0xFF & *ip++) & REX2BIT_W) {
      is_64bit = true;
    }
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;

    case REX2:
      NOT_LP64(assert(false, "64bit prefix found"));
      if ((0xFF & *ip++) & REX2BIT_W) {
        is_64bit = true;
      }
      goto again_after_size_prefix2;

    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert(((which == call32_operand || which == imm_operand) && is_64bit) ||
           (which == narrow_oop_operand && !is_64bit),
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
      // fall through
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
      // fall through
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x10: // movups
    case 0x11: // movups
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x29: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0x6F: // movdq
    case 0x7F: // movdq
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xD6: // movq
    case 0xFE: // paddd
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
      // fall through
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      fatal("not handled: 0x0F%2X", 0xFF & *(ip-1));
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x15: // adc rax, #32
  case 0x05: // add rax, #32
  case 0x25: // and rax, #32
  case 0x3D: // cmp rax, #32
  case 0x0D: // or  rax, #32
  case 0x1D: // sbb rax, #32
  case 0x2D: // sub rax, #32
  case 0x35: // xor rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xA8: // testb rax, #8
    return which == end_pc_operand ? ip + 1 : ip;
  case 0xA9: // testl/testq rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions,
    // but those carry the 0x0F prefix and were handled when 0x0F was processed above.
    //
    // In 32-bit mode the VEX first bytes C4 and C5 alias onto the LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them, bits [7:6] are set in the VEX second byte, since a
    // ModRM byte cannot be of the form 11xxxxxx in 32-bit mode. Those bits
    // end up set because the REX and vvvv fields are stored inverted in VEX.
    //
    // Fortunately C2 doesn't generate these instructions, so we don't need
    // to check for them in the product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
        case 0x70: // pshufd r, r/a, #8
        case 0x71: // ps[rl|ra|ll]w r, #8
        case 0x72: // ps[rl|ra|ll]d r, #8
        case 0x73: // ps[rl|ra|ll]q r, #8
        case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
        case 0xC4: // pinsrw r, r, r/a, #8
        case 0xC5: // pextrw r/a, r, #8
        case 0xC6: // shufp[s|d] r, r, r/a, #8
          tail_size = 1;  // the imm8
          break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert(VM_Version::cpu_supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x22: // pinsrd r, r/a, #8
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
    case 0x1f: // evpcmpd/evpcmpq
    case 0x3f: // evpcmpb/evpcmpw
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0:                    // Lock
    goto again_after_prefix;

  case 0xF3:                    // For SSE
  case 0xF2:                    // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
    case REX2:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
      // fall through
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP4
#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
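
// A usage sketch (assumed byte pattern): for a "call rel32" at address p
// (bytes E8 xx xx xx xx), locate_operand(p, call32_operand) returns p + 1,
// the address of the embedded rel32, and locate_next_instruction(p) returns
// p + 5, the first byte past the instruction.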

#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != nullptr && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand(Register reg, Address adr, int post_addr_length) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
}

void Assembler::emit_operand(XMMRegister reg, Address adr, int post_addr_length) {
  if (adr.isxmmindex()) {
     emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec, post_addr_length);
  } else {
     emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
  }
}

void Assembler::emit_opcode_prefix_and_encoding(int byte1, int byte2, int ocp_and_encoding, int byte3) {
  int opcode_prefix = (ocp_and_encoding & 0xFF00) >> 8;
  if (opcode_prefix != 0) {
    emit_int32(opcode_prefix, (unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF), byte3);
  } else {
    emit_int24((unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF), byte3);
  }
}

void Assembler::emit_opcode_prefix_and_encoding(int byte1, int byte2, int ocp_and_encoding) {
  int opcode_prefix = (ocp_and_encoding & 0xFF00) >> 8;
  if (opcode_prefix != 0) {
    emit_int24(opcode_prefix, (unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF));
  } else {
    emit_int16((unsigned char)byte1, byte2 | (ocp_and_encoding & 0xFF));
  }
}

void Assembler::emit_opcode_prefix_and_encoding(int byte1, int ocp_and_encoding) {
  int opcode_prefix = (ocp_and_encoding & 0xFF00) >> 8;
  if (opcode_prefix != 0) {
    emit_int16(opcode_prefix, (unsigned char)byte1 | (ocp_and_encoding & 0xFF));
  } else {
    emit_int8((unsigned char)byte1 | (ocp_and_encoding & 0xFF));
  }
}
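
// A usage sketch for the three overloads above: callers pack an optional
// opcode prefix into the high byte of ocp_and_encoding and a register
// encoding into the low byte, which is ORed into the final opcode byte.
// With ocp_and_encoding == 0x0F03, for example, the two-argument overload
// emits the 0x0F prefix byte followed by (byte1 | 0x03); with a zero high
// byte it emits only the OR-combined opcode byte(s).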
 1349 
 1350 // Now the Assembler instructions (identical for 32/64 bits)
 1351 
 1352 void Assembler::adcl(Address dst, int32_t imm32) {
 1353   InstructionMark im(this);
 1354   prefix(dst);
 1355   emit_arith_operand(0x81, rdx, dst, imm32);
 1356 }
 1357 
 1358 void Assembler::adcl(Address dst, Register src) {
 1359   InstructionMark im(this);
 1360   prefix(dst, src);
 1361   emit_int8(0x11);
 1362   emit_operand(src, dst, 0);
 1363 }
 1364 
 1365 void Assembler::adcl(Register dst, int32_t imm32) {
 1366   prefix(dst);
 1367   emit_arith(0x81, 0xD0, dst, imm32);
 1368 }
 1369 
 1370 void Assembler::adcl(Register dst, Address src) {
 1371   InstructionMark im(this);
 1372   prefix(src, dst);
 1373   emit_int8(0x13);
 1374   emit_operand(dst, src, 0);
 1375 }
 1376 
 1377 void Assembler::adcl(Register dst, Register src) {
 1378   (void) prefix_and_encode(dst->encoding(), src->encoding());
 1379   emit_arith(0x13, 0xC0, dst, src);
 1380 }
 1381 
 1382 void Assembler::addl(Address dst, int32_t imm32) {
 1383   InstructionMark im(this);
 1384   prefix(dst);
 1385   emit_arith_operand(0x81, rax, dst, imm32);
 1386 }
 1387 
 1388 void Assembler::eaddl(Register dst, Address src, int32_t imm32, bool no_flags) {
 1389   InstructionMark im(this);
 1390   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1391   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1392   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1393   emit_arith_operand(0x81, rax, src, imm32);
 1394 }
 1395 
 1396 void Assembler::addb(Address dst, int imm8) {
 1397   InstructionMark im(this);
 1398   prefix(dst);
 1399   emit_int8((unsigned char)0x80);
 1400   emit_operand(rax, dst, 1);
 1401   emit_int8(imm8);
 1402 }
 1403 
 1404 void Assembler::addb(Address dst, Register src) {
 1405   InstructionMark im(this);
 1406   prefix(dst, src);
 1407   emit_int8(0x00);
 1408   emit_operand(src, dst, 0);
 1409 }
 1410 
 1411 void Assembler::addb(Register dst, int imm8) {
 1412   (void) prefix_and_encode(dst->encoding(), true);
 1413   emit_arith_b(0x80, 0xC0, dst, imm8);
 1414 }
 1415 
 1416 void Assembler::addw(Address dst, int imm16) {
 1417   InstructionMark im(this);
 1418   emit_int8(0x66);
 1419   prefix(dst);
 1420   emit_int8((unsigned char)0x81);
 1421   emit_operand(rax, dst, 2);
 1422   emit_int16(imm16);
 1423 }
 1424 
 1425 void Assembler::addw(Address dst, Register src) {
 1426   InstructionMark im(this);
 1427   emit_int8(0x66);
 1428   prefix(dst, src);
 1429   emit_int8(0x01);
 1430   emit_operand(src, dst, 0);
 1431 }
 1432 
 1433 void Assembler::addl(Address dst, Register src) {
 1434   InstructionMark im(this);
 1435   prefix(dst, src);
 1436   emit_int8(0x01);
 1437   emit_operand(src, dst, 0);
 1438 }
 1439 
 1440 void Assembler::eaddl(Register dst, Address src1, Register src2, bool no_flags) {
 1441   InstructionMark im(this);
 1442   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1443   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1444   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1445   emit_int8(0x01);
 1446   emit_operand(src2, src1, 0);
 1447 }
 1448 
 1449 void Assembler::addl(Register dst, int32_t imm32) {
 1450   prefix(dst);
 1451   emit_arith(0x81, 0xC0, dst, imm32);
 1452 }
 1453 
 1454 void Assembler::eaddl(Register dst, Register src, int32_t imm32, bool no_flags) {
 1455   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1456   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1457   emit_arith(0x81, 0xC0, src, imm32);
 1458 }
 1459 
 1460 void Assembler::addl(Register dst, Address src) {
 1461   InstructionMark im(this);
 1462   prefix(src, dst);
 1463   emit_int8(0x03);
 1464   emit_operand(dst, src, 0);
 1465 }
 1466 
 1467 void Assembler::eaddl(Register dst, Register src1, Address src2, bool no_flags) {
 1468   InstructionMark im(this);
 1469   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1470   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1471   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1472   emit_int8(0x03);
 1473   emit_operand(src1, src2, 0);
 1474 }
 1475 
 1476 void Assembler::addl(Register dst, Register src) {
 1477   (void) prefix_and_encode(dst->encoding(), src->encoding());
 1478   emit_arith(0x03, 0xC0, dst, src);
 1479 }
 1480 
 1481 void Assembler::eaddl(Register dst, Register src1, Register src2, bool no_flags) {
 1482   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1483   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1484   // opcode matches gcc
 1485   emit_arith(0x01, 0xC0, src1, src2);
 1486 }
 1487 
 1488 void Assembler::addr_nop_4() {
 1489   assert(UseAddressNop, "no CPU support");
 1490   // 4 bytes: NOP DWORD PTR [EAX+0]
 1491   emit_int32(0x0F,
 1492              0x1F,
 1493              0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
             0);   // 8-bit offset (1 byte)
 1495 }
 1496 
 1497 void Assembler::addr_nop_5() {
 1498   assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
  emit_int32(0x0F,
             0x1F,
             0x44,  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bit offset (1 byte)
 1505 }
 1506 
 1507 void Assembler::addr_nop_7() {
 1508   assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
  emit_int24(0x0F,
             0x1F,
             (unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bit offset (4 bytes)
 1515 }
 1516 
 1517 void Assembler::addr_nop_8() {
 1518   assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
  emit_int32(0x0F,
             0x1F,
             (unsigned char)0x84,
                    // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);    // 32-bit offset (4 bytes)
 1526 }
 1527 
 1528 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
 1529   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 1530   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1531   attributes.set_rex_vex_w_reverted();
 1532   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 1533   emit_int16(0x58, (0xC0 | encode));
 1534 }
 1535 
 1536 void Assembler::addsd(XMMRegister dst, Address src) {
 1537   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 1538   InstructionMark im(this);
 1539   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1540   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 1541   attributes.set_rex_vex_w_reverted();
 1542   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 1543   emit_int8(0x58);
 1544   emit_operand(dst, src, 0);
 1545 }
 1546 
 1547 void Assembler::addss(XMMRegister dst, XMMRegister src) {
 1548   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 1549   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1550   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 1551   emit_int16(0x58, (0xC0 | encode));
 1552 }
 1553 
 1554 void Assembler::addss(XMMRegister dst, Address src) {
 1555   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 1556   InstructionMark im(this);
 1557   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1558   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 1559   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 1560   emit_int8(0x58);
 1561   emit_operand(dst, src, 0);
 1562 }
 1563 
 1564 void Assembler::aesdec(XMMRegister dst, Address src) {
 1565   assert(VM_Version::supports_aes(), "");
 1566   InstructionMark im(this);
 1567   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1568   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1569   emit_int8((unsigned char)0xDE);
 1570   emit_operand(dst, src, 0);
 1571 }
 1572 
 1573 void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
 1574   assert(VM_Version::supports_aes(), "");
 1575   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1576   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1577   emit_int16((unsigned char)0xDE, (0xC0 | encode));
 1578 }
 1579 
 1580 void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 1581   assert(VM_Version::supports_avx512_vaes(), "");
 1582   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 1583   attributes.set_is_evex_instruction();
 1584   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1585   emit_int16((unsigned char)0xDE, (0xC0 | encode));
 1586 }
 1587 
 1588 
 1589 void Assembler::aesdeclast(XMMRegister dst, Address src) {
 1590   assert(VM_Version::supports_aes(), "");
 1591   InstructionMark im(this);
 1592   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1593   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1594   emit_int8((unsigned char)0xDF);
 1595   emit_operand(dst, src, 0);
 1596 }
 1597 
 1598 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
 1599   assert(VM_Version::supports_aes(), "");
 1600   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1601   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1602   emit_int16((unsigned char)0xDF, (0xC0 | encode));
 1603 }
 1604 
 1605 void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 1606   assert(VM_Version::supports_avx512_vaes(), "");
 1607   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 1608   attributes.set_is_evex_instruction();
 1609   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1610   emit_int16((unsigned char)0xDF, (0xC0 | encode));
 1611 }
 1612 
 1613 void Assembler::aesenc(XMMRegister dst, Address src) {
 1614   assert(VM_Version::supports_aes(), "");
 1615   InstructionMark im(this);
 1616   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1617   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1618   emit_int8((unsigned char)0xDC);
 1619   emit_operand(dst, src, 0);
 1620 }
 1621 
 1622 void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
 1623   assert(VM_Version::supports_aes(), "");
 1624   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1625   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, (0xC0 | encode));
 1627 }
 1628 
 1629 void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 1630   assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
 1631   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 1632   attributes.set_is_evex_instruction();
 1633   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1634   emit_int16((unsigned char)0xDC, (0xC0 | encode));
 1635 }
 1636 
 1637 void Assembler::aesenclast(XMMRegister dst, Address src) {
 1638   assert(VM_Version::supports_aes(), "");
 1639   InstructionMark im(this);
 1640   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1641   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1642   emit_int8((unsigned char)0xDD);
 1643   emit_operand(dst, src, 0);
 1644 }
 1645 
 1646 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
 1647   assert(VM_Version::supports_aes(), "");
 1648   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 1649   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1650   emit_int16((unsigned char)0xDD, (0xC0 | encode));
 1651 }
 1652 
 1653 void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 1654   assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
 1655   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 1656   attributes.set_is_evex_instruction();
 1657   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 1658   emit_int16((unsigned char)0xDD, (0xC0 | encode));
 1659 }
 1660 
 1661 void Assembler::andb(Address dst, Register src) {
 1662   InstructionMark im(this);
 1663   prefix(dst, src, true);
 1664   emit_int8(0x20);
 1665   emit_operand(src, dst, 0);
 1666 }
 1667 
 1668 void Assembler::andl(Address dst, int32_t imm32) {
 1669   InstructionMark im(this);
 1670   prefix(dst);
 1671   emit_arith_operand(0x81, as_Register(4), dst, imm32);
 1672 }
 1673 
 1674 void Assembler::eandl(Register dst, Address src, int32_t imm32, bool no_flags) {
 1675   InstructionMark im(this);
 1676   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1677   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1678   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1679   emit_arith_operand(0x81, rsp, src, imm32);
 1680 }
 1681 
 1682 void Assembler::andl(Register dst, int32_t imm32) {
 1683   prefix(dst);
 1684   emit_arith(0x81, 0xE0, dst, imm32);
 1685 }
 1686 
 1687 void Assembler::eandl(Register dst, Register src, int32_t imm32, bool no_flags) {
 1688   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1689   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1690   emit_arith(0x81, 0xE0, src, imm32);
 1691 }
 1692 
 1693 void Assembler::andl(Address dst, Register src) {
 1694   InstructionMark im(this);
 1695   prefix(dst, src);
 1696   emit_int8(0x21);
 1697   emit_operand(src, dst, 0);
 1698 }
 1699 
 1700 void Assembler::andl(Register dst, Address src) {
 1701   InstructionMark im(this);
 1702   prefix(src, dst);
 1703   emit_int8(0x23);
 1704   emit_operand(dst, src, 0);
 1705 }
 1706 
 1707 void Assembler::eandl(Register dst, Register src1, Address src2, bool no_flags) {
 1708   InstructionMark im(this);
 1709   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1710   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1711   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 1712   emit_int8(0x23);
 1713   emit_operand(src1, src2, 0);
 1714 }
 1715 
 1716 void Assembler::andl(Register dst, Register src) {
 1717   (void) prefix_and_encode(dst->encoding(), src->encoding());
 1718   emit_arith(0x23, 0xC0, dst, src);
 1719 }
 1720 
 1721 void Assembler::eandl(Register dst, Register src1, Register src2, bool no_flags) {
 1722   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1723   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
  // use the 0x21 (and r/m32, r32) opcode form, matching what gcc emits
  emit_arith(0x21, 0xC0, src1, src2);
 1726 }
 1727 
 1728 void Assembler::andnl(Register dst, Register src1, Register src2) {
 1729   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1730   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1731   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
 1732   emit_int16((unsigned char)0xF2, (0xC0 | encode));
 1733 }
 1734 
 1735 void Assembler::andnl(Register dst, Register src1, Address src2) {
 1736   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1737   InstructionMark im(this);
 1738   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1739   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1740   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
 1741   emit_int8((unsigned char)0xF2);
 1742   emit_operand(dst, src2, 0);
 1743 }
 1744 
 1745 void Assembler::bsfl(Register dst, Register src) {
 1746   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 1747   emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
 1748 }
 1749 
 1750 void Assembler::bsrl(Register dst, Register src) {
 1751   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 1752   emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
 1753 }
 1754 
 1755 void Assembler::bswapl(Register reg) { // bswap
 1756   int encode = prefix_and_encode(reg->encoding(), false, true /* is_map1 */);
 1757   emit_opcode_prefix_and_encoding((unsigned char)0xC8, encode);
 1758 }
 1759 
 1760 void Assembler::blsil(Register dst, Register src) {
 1761   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1762   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1763   int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
 1764   emit_int16((unsigned char)0xF3, (0xC0 | encode));
 1765 }
 1766 
 1767 void Assembler::blsil(Register dst, Address src) {
 1768   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1769   InstructionMark im(this);
 1770   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1771   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1772   vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
 1773   emit_int8((unsigned char)0xF3);
 1774   emit_operand(rbx, src, 0);
 1775 }
 1776 
 1777 void Assembler::blsmskl(Register dst, Register src) {
 1778   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1779   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1780   int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
 1783 }
 1784 
 1785 void Assembler::blsmskl(Register dst, Address src) {
 1786   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1787   InstructionMark im(this);
 1788   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1789   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1790   vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
 1791   emit_int8((unsigned char)0xF3);
 1792   emit_operand(rdx, src, 0);
 1793 }
 1794 
 1795 void Assembler::blsrl(Register dst, Register src) {
 1796   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1797   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1798   int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
 1799   emit_int16((unsigned char)0xF3, (0xC0 | encode));
 1800 }
 1801 
 1802 void Assembler::blsrl(Register dst, Address src) {
 1803   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
 1804   InstructionMark im(this);
 1805   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1806   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1807   vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
 1808   emit_int8((unsigned char)0xF3);
 1809   emit_operand(rcx, src, 0);
 1810 }
 1811 
 1812 void Assembler::call(Label& L, relocInfo::relocType rtype) {
 1813   // suspect disp32 is always good
 1814   int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
 1815 
 1816   if (L.is_bound()) {
 1817     const int long_size = 5;
 1818     int offs = (int)( target(L) - pc() );
 1819     assert(offs <= 0, "assembler error");
 1820     InstructionMark im(this);
 1821     // 1110 1000 #32-bit disp
 1822     emit_int8((unsigned char)0xE8);
 1823     emit_data(offs - long_size, rtype, operand);
 1824   } else {
 1825     InstructionMark im(this);
 1826     // 1110 1000 #32-bit disp
 1827     L.add_patch_at(code(), locator());
 1828 
 1829     emit_int8((unsigned char)0xE8);
 1830     emit_data(int(0), rtype, operand);
 1831   }
 1832 }
 1833 
 1834 void Assembler::call(Register dst) {
 1835   int encode = prefix_and_encode(dst->encoding());
 1836   emit_int16((unsigned char)0xFF, (0xD0 | encode));
 1837 }
 1838 
 1839 
 1840 void Assembler::call(Address adr) {
 1841   assert(!adr._rspec.reloc()->is_data(), "should not use ExternalAddress for call");
 1842   InstructionMark im(this);
 1843   prefix(adr);
 1844   emit_int8((unsigned char)0xFF);
 1845   emit_operand(rdx, adr, 0);
 1846 }
 1847 
 1848 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
 1849   InstructionMark im(this);
 1850   emit_int8((unsigned char)0xE8);
 1851   intptr_t disp = entry - (pc() + sizeof(int32_t));
 1852   // Entry is null in case of a scratch emit.
 1853   assert(entry == nullptr || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
 1854   // Technically, should use call32_operand, but this format is
 1855   // implied by the fact that we're emitting a call instruction.
 1856 
 1857   int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
 1858   emit_data((int) disp, rspec, operand);
 1859 }
 1860 
 1861 void Assembler::cdql() {
 1862   emit_int8((unsigned char)0x99);
 1863 }
 1864 
 1865 void Assembler::cld() {
 1866   emit_int8((unsigned char)0xFC);
 1867 }
 1868 
 1869 void Assembler::cmovl(Condition cc, Register dst, Register src) {
 1870   NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
 1871   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 1872   emit_opcode_prefix_and_encoding(0x40 | cc, 0xC0, encode);
 1873 }
 1874 
 1875 void Assembler::ecmovl(Condition cc, Register dst, Register src1, Register src2) {
 1876   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1877   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
 1878   emit_int16((0x40 | cc), (0xC0 | encode));
 1879 }
 1880 
 1881 void Assembler::cmovl(Condition cc, Register dst, Address src) {
 1882   InstructionMark im(this);
 1883   NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
 1884   prefix(src, dst, false, true /* is_map1 */);
 1885   emit_int8((0x40 | cc));
 1886   emit_operand(dst, src, 0);
 1887 }
 1888 
 1889 void Assembler::ecmovl(Condition cc, Register dst, Register src1, Address src2) {
 1890   InstructionMark im(this);
 1891   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 1892   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 1893   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
 1894   emit_int8((0x40 | cc));
 1895   emit_operand(src1, src2, 0);
 1896 }
 1897 
 1898 void Assembler::cmpb(Address dst, Register reg) {
 1899   assert(reg->has_byte_register(), "must have byte register");
 1900   InstructionMark im(this);
 1901   prefix(dst, reg, true);
 1902   emit_int8((unsigned char)0x38);
 1903   emit_operand(reg, dst, 0);
 1904 }
 1905 
void Assembler::cmpb(Register reg, Address src) {
  assert(reg->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(src, reg, true);
  emit_int8((unsigned char)0x3A);
  emit_operand(reg, src, 0);
}
 1913 
 1914 void Assembler::cmpb(Address dst, int imm8) {
 1915   InstructionMark im(this);
 1916   prefix(dst);
 1917   emit_int8((unsigned char)0x80);
 1918   emit_operand(rdi, dst, 1);
 1919   emit_int8(imm8);
 1920 }
 1921 
 1922 void Assembler::cmpb(Register dst, int imm8) {
 1923   prefix(dst);
 1924   emit_arith_b(0x80, 0xF8, dst, imm8);
 1925 }
 1926 
 1927 void Assembler::cmpl(Address dst, int32_t imm32) {
 1928   InstructionMark im(this);
 1929   prefix(dst);
 1930   emit_arith_operand(0x81, as_Register(7), dst, imm32);
 1931 }
 1932 
 1933 void Assembler::cmpl(Register dst, int32_t imm32) {
 1934   prefix(dst);
 1935   emit_arith(0x81, 0xF8, dst, imm32);
 1936 }
 1937 
 1938 void Assembler::cmpl(Register dst, Register src) {
 1939   (void) prefix_and_encode(dst->encoding(), src->encoding());
 1940   emit_arith(0x3B, 0xC0, dst, src);
 1941 }
 1942 
 1943 void Assembler::cmpl(Register dst, Address src) {
 1944   InstructionMark im(this);
 1945   prefix(src, dst);
 1946   emit_int8(0x3B);
 1947   emit_operand(dst, src, 0);
 1948 }
 1949 
 1950 void Assembler::cmpl(Address dst,  Register reg) {
 1951   InstructionMark im(this);
 1952   prefix(dst, reg);
 1953   emit_int8(0x39);
 1954   emit_operand(reg, dst, 0);
 1955 }
 1956 
 1957 void Assembler::cmpl_imm32(Address dst, int32_t imm32) {
 1958   InstructionMark im(this);
 1959   prefix(dst);
 1960   emit_arith_operand_imm32(0x81, as_Register(7), dst, imm32);
 1961 }
 1962 
 1963 void Assembler::cmpw(Address dst, int imm16) {
 1964   InstructionMark im(this);
 1965   emit_int8(0x66);
 1966   prefix(dst);
 1967   emit_int8((unsigned char)0x81);
 1968   emit_operand(rdi, dst, 2);
 1969   emit_int16(imm16);
 1970 }
 1971 
 1972 void Assembler::cmpw(Address dst, Register reg) {
 1973   InstructionMark im(this);
 1974   emit_int8(0x66);
 1975   prefix(dst, reg);
 1976   emit_int8((unsigned char)0x39);
 1977   emit_operand(reg, dst, 0);
 1978 }
 1979 
// The 32-bit cmpxchg compares the value at adr with the contents of rax;
// if they are equal, reg is stored at adr, otherwise the value at adr is
// loaded into rax. ZF is set if the compared values were equal, and cleared otherwise.
 1983 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
 1984   InstructionMark im(this);
 1985   prefix(adr, reg, false, true /* is_map1 */);
 1986   emit_int8((unsigned char)0xB1);
 1987   emit_operand(reg, adr, 0);
 1988 }
 1989 
 1990 void Assembler::cmpxchgw(Register reg, Address adr) { // cmpxchg
 1991   InstructionMark im(this);
 1992   size_prefix();
 1993   prefix(adr, reg, false, true /* is_map1 */);
 1994   emit_int8((unsigned char)0xB1);
 1995   emit_operand(reg, adr, 0);
 1996 }
 1997 
// The 8-bit cmpxchg compares the byte at adr with the contents of al (the
// low byte of rax); if they are equal, reg is stored at adr, otherwise the
// byte at adr is loaded into al. ZF is set if the compared values were
// equal, and cleared otherwise.
 2001 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
 2002   InstructionMark im(this);
 2003   prefix(adr, reg, true, true /* is_map1 */);
 2004   emit_int8((unsigned char)0xB0);
 2005   emit_operand(reg, adr, 0);
 2006 }
 2007 
 2008 void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 prefix is there; strangely, ucomisd comes out correct.
 2011   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2012   InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2014   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 2015   attributes.set_rex_vex_w_reverted();
 2016   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2017   emit_int8(0x2F);
 2018   emit_operand(dst, src, 0);
 2019 }
 2020 
 2021 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
 2022   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2023   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2024   attributes.set_rex_vex_w_reverted();
 2025   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2026   emit_int16(0x2F, (0xC0 | encode));
 2027 }
 2028 
 2029 void Assembler::comiss(XMMRegister dst, Address src) {
 2030   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2031   InstructionMark im(this);
 2032   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2033   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 2034   simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2035   emit_int8(0x2F);
 2036   emit_operand(dst, src, 0);
 2037 }
 2038 
 2039 void Assembler::comiss(XMMRegister dst, XMMRegister src) {
 2040   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2041   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2042   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2043   emit_int16(0x2F, (0xC0 | encode));
 2044 }
 2045 
 2046 void Assembler::cpuid() {
 2047   emit_int16(0x0F, (unsigned char)0xA2);
 2048 }
 2049 
 2050 // Opcode / Instruction                      Op /  En  64 - Bit Mode     Compat / Leg Mode Description                  Implemented
 2051 // F2 0F 38 F0 / r       CRC32 r32, r / m8   RM        Valid             Valid             Accumulate CRC32 on r / m8.  v
 2052 // F2 REX 0F 38 F0 / r   CRC32 r32, r / m8*  RM        Valid             N.E.              Accumulate CRC32 on r / m8.  -
 2053 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8   RM        Valid             N.E.              Accumulate CRC32 on r / m8.  -
 2054 //
 2055 // F2 0F 38 F1 / r       CRC32 r32, r / m16  RM        Valid             Valid             Accumulate CRC32 on r / m16. v
 2056 //
 2057 // F2 0F 38 F1 / r       CRC32 r32, r / m32  RM        Valid             Valid             Accumulate CRC32 on r / m32. v
 2058 //
 2059 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64  RM        Valid             N.E.              Accumulate CRC32 on r / m64. v
 2060 void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
 2061   assert(VM_Version::supports_sse4_2(), "");
 2062   if (needs_eevex(crc, v)) {
 2063     InstructionAttr attributes(AVX_128bit, /* rex_w */ sizeInBytes == 8, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2064     int encode = vex_prefix_and_encode(crc->encoding(), 0, v->encoding(), sizeInBytes == 2 ? VEX_SIMD_66 : VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, true);
 2065     emit_int16(sizeInBytes == 1 ? (unsigned char)0xF0 : (unsigned char)0xF1, (0xC0 | encode));
 2066   } else {
 2067     int8_t w = 0x01;
 2068     Prefix p = Prefix_EMPTY;
 2069 
 2070     emit_int8((unsigned char)0xF2);
 2071     switch (sizeInBytes) {
 2072     case 1:
 2073       w = 0;
 2074       break;
 2075     case 2:
 2076     case 4:
 2077       break;
 2078     LP64_ONLY(case 8:)
      // This instruction is not valid in 32-bit mode
 2080       // Note:
 2081       // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
 2082       //
 2083       // Page B - 72   Vol. 2C says
 2084       // qwreg2 to qwreg            1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
 2085       // mem64 to qwreg             1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m
 2086       //                                                                            F0!!!
 2087       // while 3 - 208 Vol. 2A
 2088       // F2 REX.W 0F 38 F1 / r       CRC32 r64, r / m64             RM         Valid      N.E.Accumulate CRC32 on r / m64.
 2089       //
      // the 0 in the last bit is reserved for a different flavor of this instruction:
 2091       // F2 REX.W 0F 38 F0 / r       CRC32 r64, r / m8              RM         Valid      N.E.Accumulate CRC32 on r / m8.
 2092       p = REX_W;
 2093       break;
 2094     default:
 2095       assert(0, "Unsupported value for a sizeInBytes argument");
 2096       break;
 2097     }
 2098     LP64_ONLY(prefix(crc, v, p);)
 2099     emit_int32(0x0F,
 2100                0x38,
 2101                0xF0 | w,
 2102                0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
 2103   }
 2104 }
 2105 
 2106 void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
 2107   assert(VM_Version::supports_sse4_2(), "");
 2108   InstructionMark im(this);
 2109   if (needs_eevex(crc, adr.base(), adr.index())) {
 2110     InstructionAttr attributes(AVX_128bit, /* vex_w */ sizeInBytes == 8, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2111     attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
 2112     vex_prefix(adr, 0, crc->encoding(), sizeInBytes == 2 ? VEX_SIMD_66 : VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
 2113     emit_int8(sizeInBytes == 1 ? (unsigned char)0xF0 : (unsigned char)0xF1);
 2114     emit_operand(crc, adr, 0);
 2115   } else {
 2116     int8_t w = 0x01;
 2117     Prefix p = Prefix_EMPTY;
 2118 
    emit_int8((unsigned char)0xF2);
 2120     switch (sizeInBytes) {
 2121     case 1:
 2122       w = 0;
 2123       break;
 2124     case 2:
 2125     case 4:
 2126       break;
 2127     LP64_ONLY(case 8:)
      // This instruction is not valid in 32-bit mode
 2129       p = REX_W;
 2130       break;
 2131     default:
 2132       assert(0, "Unsupported value for a sizeInBytes argument");
 2133       break;
 2134     }
 2135     LP64_ONLY(prefix(crc, adr, p);)
 2136     emit_int24(0x0F, 0x38, (0xF0 | w));
 2137     emit_operand(crc, adr, 0);
 2138   }
 2139 }
 2140 
 2141 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
 2142   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2143   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2144   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2145   emit_int16((unsigned char)0xE6, (0xC0 | encode));
 2146 }
 2147 
 2148 void Assembler::vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
 2149   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 2150   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2151   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2152   emit_int16((unsigned char)0xE6, (0xC0 | encode));
 2153 }
 2154 
 2155 void Assembler::vcvtps2ph(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
 2156   assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
 2157   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /*uses_vl */ true);
 2158   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 2159   emit_int24(0x1D, (0xC0 | encode), imm8);
 2160 }
 2161 
 2162 void Assembler::evcvtps2ph(Address dst, KRegister mask, XMMRegister src, int imm8, int vector_len) {
 2163   assert(VM_Version::supports_evex(), "");
 2164   InstructionMark im(this);
 2165   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /*uses_vl */ true);
 2166   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_64bit);
 2167   attributes.reset_is_clear_context();
 2168   attributes.set_embedded_opmask_register_specifier(mask);
 2169   attributes.set_is_evex_instruction();
 2170   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 2171   emit_int8(0x1D);
 2172   emit_operand(src, dst, 1);
 2173   emit_int8(imm8);
 2174 }
 2175 
 2176 void Assembler::vcvtps2ph(Address dst, XMMRegister src, int imm8, int vector_len) {
 2177   assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
 2178   InstructionMark im(this);
 2179   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /*uses_vl */ true);
 2180   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 2181   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 2182   emit_int8(0x1D);
 2183   emit_operand(src, dst, 1);
 2184   emit_int8(imm8);
 2185 }
 2186 
 2187 void Assembler::vcvtph2ps(XMMRegister dst, XMMRegister src, int vector_len) {
 2188   assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
 2189   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */false, /* no_mask_reg */ true, /* uses_vl */ true);
 2190   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2191   emit_int16(0x13, (0xC0 | encode));
 2192 }
 2193 
 2194 void Assembler::vcvtph2ps(XMMRegister dst, Address src, int vector_len) {
 2195   assert(VM_Version::supports_evex() || VM_Version::supports_f16c(), "");
 2196   InstructionMark im(this);
 2197   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /*uses_vl */ true);
 2198   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 2199   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2200   emit_int8(0x13);
 2201   emit_operand(dst, src, 0);
 2202 }
 2203 
 2204 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
 2205   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2206   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2207   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2208   emit_int16(0x5B, (0xC0 | encode));
 2209 }
 2210 
 2211 void Assembler::vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
 2212   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 2213   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2214   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2215   emit_int16(0x5B, (0xC0 | encode));
 2216 }
 2217 
 2218 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
 2219   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2220   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2221   attributes.set_rex_vex_w_reverted();
 2222   int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2223   emit_int16(0x5A, (0xC0 | encode));
 2224 }
 2225 
 2226 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
 2227   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2228   InstructionMark im(this);
 2229   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2230   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 2231   attributes.set_rex_vex_w_reverted();
 2232   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2233   emit_int8(0x5A);
 2234   emit_operand(dst, src, 0);
 2235 }
 2236 
 2237 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
 2238   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2239   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2240   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
 2241   emit_int16(0x2A, (0xC0 | encode));
 2242 }
 2243 
 2244 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
 2245   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2246   InstructionMark im(this);
 2247   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2248   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 2249   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2250   emit_int8(0x2A);
 2251   emit_operand(dst, src, 0);
 2252 }
 2253 
 2254 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
 2255   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2256   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2257   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes, true);
 2258   emit_int16(0x2A, (0xC0 | encode));
 2259 }
 2260 
 2261 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
 2262   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2263   InstructionMark im(this);
 2264   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2265   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 2266   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2267   emit_int8(0x2A);
 2268   emit_operand(dst, src, 0);
 2269 }
 2270 
 2271 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
 2272   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2273   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2274   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes, true);
 2275   emit_int16(0x2A, (0xC0 | encode));
 2276 }
 2277 
 2278 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
 2279   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2280   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2281   int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2282   emit_int16(0x5A, (0xC0 | encode));
 2283 }
 2284 
 2285 void Assembler::cvtss2sd(XMMRegister dst, Address src) {
 2286   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2287   InstructionMark im(this);
 2288   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2289   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 2290   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2291   emit_int8(0x5A);
 2292   emit_operand(dst, src, 0);
 2293 }
 2294 
 2295 
 2296 void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
 2297   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2298   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2299   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2300   emit_int16(0x2C, (0xC0 | encode));
 2301 }
 2302 
 2303 void Assembler::cvtss2sil(Register dst, XMMRegister src) {
 2304   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2305   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2306   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2307   emit_int16(0x2D, (0xC0 | encode));
 2308 }
 2309 
 2310 void Assembler::cvttss2sil(Register dst, XMMRegister src) {
 2311   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2312   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2313   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2314   emit_int16(0x2C, (0xC0 | encode));
 2315 }
 2316 
 2317 void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
 2318   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2319   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 2320   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2321   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2322   emit_int16((unsigned char)0xE6, (0xC0 | encode));
 2323 }
 2324 
 2325 void Assembler::pabsb(XMMRegister dst, XMMRegister src) {
 2326   assert(VM_Version::supports_ssse3(), "");
 2327   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 2328   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2329   emit_int16(0x1C, (0xC0 | encode));
 2330 }
 2331 
 2332 void Assembler::pabsw(XMMRegister dst, XMMRegister src) {
 2333   assert(VM_Version::supports_ssse3(), "");
 2334   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 2335   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2336   emit_int16(0x1D, (0xC0 | encode));
 2337 }
 2338 
 2339 void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
 2340   assert(VM_Version::supports_ssse3(), "");
 2341   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2342   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2343   emit_int16(0x1E, (0xC0 | encode));
 2344 }
 2345 
 2346 void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
 2347   assert(vector_len == AVX_128bit ? VM_Version::supports_avx()      :
 2348          vector_len == AVX_256bit ? VM_Version::supports_avx2()     :
 2349          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "not supported");
 2350   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 2351   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2352   emit_int16(0x1C, (0xC0 | encode));
 2353 }
 2354 
 2355 void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
 2356   assert(vector_len == AVX_128bit ? VM_Version::supports_avx()      :
 2357          vector_len == AVX_256bit ? VM_Version::supports_avx2()     :
 2358          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false, "");
 2359   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 2360   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2361   emit_int16(0x1D, (0xC0 | encode));
 2362 }
 2363 
 2364 void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx()  :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_evex() : false, "");
 2368   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2369   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2370   emit_int16(0x1E, (0xC0 | encode));
 2371 }
 2372 
 2373 void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
 2374   assert(UseAVX > 2, "");
 2375   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2376   attributes.set_is_evex_instruction();
 2377   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 2378   emit_int16(0x1F, (0xC0 | encode));
 2379 }
 2380 
 2381 void Assembler::vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) {
 2382   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 2383   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2384   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2385   emit_int16(0x5A, (0xC0 | encode));
 2386 }
 2387 
 2388 void Assembler::vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) {
 2389   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2393   emit_int16(0x5A, (0xC0 | encode));
 2394 }
 2395 
 2396 void Assembler::vcvttps2dq(XMMRegister dst, XMMRegister src, int vector_len) {
 2397   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 2398   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2399   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2400   emit_int16(0x5B, (0xC0 | encode));
 2401 }
 2402 
 2403 void Assembler::vcvttpd2dq(XMMRegister dst, XMMRegister src, int vector_len) {
 2404   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 2405   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2406   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2407   emit_int16((unsigned char)0xE6, (0xC0 | encode));
 2408 }
 2409 
 2410 void Assembler::vcvtps2dq(XMMRegister dst, XMMRegister src, int vector_len) {
 2411   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 2412   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2413   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2414   emit_int16(0x5B, (0xC0 | encode));
 2415 }
 2416 
 2417 void Assembler::evcvttps2qq(XMMRegister dst, XMMRegister src, int vector_len) {
 2418   assert(VM_Version::supports_avx512dq(), "");
 2419   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2420   attributes.set_is_evex_instruction();
 2421   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2422   emit_int16(0x7A, (0xC0 | encode));
 2423 }
 2424 
 2425 void Assembler::evcvtpd2qq(XMMRegister dst, XMMRegister src, int vector_len) {
 2426   assert(VM_Version::supports_avx512dq(), "");
 2427   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2428   attributes.set_is_evex_instruction();
 2429   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2430   emit_int16(0x7B, (0xC0 | encode));
 2431 }
 2432 
 2433 void Assembler::evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
 2434   assert(VM_Version::supports_avx512dq(), "");
 2435   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2436   attributes.set_is_evex_instruction();
 2437   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2438   emit_int16(0x5B, (0xC0 | encode));
 2439 }
 2440 
 2441 void Assembler::evcvttpd2qq(XMMRegister dst, XMMRegister src, int vector_len) {
 2442   assert(VM_Version::supports_avx512dq(), "");
 2443   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2444   attributes.set_is_evex_instruction();
 2445   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2446   emit_int16(0x7A, (0xC0 | encode));
 2447 }
 2448 
 2449 void Assembler::evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
 2450   assert(VM_Version::supports_avx512dq(), "");
 2451   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2452   attributes.set_is_evex_instruction();
 2453   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2454   emit_int16((unsigned char)0xE6, (0xC0 | encode));
 2455 }
 2456 
 2457 void Assembler::evpmovwb(XMMRegister dst, XMMRegister src, int vector_len) {
 2458   assert(VM_Version::supports_avx512bw(), "");
 2459   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2460   attributes.set_is_evex_instruction();
 2461   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2462   emit_int16(0x30, (0xC0 | encode));
 2463 }
 2464 
 2465 void Assembler::evpmovdw(XMMRegister dst, XMMRegister src, int vector_len) {
 2466   assert(UseAVX > 2, "");
 2467   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2468   attributes.set_is_evex_instruction();
 2469   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2470   emit_int16(0x33, (0xC0 | encode));
 2471 }
 2472 
 2473 void Assembler::evpmovdb(XMMRegister dst, XMMRegister src, int vector_len) {
 2474   assert(UseAVX > 2, "");
 2475   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2476   attributes.set_is_evex_instruction();
 2477   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2478   emit_int16(0x31, (0xC0 | encode));
 2479 }
 2480 
 2481 void Assembler::evpmovqd(XMMRegister dst, XMMRegister src, int vector_len) {
 2482   assert(UseAVX > 2, "");
 2483   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2484   attributes.set_is_evex_instruction();
 2485   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2486   emit_int16(0x35, (0xC0 | encode));
 2487 }
 2488 
 2489 void Assembler::evpmovqb(XMMRegister dst, XMMRegister src, int vector_len) {
 2490   assert(UseAVX > 2, "");
 2491   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2492   attributes.set_is_evex_instruction();
 2493   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2494   emit_int16(0x32, (0xC0 | encode));
 2495 }
 2496 
 2497 void Assembler::evpmovqw(XMMRegister dst, XMMRegister src, int vector_len) {
 2498   assert(UseAVX > 2, "");
 2499   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2500   attributes.set_is_evex_instruction();
 2501   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2502   emit_int16(0x34, (0xC0 | encode));
 2503 }
 2504 
 2505 void Assembler::evpmovsqd(XMMRegister dst, XMMRegister src, int vector_len) {
 2506   assert(UseAVX > 2, "");
 2507   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2508   attributes.set_is_evex_instruction();
 2509   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 2510   emit_int16(0x25, (0xC0 | encode));
 2511 }
 2512 
 2513 void Assembler::decl(Address dst) {
 2514   // Don't use it directly. Use MacroAssembler::decrement() instead.
 2515   InstructionMark im(this);
 2516   prefix(dst);
 2517   emit_int8((unsigned char)0xFF);
 2518   emit_operand(rcx, dst, 0);
 2519 }
 2520 
 2521 void Assembler::edecl(Register dst, Address src, bool no_flags) {
 2522   InstructionMark im(this);
 2523   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2524   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 2525   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2526   emit_int8((unsigned char)0xFF);
 2527   emit_operand(rcx, src, 0);
 2528 }
 2529 
 2530 void Assembler::divsd(XMMRegister dst, Address src) {
 2531   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2532   InstructionMark im(this);
 2533   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2534   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 2535   attributes.set_rex_vex_w_reverted();
 2536   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2537   emit_int8(0x5E);
 2538   emit_operand(dst, src, 0);
 2539 }
 2540 
 2541 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
 2542   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2543   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2544   attributes.set_rex_vex_w_reverted();
 2545   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2546   emit_int16(0x5E, (0xC0 | encode));
 2547 }
 2548 
 2549 void Assembler::divss(XMMRegister dst, Address src) {
 2550   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2551   InstructionMark im(this);
 2552   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2553   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 2554   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2555   emit_int8(0x5E);
 2556   emit_operand(dst, src, 0);
 2557 }
 2558 
 2559 void Assembler::divss(XMMRegister dst, XMMRegister src) {
 2560   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2561   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2562   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 2563   emit_int16(0x5E, (0xC0 | encode));
 2564 }
 2565 
 2566 void Assembler::hlt() {
 2567   emit_int8((unsigned char)0xF4);
 2568 }
 2569 
 2570 void Assembler::idivl(Register src) {
 2571   int encode = prefix_and_encode(src->encoding());
 2572   emit_int16((unsigned char)0xF7, (0xF8 | encode));
 2573 }
 2574 
 2575 void Assembler::eidivl(Register src, bool no_flags) { // Signed
 2576   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2577   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2578   emit_int16((unsigned char)0xF7, (0xF8 | encode));
 2579 }
 2580 
 2581 void Assembler::divl(Register src) { // Unsigned
 2582   int encode = prefix_and_encode(src->encoding());
 2583   emit_int16((unsigned char)0xF7, (0xF0 | encode));
 2584 }
 2585 
 2586 void Assembler::edivl(Register src, bool no_flags) { // Unsigned
 2587   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2588   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2589   emit_int16((unsigned char)0xF7, (0xF0 | encode));
 2590 }
 2591 
 2592 void Assembler::imull(Register src) {
 2593   int encode = prefix_and_encode(src->encoding());
 2594   emit_int16((unsigned char)0xF7, (0xE8 | encode));
 2595 }
 2596 
 2597 void Assembler::eimull(Register src, bool no_flags) {
 2598   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2599   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2600   emit_int16((unsigned char)0xF7, (0xE8 | encode));
 2601 }
 2602 
 2603 void Assembler::imull(Register dst, Register src) {
 2604   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 2605   emit_opcode_prefix_and_encoding((unsigned char)0xAF, 0xC0, encode);
 2606 }
 2607 
 2608 void Assembler::eimull(Register dst, Register src1, Register src2, bool no_flags) {
 2609   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2610   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2611   emit_int16((unsigned char)0xAF, (0xC0 | encode));
 2612 }
 2613 
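// Immediate forms of IMUL: 6B /r ib when the value fits in a sign-extended
// byte, otherwise 69 /r id with a full 32-bit immediate.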
 2614 void Assembler::imull(Register dst, Address src, int32_t value) {
 2615   InstructionMark im(this);
 2616   prefix(src, dst);
 2617   if (is8bit(value)) {
 2618     emit_int8((unsigned char)0x6B);
 2619     emit_operand(dst, src, 1);
 2620     emit_int8(value);
 2621   } else {
 2622     emit_int8((unsigned char)0x69);
 2623     emit_operand(dst, src, 4);
 2624     emit_int32(value);
 2625   }
 2626 }
 2627 
 2628 void Assembler::eimull(Register dst, Address src, int32_t value, bool no_flags) {
 2629   InstructionMark im(this);
 2630   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2631   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 2632   evex_prefix_ndd(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2633   if (is8bit(value)) {
 2634     emit_int8((unsigned char)0x6B);
 2635     emit_operand(dst, src, 1);
 2636     emit_int8(value);
 2637   } else {
 2638     emit_int8((unsigned char)0x69);
 2639     emit_operand(dst, src, 4);
 2640     emit_int32(value);
 2641   }
 2642 }
 2643 
 2644 void Assembler::imull(Register dst, Register src, int value) {
 2645   int encode = prefix_and_encode(dst->encoding(), src->encoding());
 2646   if (is8bit(value)) {
 2647     emit_int24(0x6B, (0xC0 | encode), value & 0xFF);
 2648   } else {
 2649     emit_int16(0x69, (0xC0 | encode));
 2650     emit_int32(value);
 2651   }
 2652 }
 2653 
 2654 void Assembler::eimull(Register dst, Register src, int value, bool no_flags) {
 2655   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2656   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2657   if (is8bit(value)) {
 2658     emit_int24(0x6B, (0xC0 | encode), value & 0xFF);
 2659   } else {
 2660     emit_int16(0x69, (0xC0 | encode));
 2661     emit_int32(value);
 2662   }
 2663 }
 2664 
 2665 void Assembler::imull(Register dst, Address src) {
 2666   InstructionMark im(this);
 2667   prefix(src, dst, false, true /* is_map1 */);
 2668   emit_int8((unsigned char)0xAF);
 2669   emit_operand(dst, src, 0);
 2670 }
 2671 
 2672 void Assembler::eimull(Register dst, Register src1, Address src2, bool no_flags) {
 2673   InstructionMark im(this);
 2674   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2675   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 2676   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2677   emit_int8((unsigned char)0xAF);
 2678   emit_operand(src1, src2, 0);
 2679 }
 2680 
 2681 void Assembler::incl(Address dst) {
 2682   // Don't use it directly. Use MacroAssembler::increment() instead.
 2683   InstructionMark im(this);
 2684   prefix(dst);
 2685   emit_int8((unsigned char)0xFF);
 2686   emit_operand(rax, dst, 0);
 2687 }
 2688 
 2689 void Assembler::eincl(Register dst, Address src, bool no_flags) {
 2690   // Don't use it directly. Use MacroAssembler::increment() instead.
 2691   InstructionMark im(this);
 2692   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2693   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 2694   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2695   emit_int8((unsigned char)0xFF);
 2696   emit_operand(rax, src, 0);
 2697 }
 2698 
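// Conditional jump: for a bound label, emit the 2-byte form (70+cc rel8)
// when maybe_short is set and the displacement fits in 8 bits, otherwise
// the 6-byte form (0F 80+cc rel32). Jumps to an unbound label always use
// the 32-bit form and are patched once the label is bound.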
 2699 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
 2700   InstructionMark im(this);
 2701   assert((0 <= cc) && (cc < 16), "illegal cc");
 2702   if (L.is_bound()) {
 2703     address dst = target(L);
 2704     assert(dst != nullptr, "jcc most probably wrong");
 2705 
 2706     const int short_size = 2;
 2707     const int long_size = 6;
 2708     int offs = checked_cast<int>((intptr_t)dst - (intptr_t)pc());
 2709     if (maybe_short && is8bit(offs - short_size)) {
 2710       // 0111 tttn #8-bit disp
 2711       emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
 2712     } else {
 2713       // 0000 1111 1000 tttn #32-bit disp
 2714       assert(is_simm32(offs - long_size),
 2715              "must be 32bit offset (call4)");
 2716       emit_int16(0x0F, (0x80 | cc));
 2717       emit_int32(offs - long_size);
 2718     }
 2719   } else {
    // Note: we could eliminate conditional jumps to this jump if the
    //       condition is the same; however, that seems a rather unlikely case.
    // Note: use jccb() if the label to be bound is very close, to get
    //       an 8-bit displacement.
 2724     L.add_patch_at(code(), locator());
 2725     emit_int16(0x0F, (0x80 | cc));
 2726     emit_int32(0);
 2727   }
 2728 }
 2729 
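// Short conditional jump (jccb). Debug builds assert that the displacement
// really fits in 8 bits, allowing for short_branch_delta() slack.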
 2730 void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
 2731   if (L.is_bound()) {
 2732     const int short_size = 2;
 2733     address entry = target(L);
 2734 #ifdef ASSERT
 2735     int dist = checked_cast<int>((intptr_t)entry - (intptr_t)(pc() + short_size));
 2736     int delta = short_branch_delta();
 2737     if (delta != 0) {
      dist += (dist < 0 ? -delta : delta);
 2739     }
 2740     assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
 2741 #endif
 2742     int offs = checked_cast<int>((intptr_t)entry - (intptr_t)pc());
 2743     // 0111 tttn #8-bit disp
 2744     emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
 2745   } else {
 2746     InstructionMark im(this);
 2747     L.add_patch_at(code(), locator(), file, line);
 2748     emit_int16(0x70 | cc, 0);
 2749   }
 2750 }
 2751 
 2752 void Assembler::jmp(Address adr) {
 2753   InstructionMark im(this);
 2754   prefix(adr);
 2755   emit_int8((unsigned char)0xFF);
 2756   emit_operand(rsp, adr, 0);
 2757 }
 2758 
 2759 void Assembler::jmp(Label& L, bool maybe_short) {
 2760   if (L.is_bound()) {
 2761     address entry = target(L);
 2762     assert(entry != nullptr, "jmp most probably wrong");
 2763     InstructionMark im(this);
 2764     const int short_size = 2;
 2765     const int long_size = 5;
 2766     int offs = checked_cast<int>(entry - pc());
 2767     if (maybe_short && is8bit(offs - short_size)) {
 2768       emit_int16((unsigned char)0xEB, ((offs - short_size) & 0xFF));
 2769     } else {
 2770       emit_int8((unsigned char)0xE9);
 2771       emit_int32(offs - long_size);
 2772     }
 2773   } else {
 2774     // By default, forward jumps are always 32-bit displacements, since
 2775     // we can't yet know where the label will be bound.  If you're sure that
 2776     // the forward jump will not run beyond 256 bytes, use jmpb to
 2777     // force an 8-bit displacement.
 2778     InstructionMark im(this);
 2779     L.add_patch_at(code(), locator());
 2780     emit_int8((unsigned char)0xE9);
 2781     emit_int32(0);
 2782   }
 2783 }
 2784 
 2785 void Assembler::jmp(Register entry) {
 2786   int encode = prefix_and_encode(entry->encoding());
 2787   emit_int16((unsigned char)0xFF, (0xE0 | encode));
 2788 }
 2789 
 2790 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
 2791   InstructionMark im(this);
 2792   emit_int8((unsigned char)0xE9);
 2793   assert(dest != nullptr, "must have a target");
 2794   intptr_t disp = dest - (pc() + sizeof(int32_t));
 2795   assert(is_simm32(disp), "must be 32bit offset (jmp)");
 2796   emit_data(checked_cast<int32_t>(disp), rspec, call32_operand);
 2797 }
 2798 
 2799 void Assembler::jmpb_0(Label& L, const char* file, int line) {
 2800   if (L.is_bound()) {
 2801     const int short_size = 2;
 2802     address entry = target(L);
 2803     assert(entry != nullptr, "jmp most probably wrong");
 2804 #ifdef ASSERT
 2805     int dist = checked_cast<int>((intptr_t)entry - (intptr_t)(pc() + short_size));
 2806     int delta = short_branch_delta();
 2807     if (delta != 0) {
      dist += (dist < 0 ? -delta : delta);
 2809     }
 2810     assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
 2811 #endif
 2812     intptr_t offs = entry - pc();
 2813     emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF);
 2814   } else {
 2815     InstructionMark im(this);
 2816     L.add_patch_at(code(), locator(), file, line);
 2817     emit_int16((unsigned char)0xEB, 0);
 2818   }
 2819 }
 2820 
void Assembler::ldmxcsr(Address src) {
  // This instruction should be SSE encoded with the REX2 prefix when an
  // extended GPR is present. To be consistent when UseAPX is enabled, use
  // this encoding even when an extended GPR is not used.
  if (UseAVX > 0 && !UseAPX) {
 2826     InstructionMark im(this);
 2827     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2828     vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2829     emit_int8((unsigned char)0xAE);
 2830     emit_operand(as_Register(2), src, 0);
 2831   } else {
 2832     NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2833     InstructionMark im(this);
 2834     prefix(src, true /* is_map1 */);
 2835     emit_int8((unsigned char)0xAE);
 2836     emit_operand(as_Register(2), src, 0);
 2837   }
 2838 }
 2839 
 2840 void Assembler::leal(Register dst, Address src) {
 2841   InstructionMark im(this);
 2842   prefix(src, dst);
 2843   emit_int8((unsigned char)0x8D);
 2844   emit_operand(dst, src, 0);
 2845 }
 2846 
 2847 #ifdef _LP64
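// Emits 'lea r10, [rip+disp32]': 4C is REX.WR, 8D is LEA, and ModRM byte 15
// selects reg = r10 with RIP-relative addressing, which is why dst is
// restricted to r10.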
 2848 void Assembler::lea(Register dst, Label& L) {
 2849   assert(dst == r10, "invalid destination register");
 2850   if (L.is_bound()) {
 2851     const int inst_size = 7;
 2852     address entry = target(L);
 2853     int offs = checked_cast<int>((intptr_t)entry - (intptr_t)pc());
 2854     emit_int8((unsigned char)0x4C);
 2855     emit_int8((unsigned char)0x8D);
 2856     emit_int8((unsigned char)0x15);
 2857     emit_int32(offs - inst_size);
 2858   } else {
 2859     InstructionMark im(this);
 2860     L.add_patch_at(code(), locator());
 2861     emit_int8((unsigned char)0x4C);
 2862     emit_int8((unsigned char)0x8D);
 2863     emit_int8((unsigned char)0x15);
 2864     emit_int32(0);
 2865   }
 2866 }
 2867 #endif
 2868 
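// Emit lfence instruction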
 2869 void Assembler::lfence() {
 2870   emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8);
 2871 }
 2872 
 2873 void Assembler::lock() {
 2874   emit_int8((unsigned char)0xF0);
 2875 }
 2876 
 2877 void Assembler::size_prefix() {
 2878   emit_int8(0x66);
 2879 }
 2880 
 2881 void Assembler::lzcntl(Register dst, Register src) {
 2882   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
 2883   emit_int8((unsigned char)0xF3);
 2884   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 2885   emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
 2886 }
 2887 
 2888 void Assembler::elzcntl(Register dst, Register src, bool no_flags) {
 2889   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
 2890   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2891   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2892   emit_int16((unsigned char)0xF5, (0xC0 | encode));
 2893 }
 2894 
 2895 void Assembler::lzcntl(Register dst, Address src) {
 2896   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
 2897   InstructionMark im(this);
 2898   emit_int8((unsigned char)0xF3);
 2899   prefix(src, dst, false, true /* is_map1 */);
 2900   emit_int8((unsigned char)0xBD);
 2901   emit_operand(dst, src, 0);
 2902 }
 2903 
 2904 void Assembler::elzcntl(Register dst, Address src, bool no_flags) {
 2905   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
 2906   InstructionMark im(this);
 2907   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 2908   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 2909   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 2910   emit_int8((unsigned char)0xF5);
 2911   emit_operand(dst, src, 0);
 2912 }
 2913 
 2914 // Emit mfence instruction
 2915 void Assembler::mfence() {
 2916   NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
 2917   emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0);
 2918 }
 2919 
 2920 // Emit sfence instruction
 2921 void Assembler::sfence() {
 2922   NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
 2923   emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8);
 2924 }
 2925 
 2926 void Assembler::mov(Register dst, Register src) {
 2927   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 2928 }
 2929 
 2930 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
 2931   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2932   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 2933   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2934   attributes.set_rex_vex_w_reverted();
 2935   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2936   emit_int16(0x28, (0xC0 | encode));
 2937 }
 2938 
 2939 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
 2940   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2941   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 2942   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2943   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2944   emit_int16(0x28, (0xC0 | encode));
 2945 }
 2946 
 2947 void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
 2948   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 2949   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2950   int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2951   emit_int16(0x16, (0xC0 | encode));
 2952 }
 2953 
 2954 void Assembler::movb(Register dst, Address src) {
 2955   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
 2956   InstructionMark im(this);
 2957   prefix(src, dst, true);
 2958   emit_int8((unsigned char)0x8A);
 2959   emit_operand(dst, src, 0);
 2960 }
 2961 
 2962 void Assembler::movddup(XMMRegister dst, XMMRegister src) {
 2963   NOT_LP64(assert(VM_Version::supports_sse3(), ""));
 2964   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 2965   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2966   attributes.set_rex_vex_w_reverted();
 2967   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2968   emit_int16(0x12, 0xC0 | encode);
 2969 }
 2970 
 2971 void Assembler::movddup(XMMRegister dst, Address src) {
 2972   NOT_LP64(assert(VM_Version::supports_sse3(), ""));
 2973   InstructionMark im(this);
 2974   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2975   attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
 2976   attributes.set_rex_vex_w_reverted();
 2977   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2978   emit_int8(0x12);
 2979   emit_operand(dst, src, 0);
 2980 }
 2981 
 2982 void Assembler::vmovddup(XMMRegister dst, Address src, int vector_len) {
 2983   assert(VM_Version::supports_avx(), "");
 2984   InstructionMark im(this);
 2985   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2986   attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
 2987   attributes.set_rex_vex_w_reverted();
 2988   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2989   emit_int8(0x12);
 2990   emit_operand(dst, src, 0);
 2991 }
 2992 
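// Opmask register moves (KMOVB/KMOVW/KMOVD/KMOVQ): opcode 90 loads a mask
// from a mask register or memory, 91 stores it to memory, 92 moves from a
// GPR, and 93 moves to a GPR.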
 2993 void Assembler::kmovbl(KRegister dst, KRegister src) {
 2994   assert(VM_Version::supports_avx512dq(), "");
 2995   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2996   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2997   emit_int16((unsigned char)0x90, (0xC0 | encode));
 2998 }
 2999 
 3000 void Assembler::kmovbl(KRegister dst, Register src) {
 3001   assert(VM_Version::supports_avx512dq(), "");
 3002   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3003   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 3004   emit_int16((unsigned char)0x92, (0xC0 | encode));
 3005 }
 3006 
 3007 void Assembler::kmovbl(Register dst, KRegister src) {
 3008   assert(VM_Version::supports_avx512dq(), "");
 3009   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3010   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3011   emit_int16((unsigned char)0x93, (0xC0 | encode));
 3012 }
 3013 
 3014 void Assembler::kmovwl(KRegister dst, Register src) {
 3015   assert(VM_Version::supports_evex(), "");
 3016   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3017   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes, true);
 3018   emit_int16((unsigned char)0x92, (0xC0 | encode));
 3019 }
 3020 
 3021 void Assembler::kmovwl(Register dst, KRegister src) {
 3022   assert(VM_Version::supports_evex(), "");
 3023   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3024   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3025   emit_int16((unsigned char)0x93, (0xC0 | encode));
 3026 }
 3027 
 3028 void Assembler::kmovwl(KRegister dst, Address src) {
 3029   assert(VM_Version::supports_evex(), "");
 3030   InstructionMark im(this);
 3031   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3032   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 3033   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3034   emit_int8((unsigned char)0x90);
 3035   emit_operand(dst, src, 0);
 3036 }
 3037 
 3038 void Assembler::kmovwl(Address dst, KRegister src) {
 3039   assert(VM_Version::supports_evex(), "");
 3040   InstructionMark im(this);
 3041   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3042   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 3043   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3044   emit_int8((unsigned char)0x91);
 3045   emit_operand(src, dst, 0);
 3046 }
 3047 
 3048 void Assembler::kmovwl(KRegister dst, KRegister src) {
 3049   assert(VM_Version::supports_evex(), "");
 3050   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3051   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3052   emit_int16((unsigned char)0x90, (0xC0 | encode));
 3053 }
 3054 
 3055 void Assembler::kmovdl(KRegister dst, Register src) {
 3056   assert(VM_Version::supports_avx512bw(), "");
 3057   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3058   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
 3059   emit_int16((unsigned char)0x92, (0xC0 | encode));
 3060 }
 3061 
 3062 void Assembler::kmovdl(Register dst, KRegister src) {
 3063   assert(VM_Version::supports_avx512bw(), "");
 3064   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3065   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3066   emit_int16((unsigned char)0x93, (0xC0 | encode));
 3067 }
 3068 
 3069 void Assembler::kmovql(KRegister dst, KRegister src) {
 3070   assert(VM_Version::supports_avx512bw(), "");
 3071   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3072   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3073   emit_int16((unsigned char)0x90, (0xC0 | encode));
 3074 }
 3075 
 3076 void Assembler::kmovql(KRegister dst, Address src) {
 3077   assert(VM_Version::supports_avx512bw(), "");
 3078   InstructionMark im(this);
 3079   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3080   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 3081   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3082   emit_int8((unsigned char)0x90);
 3083   emit_operand(dst, src, 0);
 3084 }
 3085 
 3086 void Assembler::kmovql(Address dst, KRegister src) {
 3087   assert(VM_Version::supports_avx512bw(), "");
 3088   InstructionMark im(this);
 3089   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3090   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 3091   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3092   emit_int8((unsigned char)0x91);
 3093   emit_operand(src, dst, 0);
 3094 }
 3095 
 3096 void Assembler::kmovql(KRegister dst, Register src) {
 3097   assert(VM_Version::supports_avx512bw(), "");
 3098   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3099   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
 3100   emit_int16((unsigned char)0x92, (0xC0 | encode));
 3101 }
 3102 
 3103 void Assembler::kmovql(Register dst, KRegister src) {
 3104   assert(VM_Version::supports_avx512bw(), "");
 3105   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3106   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3107   emit_int16((unsigned char)0x93, (0xC0 | encode));
 3108 }
 3109 
 3110 void Assembler::knotwl(KRegister dst, KRegister src) {
 3111   assert(VM_Version::supports_evex(), "");
 3112   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3113   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3114   emit_int16(0x44, (0xC0 | encode));
 3115 }
 3116 
 3117 void Assembler::knotbl(KRegister dst, KRegister src) {
 3118   assert(VM_Version::supports_evex(), "");
 3119   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3120   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3121   emit_int16(0x44, (0xC0 | encode));
 3122 }
 3123 
 3124 void Assembler::korbl(KRegister dst, KRegister src1, KRegister src2) {
 3125   assert(VM_Version::supports_avx512dq(), "");
 3126   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3127   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3128   emit_int16(0x45, (0xC0 | encode));
 3129 }
 3130 
 3131 void Assembler::korwl(KRegister dst, KRegister src1, KRegister src2) {
 3132   assert(VM_Version::supports_evex(), "");
 3133   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3134   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3135   emit_int16(0x45, (0xC0 | encode));
 3136 }
 3137 
 3138 void Assembler::kordl(KRegister dst, KRegister src1, KRegister src2) {
 3139   assert(VM_Version::supports_avx512bw(), "");
 3140   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3141   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3142   emit_int16(0x45, (0xC0 | encode));
 3143 }
 3144 
 3145 void Assembler::korql(KRegister dst, KRegister src1, KRegister src2) {
 3146   assert(VM_Version::supports_avx512bw(), "");
 3147   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3148   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3149   emit_int16(0x45, (0xC0 | encode));
 3150 }
 3151 
 3152 void Assembler::kxorbl(KRegister dst, KRegister src1, KRegister src2) {
 3153   assert(VM_Version::supports_avx512dq(), "");
 3154   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3155   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3156   emit_int16(0x47, (0xC0 | encode));
 3157 }
 3158 
 3159 void Assembler::kxnorwl(KRegister dst, KRegister src1, KRegister src2) {
 3160   assert(VM_Version::supports_evex(), "");
 3161   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3162   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3163   emit_int16(0x46, (0xC0 | encode));
 3164 }
 3165 
 3166 void Assembler::kxorwl(KRegister dst, KRegister src1, KRegister src2) {
 3167   assert(VM_Version::supports_evex(), "");
 3168   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3169   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3170   emit_int16(0x47, (0xC0 | encode));
 3171 }
 3172 
 3173 void Assembler::kxordl(KRegister dst, KRegister src1, KRegister src2) {
 3174   assert(VM_Version::supports_avx512bw(), "");
 3175   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3176   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3177   emit_int16(0x47, (0xC0 | encode));
 3178 }
 3179 
 3180 void Assembler::kxorql(KRegister dst, KRegister src1, KRegister src2) {
 3181   assert(VM_Version::supports_avx512bw(), "");
 3182   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3183   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3184   emit_int16(0x47, (0xC0 | encode));
 3185 }
 3186 
 3187 void Assembler::kandbl(KRegister dst, KRegister src1, KRegister src2) {
 3188   assert(VM_Version::supports_avx512dq(), "");
 3189   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3190   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3191   emit_int16(0x41, (0xC0 | encode));
 3192 }
 3193 
 3194 void Assembler::kandwl(KRegister dst, KRegister src1, KRegister src2) {
 3195   assert(VM_Version::supports_evex(), "");
 3196   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3197   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3198   emit_int16(0x41, (0xC0 | encode));
 3199 }
 3200 
 3201 void Assembler::kanddl(KRegister dst, KRegister src1, KRegister src2) {
 3202   assert(VM_Version::supports_avx512bw(), "");
 3203   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3204   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3205   emit_int16(0x41, (0xC0 | encode));
 3206 }
 3207 
 3208 void Assembler::kandql(KRegister dst, KRegister src1, KRegister src2) {
 3209   assert(VM_Version::supports_avx512bw(), "");
 3210   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3211   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3212   emit_int16(0x41, (0xC0 | encode));
 3213 }
 3214 
 3215 void Assembler::knotdl(KRegister dst, KRegister src) {
 3216   assert(VM_Version::supports_avx512bw(), "");
 3217   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3218   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3219   emit_int16(0x44, (0xC0 | encode));
 3220 }
 3221 
 3222 void Assembler::knotql(KRegister dst, KRegister src) {
 3223   assert(VM_Version::supports_avx512bw(), "");
 3224   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3225   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3226   emit_int16(0x44, (0xC0 | encode));
 3227 }
 3228 
 3229 // This instruction produces ZF or CF flags
 3230 void Assembler::kortestbl(KRegister src1, KRegister src2) {
 3231   assert(VM_Version::supports_avx512dq(), "");
 3232   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3233   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3234   emit_int16((unsigned char)0x98, (0xC0 | encode));
 3235 }
 3236 
 3237 // This instruction produces ZF or CF flags
 3238 void Assembler::kortestwl(KRegister src1, KRegister src2) {
 3239   assert(VM_Version::supports_evex(), "");
 3240   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3241   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3242   emit_int16((unsigned char)0x98, (0xC0 | encode));
 3243 }
 3244 
 3245 // This instruction produces ZF or CF flags
 3246 void Assembler::kortestdl(KRegister src1, KRegister src2) {
 3247   assert(VM_Version::supports_avx512bw(), "");
 3248   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3249   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3250   emit_int16((unsigned char)0x98, (0xC0 | encode));
 3251 }
 3252 
 3253 // This instruction produces ZF or CF flags
 3254 void Assembler::kortestql(KRegister src1, KRegister src2) {
 3255   assert(VM_Version::supports_avx512bw(), "");
 3256   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3257   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3258   emit_int16((unsigned char)0x98, (0xC0 | encode));
 3259 }
 3260 
 3261 // This instruction produces ZF or CF flags
 3262 void Assembler::ktestql(KRegister src1, KRegister src2) {
 3263   assert(VM_Version::supports_avx512bw(), "");
 3264   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3265   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3266   emit_int16((unsigned char)0x99, (0xC0 | encode));
 3267 }
 3268 
 3269 void Assembler::ktestdl(KRegister src1, KRegister src2) {
 3270   assert(VM_Version::supports_avx512bw(), "");
 3271   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3272   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3273   emit_int16((unsigned char)0x99, (0xC0 | encode));
 3274 }
 3275 
 3276 void Assembler::ktestwl(KRegister src1, KRegister src2) {
 3277   assert(VM_Version::supports_avx512dq(), "");
 3278   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3279   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3280   emit_int16((unsigned char)0x99, (0xC0 | encode));
 3281 }
 3282 
 3283 void Assembler::ktestbl(KRegister src1, KRegister src2) {
 3284   assert(VM_Version::supports_avx512dq(), "");
 3285   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3286   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3287   emit_int16((unsigned char)0x99, (0xC0 | encode));
 3288 }
 3289 
 3290 void Assembler::ktestq(KRegister src1, KRegister src2) {
 3291   assert(VM_Version::supports_avx512bw(), "");
 3292   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3293   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3294   emit_int16((unsigned char)0x99, (0xC0 | encode));
 3295 }
 3296 
 3297 void Assembler::ktestd(KRegister src1, KRegister src2) {
 3298   assert(VM_Version::supports_avx512bw(), "");
 3299   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3300   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3301   emit_int16((unsigned char)0x99, (0xC0 | encode));
 3302 }
 3303 
 3304 void Assembler::kxnorbl(KRegister dst, KRegister src1, KRegister src2) {
 3305   assert(VM_Version::supports_avx512dq(), "");
 3306   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3307   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3308   emit_int16(0x46, (0xC0 | encode));
 3309 }
 3310 
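// Opmask shifts by immediate (66 0F3A map): opcodes 32/33 shift left and
// 30/31 shift right; VEX.W = 0 selects the byte/doubleword forms, W = 1 the
// word/quadword forms.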
 3311 void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) {
 3312   assert(VM_Version::supports_avx512dq(), "");
 3313   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3314   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 3315   emit_int16(0x32, (0xC0 | encode));
 3316   emit_int8(imm8);
 3317 }
 3318 
 3319 void Assembler::kshiftlql(KRegister dst, KRegister src, int imm8) {
 3320   assert(VM_Version::supports_avx512bw(), "");
 3321   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3322   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 3323   emit_int16(0x33, (0xC0 | encode));
 3324   emit_int8(imm8);
 3325 }
 3326 
 3328 void Assembler::kshiftrbl(KRegister dst, KRegister src, int imm8) {
 3329   assert(VM_Version::supports_avx512dq(), "");
 3330   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x30, (0xC0 | encode));
  emit_int8(imm8);
 3333 }
 3334 
 3335 void Assembler::kshiftrwl(KRegister dst, KRegister src, int imm8) {
 3336   assert(VM_Version::supports_evex(), "");
 3337   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3338   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 3339   emit_int16(0x30, (0xC0 | encode));
 3340   emit_int8(imm8);
 3341 }
 3342 
 3343 void Assembler::kshiftrdl(KRegister dst, KRegister src, int imm8) {
 3344   assert(VM_Version::supports_avx512bw(), "");
 3345   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3346   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 3347   emit_int16(0x31, (0xC0 | encode));
 3348   emit_int8(imm8);
 3349 }
 3350 
 3351 void Assembler::kshiftrql(KRegister dst, KRegister src, int imm8) {
 3352   assert(VM_Version::supports_avx512bw(), "");
 3353   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3354   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 3355   emit_int16(0x31, (0xC0 | encode));
 3356   emit_int8(imm8);
 3357 }
 3358 
 3359 void Assembler::kunpckdql(KRegister dst, KRegister src1, KRegister src2) {
 3360   assert(VM_Version::supports_avx512bw(), "");
 3361   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3362   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3363   emit_int16(0x4B, (0xC0 | encode));
 3364 }
 3365 
 3366 void Assembler::movb(Address dst, int imm8) {
 3367   InstructionMark im(this);
  prefix(dst);
 3369   emit_int8((unsigned char)0xC6);
 3370   emit_operand(rax, dst, 1);
 3371   emit_int8(imm8);
 3372 }
 3373 
 3375 void Assembler::movb(Address dst, Register src) {
 3376   assert(src->has_byte_register(), "must have byte register");
 3377   InstructionMark im(this);
 3378   prefix(dst, src, true);
 3379   emit_int8((unsigned char)0x88);
 3380   emit_operand(src, dst, 0);
 3381 }
 3382 
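// MOVD between a general register and the low 32 bits of an XMM register:
// 66 0F 6E loads the XMM register, 66 0F 7E stores from it.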
 3383 void Assembler::movdl(XMMRegister dst, Register src) {
 3384   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3385   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3386   int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 3387   emit_int16(0x6E, (0xC0 | encode));
 3388 }
 3389 
 3390 void Assembler::movdl(Register dst, XMMRegister src) {
 3391   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3392   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3393   // swap src/dst to get correct prefix
 3394   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 3395   emit_int16(0x7E, (0xC0 | encode));
 3396 }
 3397 
 3398 void Assembler::movdl(XMMRegister dst, Address src) {
 3399   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3400   InstructionMark im(this);
 3401   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3402   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 3403   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3404   emit_int8(0x6E);
 3405   emit_operand(dst, src, 0);
 3406 }
 3407 
 3408 void Assembler::movdl(Address dst, XMMRegister src) {
 3409   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3410   InstructionMark im(this);
 3411   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3412   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 3413   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3414   emit_int8(0x7E);
 3415   emit_operand(src, dst, 0);
 3416 }
 3417 
 3418 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
 3419   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3420   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3421   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3422   emit_int16(0x6F, (0xC0 | encode));
 3423 }
 3424 
 3425 void Assembler::movdqa(XMMRegister dst, Address src) {
 3426   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3427   InstructionMark im(this);
 3428   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3429   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3430   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3431   emit_int8(0x6F);
 3432   emit_operand(dst, src, 0);
 3433 }
 3434 
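// MOVDQU, unaligned 128-bit move: F3 0F 6F loads, F3 0F 7F stores.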
 3435 void Assembler::movdqu(XMMRegister dst, Address src) {
 3436   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3437   InstructionMark im(this);
 3438   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3439   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3440   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3441   emit_int8(0x6F);
 3442   emit_operand(dst, src, 0);
 3443 }
 3444 
 3445 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
 3446   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3447   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3448   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3449   emit_int16(0x6F, (0xC0 | encode));
 3450 }
 3451 
 3452 void Assembler::movdqu(Address dst, XMMRegister src) {
 3453   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3454   InstructionMark im(this);
 3455   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3456   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3457   attributes.reset_is_clear_context();
 3458   simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3459   emit_int8(0x7F);
 3460   emit_operand(src, dst, 0);
 3461 }
 3462 
// Move Unaligned 256-bit Vector
 3464 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
 3465   assert(UseAVX > 0, "");
 3466   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3467   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3468   emit_int16(0x6F, (0xC0 | encode));
 3469 }
 3470 
 3471 void Assembler::vmovdqu(XMMRegister dst, Address src) {
 3472   assert(UseAVX > 0, "");
 3473   InstructionMark im(this);
 3474   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3475   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3476   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3477   emit_int8(0x6F);
 3478   emit_operand(dst, src, 0);
 3479 }
 3480 
 3481 void Assembler::vmovdqu(Address dst, XMMRegister src) {
 3482   assert(UseAVX > 0, "");
 3483   InstructionMark im(this);
 3484   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3485   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3486   attributes.reset_is_clear_context();
 3487   // swap src<->dst for encoding
 3488   assert(src != xnoreg, "sanity");
 3489   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3490   emit_int8(0x7F);
 3491   emit_operand(src, dst, 0);
 3492 }
 3493 
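// AVX/AVX2 masked vector moves: only lanes whose mask element has its sign
// bit set are loaded or stored (masked-off lanes are zeroed on loads).
// VPMASKMOVD/Q loads use opcode 8C; VMASKMOVPS/PD use 2C/2D for loads and
// 2E/2F for stores.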
 3494 void Assembler::vpmaskmovd(XMMRegister dst, XMMRegister mask, Address src, int vector_len) {
 3495   assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), "");
 3496   InstructionMark im(this);
 3497   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
 3498   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 3499   emit_int8((unsigned char)0x8C);
 3500   emit_operand(dst, src, 0);
 3501 }
 3502 
 3503 void Assembler::vpmaskmovq(XMMRegister dst, XMMRegister mask, Address src, int vector_len) {
 3504   assert((VM_Version::supports_avx2() && vector_len == AVX_256bit), "");
 3505   InstructionMark im(this);
 3506   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
 3507   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 3508   emit_int8((unsigned char)0x8C);
 3509   emit_operand(dst, src, 0);
 3510 }
 3511 
 3512 void Assembler::vmaskmovps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
 3513   assert(UseAVX > 0, "requires some form of AVX");
 3514   InstructionMark im(this);
 3515   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3516   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 3517   emit_int8(0x2C);
 3518   emit_operand(dst, src, 0);
 3519 }
 3520 
 3521 void Assembler::vmaskmovpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
 3522   assert(UseAVX > 0, "requires some form of AVX");
 3523   InstructionMark im(this);
 3524   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3525   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 3526   emit_int8(0x2D);
 3527   emit_operand(dst, src, 0);
 3528 }
 3529 
 3530 void Assembler::vmaskmovps(Address dst, XMMRegister src, XMMRegister mask, int vector_len) {
 3531   assert(UseAVX > 0, "");
 3532   InstructionMark im(this);
 3533   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3534   vex_prefix(dst, mask->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 3535   emit_int8(0x2E);
 3536   emit_operand(src, dst, 0);
 3537 }
 3538 
 3539 void Assembler::vmaskmovpd(Address dst, XMMRegister src, XMMRegister mask, int vector_len) {
 3540   assert(UseAVX > 0, "");
 3541   InstructionMark im(this);
 3542   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 3543   vex_prefix(dst, mask->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 3544   emit_int8(0x2F);
 3545   emit_operand(src, dst, 0);
 3546 }
 3547 
// Move unaligned EVEX-encoded vector; element size is programmable: 8, 16, 32, or 64 bits
 3549 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3550   assert(VM_Version::supports_avx512vlbw(), "");
 3551   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 3552   attributes.set_embedded_opmask_register_specifier(mask);
 3553   attributes.set_is_evex_instruction();
 3554   if (merge) {
 3555     attributes.reset_is_clear_context();
 3556   }
 3557   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3558   emit_int16(0x6F, (0xC0 | encode));
 3559 }
 3560 
 3561 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
 3562   // Unmasked instruction
 3563   evmovdqub(dst, k0, src, /*merge*/ false, vector_len);
 3564 }
 3565 
 3566 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
 3567   assert(VM_Version::supports_avx512vlbw(), "");
 3568   InstructionMark im(this);
 3569   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 3570   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3571   attributes.set_embedded_opmask_register_specifier(mask);
 3572   attributes.set_is_evex_instruction();
 3573   if (merge) {
 3574     attributes.reset_is_clear_context();
 3575   }
 3576   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3577   emit_int8(0x6F);
 3578   emit_operand(dst, src, 0);
 3579 }
 3580 
 3581 void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
 3582   // Unmasked instruction
 3583   evmovdqub(dst, k0, src, /*merge*/ false, vector_len);
 3584 }
 3585 
 3586 void Assembler::evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3587   assert(VM_Version::supports_avx512vlbw(), "");
 3588   assert(src != xnoreg, "sanity");
 3589   InstructionMark im(this);
 3590   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 3591   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3592   attributes.set_embedded_opmask_register_specifier(mask);
 3593   attributes.set_is_evex_instruction();
 3594   if (merge) {
 3595     attributes.reset_is_clear_context();
 3596   }
 3597   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3598   emit_int8(0x7F);
 3599   emit_operand(src, dst, 0);
 3600 }
 3601 
 3602 void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
 3603   // Unmasked instruction
 3604   evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
 3605 }
 3606 
 3607 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
 3608   assert(VM_Version::supports_avx512vlbw(), "");
 3609   InstructionMark im(this);
 3610   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 3611   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3612   attributes.set_embedded_opmask_register_specifier(mask);
 3613   attributes.set_is_evex_instruction();
 3614   if (merge) {
 3615     attributes.reset_is_clear_context();
 3616   }
 3617   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3618   emit_int8(0x6F);
 3619   emit_operand(dst, src, 0);
 3620 }
 3621 
 3622 void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
 3623   // Unmasked instruction
 3624   evmovdquw(dst, k0, src, /*merge*/ false, vector_len);
 3625 }
 3626 
 3627 void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3628   assert(VM_Version::supports_avx512vlbw(), "");
 3629   assert(src != xnoreg, "sanity");
 3630   InstructionMark im(this);
 3631   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 3632   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3633   attributes.set_embedded_opmask_register_specifier(mask);
 3634   attributes.set_is_evex_instruction();
 3635   if (merge) {
 3636     attributes.reset_is_clear_context();
 3637   }
 3638   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3639   emit_int8(0x7F);
 3640   emit_operand(src, dst, 0);
 3641 }
 3642 
 3643 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
 3644   // Unmasked instruction
 3645   evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
 3646 }
 3647 
 3648 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3649   assert(VM_Version::supports_evex(), "");
 3650   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3651   attributes.set_embedded_opmask_register_specifier(mask);
 3652   attributes.set_is_evex_instruction();
 3653   if (merge) {
 3654     attributes.reset_is_clear_context();
 3655   }
 3656   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3657   emit_int16(0x6F, (0xC0 | encode));
 3658 }
 3659 
 3660 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
 3661   // Unmasked instruction
 3662   evmovdqul(dst, k0, src, /*merge*/ false, vector_len);
 3663 }
 3664 
 3665 void Assembler::evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
 3666   assert(VM_Version::supports_evex(), "");
 3667   InstructionMark im(this);
3668   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3669   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3670   attributes.set_embedded_opmask_register_specifier(mask);
 3671   attributes.set_is_evex_instruction();
 3672   if (merge) {
 3673     attributes.reset_is_clear_context();
 3674   }
 3675   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3676   emit_int8(0x6F);
 3677   emit_operand(dst, src, 0);
 3678 }
 3679 
 3680 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
3681   // Unmasked instruction
 3682   evmovdqul(dst, k0, src, /*merge*/ true, vector_len);
 3683 }
 3684 
 3685 void Assembler::evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3686   assert(VM_Version::supports_evex(), "");
 3687   assert(src != xnoreg, "sanity");
 3688   InstructionMark im(this);
 3689   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3690   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3691   attributes.set_embedded_opmask_register_specifier(mask);
 3692   attributes.set_is_evex_instruction();
 3693   if (merge) {
 3694     attributes.reset_is_clear_context();
 3695   }
 3696   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3697   emit_int8(0x7F);
 3698   emit_operand(src, dst, 0);
 3699 }
 3700 
 3701 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
 3702   // Unmasked instruction
 3703   evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
 3704 }
 3705 
 3706 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3707   assert(VM_Version::supports_evex(), "");
 3708   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3709   attributes.set_embedded_opmask_register_specifier(mask);
 3710   attributes.set_is_evex_instruction();
 3711   if (merge) {
 3712     attributes.reset_is_clear_context();
 3713   }
 3714   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3715   emit_int16(0x6F, (0xC0 | encode));
 3716 }
 3717 
 3718 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
 3719   // Unmasked instruction
 3720   evmovdquq(dst, k0, src, /*merge*/ false, vector_len);
 3721 }
 3722 
 3723 void Assembler::evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
 3724   assert(VM_Version::supports_evex(), "");
 3725   InstructionMark im(this);
 3726   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3727   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3728   attributes.set_embedded_opmask_register_specifier(mask);
 3729   attributes.set_is_evex_instruction();
 3730   if (merge) {
 3731     attributes.reset_is_clear_context();
 3732   }
 3733   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3734   emit_int8(0x6F);
 3735   emit_operand(dst, src, 0);
 3736 }
 3737 
 3738 void Assembler::evmovntdquq(Address dst, XMMRegister src, int vector_len) {
 3739   // Unmasked instruction
 3740   evmovntdquq(dst, k0, src, /*merge*/ true, vector_len);
 3741 }
 3742 
 3743 void Assembler::evmovntdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3744   assert(VM_Version::supports_evex(), "");
 3745   assert(src != xnoreg, "sanity");
 3746   InstructionMark im(this);
 3747   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3748   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3749   attributes.set_embedded_opmask_register_specifier(mask);
 3750   if (merge) {
 3751     attributes.reset_is_clear_context();
 3752   }
 3753   attributes.set_is_evex_instruction();
 3754   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3755   emit_int8(0xE7);
 3756   emit_operand(src, dst, 0);
 3757 }
 3758 
 3759 void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
 3760   // Unmasked instruction
 3761   evmovdquq(dst, k0, src, /*merge*/ true, vector_len);
 3762 }
 3763 
 3764 void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 3765   assert(VM_Version::supports_evex(), "");
 3766   assert(src != xnoreg, "sanity");
 3767   InstructionMark im(this);
 3768   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 3769   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3770   attributes.set_embedded_opmask_register_specifier(mask);
 3771   if (merge) {
 3772     attributes.reset_is_clear_context();
 3773   }
 3774   attributes.set_is_evex_instruction();
 3775   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3776   emit_int8(0x7F);
 3777   emit_operand(src, dst, 0);
 3778 }
 3779 
3780 // Uses zero extension on 64-bit platforms
 3781 
 3782 void Assembler::movl(Register dst, int32_t imm32) {
 3783   int encode = prefix_and_encode(dst->encoding());
 3784   emit_int8(0xB8 | encode);
 3785   emit_int32(imm32);
 3786 }
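// Encoding note (illustrative): the destination register is folded into the
// opcode byte (0xB8 + reg) and the immediate follows in little-endian order,
// e.g. movl(rcx, 0x12345678) emits B9 78 56 34 12.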
 3787 
 3788 void Assembler::movl(Register dst, Register src) {
 3789   int encode = prefix_and_encode(dst->encoding(), src->encoding());
 3790   emit_int16((unsigned char)0x8B, (0xC0 | encode));
 3791 }
 3792 
 3793 void Assembler::movl(Register dst, Address src) {
 3794   InstructionMark im(this);
 3795   prefix(src, dst);
 3796   emit_int8((unsigned char)0x8B);
 3797   emit_operand(dst, src, 0);
 3798 }
 3799 
 3800 void Assembler::movl(Address dst, int32_t imm32) {
 3801   InstructionMark im(this);
 3802   prefix(dst);
 3803   emit_int8((unsigned char)0xC7);
 3804   emit_operand(rax, dst, 4);
 3805   emit_int32(imm32);
 3806 }
 3807 
 3808 void Assembler::movl(Address dst, Register src) {
 3809   InstructionMark im(this);
 3810   prefix(dst, src);
 3811   emit_int8((unsigned char)0x89);
 3812   emit_operand(src, dst, 0);
 3813 }
 3814 
3815 // Newer CPUs require the use of movsd and movss to avoid partial register stalls
3816 // when loading from memory. But for old Opterons, use movlpd instead of movsd.
 3817 // The selection is done in MacroAssembler::movdbl() and movflt().
 3818 void Assembler::movlpd(XMMRegister dst, Address src) {
 3819   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3820   InstructionMark im(this);
 3821   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3822   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 3823   attributes.set_rex_vex_w_reverted();
 3824   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3825   emit_int8(0x12);
 3826   emit_operand(dst, src, 0);
 3827 }
 3828 
 3829 void Assembler::movq(XMMRegister dst, Address src) {
 3830   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3831   InstructionMark im(this);
 3832   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3833   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 3834   attributes.set_rex_vex_w_reverted();
 3835   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3836   emit_int8(0x7E);
 3837   emit_operand(dst, src, 0);
 3838 }
 3839 
 3840 void Assembler::movq(Address dst, XMMRegister src) {
 3841   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3842   InstructionMark im(this);
 3843   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3844   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 3845   attributes.set_rex_vex_w_reverted();
 3846   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3847   emit_int8((unsigned char)0xD6);
 3848   emit_operand(src, dst, 0);
 3849 }
 3850 
 3851 void Assembler::movq(XMMRegister dst, XMMRegister src) {
 3852   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3853   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3854   attributes.set_rex_vex_w_reverted();
 3855   int encode = simd_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 3856   emit_int16((unsigned char)0xD6, (0xC0 | encode));
 3857 }
 3858 
 3859 void Assembler::movq(Register dst, XMMRegister src) {
 3860   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3861   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3862   // swap src/dst to get correct prefix
 3863   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 3864   emit_int16(0x7E, (0xC0 | encode));
 3865 }
 3866 
 3867 void Assembler::movq(XMMRegister dst, Register src) {
 3868   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3869   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3870   int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 3871   emit_int16(0x6E, (0xC0 | encode));
 3872 }
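// Both GPR<->XMM forms above use the MOVD/MOVQ encodings (66 [REX.W] 0F 6E/7E /r);
// REX.W selects the full 64-bit transfer. With the legacy SSE encoding, e.g.
// (illustrative) movq(xmm0, rax) emits 66 48 0F 6E C0.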
 3873 
 3874 void Assembler::movsbl(Register dst, Address src) { // movsxb
 3875   InstructionMark im(this);
 3876   prefix(src, dst, false, true /* is_map1 */);
 3877   emit_int8((unsigned char)0xBE);
 3878   emit_operand(dst, src, 0);
 3879 }
 3880 
 3881 void Assembler::movsbl(Register dst, Register src) { // movsxb
 3882   NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
 3883   int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true, true /* is_map1 */);
 3884   emit_opcode_prefix_and_encoding((unsigned char)0xBE, 0xC0, encode);
 3885 }
 3886 
 3887 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
 3888   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3889   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3890   attributes.set_rex_vex_w_reverted();
 3891   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3892   emit_int16(0x10, (0xC0 | encode));
 3893 }
 3894 
 3895 void Assembler::movsd(XMMRegister dst, Address src) {
 3896   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3897   InstructionMark im(this);
 3898   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3899   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 3900   attributes.set_rex_vex_w_reverted();
 3901   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3902   emit_int8(0x10);
 3903   emit_operand(dst, src, 0);
 3904 }
 3905 
 3906 void Assembler::movsd(Address dst, XMMRegister src) {
 3907   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 3908   InstructionMark im(this);
 3909   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3910   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 3911   attributes.reset_is_clear_context();
 3912   attributes.set_rex_vex_w_reverted();
 3913   simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3914   emit_int8(0x11);
 3915   emit_operand(src, dst, 0);
 3916 }
 3917 
 3918 void Assembler::vmovsd(XMMRegister dst, XMMRegister src, XMMRegister src2) {
 3919   assert(UseAVX > 0, "Requires some form of AVX");
 3920   InstructionMark im(this);
 3921   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3922   int encode = vex_prefix_and_encode(src2->encoding(), src->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 3923   emit_int16(0x11, (0xC0 | encode));
 3924 }
 3925 
 3926 void Assembler::movss(XMMRegister dst, XMMRegister src) {
 3927   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 3928   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3929   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3930   emit_int16(0x10, (0xC0 | encode));
 3931 }
 3932 
 3933 void Assembler::movss(XMMRegister dst, Address src) {
 3934   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 3935   InstructionMark im(this);
 3936   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3937   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 3938   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3939   emit_int8(0x10);
 3940   emit_operand(dst, src, 0);
 3941 }
 3942 
 3943 void Assembler::movss(Address dst, XMMRegister src) {
 3944   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 3945   InstructionMark im(this);
 3946   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 3947   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 3948   attributes.reset_is_clear_context();
 3949   simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 3950   emit_int8(0x11);
 3951   emit_operand(src, dst, 0);
 3952 }
 3953 
 3954 void Assembler::movswl(Register dst, Address src) { // movsxw
 3955   InstructionMark im(this);
 3956   prefix(src, dst, false, true /* is_map1 */);
 3957   emit_int8((unsigned char)0xBF);
 3958   emit_operand(dst, src, 0);
 3959 }
 3960 
 3961 void Assembler::movswl(Register dst, Register src) { // movsxw
 3962   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 3963   emit_opcode_prefix_and_encoding((unsigned char)0xBF, 0xC0, encode);
 3964 }
 3965 
 3966 void Assembler::movups(XMMRegister dst, Address src) {
 3967   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 3968   InstructionMark im(this);
 3969   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3970   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3971   simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3972   emit_int8(0x10);
 3973   emit_operand(dst, src, 0);
 3974 }
 3975 
 3976 void Assembler::vmovups(XMMRegister dst, Address src, int vector_len) {
 3977   assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
 3978   InstructionMark im(this);
 3979   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3980   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3981   simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3982   emit_int8(0x10);
 3983   emit_operand(dst, src, 0);
 3984 }
 3985 
 3986 void Assembler::movups(Address dst, XMMRegister src) {
 3987   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 3988   InstructionMark im(this);
 3989   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 3990   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 3991   simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 3992   emit_int8(0x11);
 3993   emit_operand(src, dst, 0);
 3994 }
 3995 
 3996 void Assembler::vmovups(Address dst, XMMRegister src, int vector_len) {
 3997   assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
 3998   InstructionMark im(this);
 3999   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4000   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4001   simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 4002   emit_int8(0x11);
 4003   emit_operand(src, dst, 0);
 4004 }
 4005 
 4006 void Assembler::movw(Address dst, int imm16) {
 4007   InstructionMark im(this);
 4008 
4009   emit_int8(0x66); // 16-bit operand-size prefix
 4010   prefix(dst);
 4011   emit_int8((unsigned char)0xC7);
 4012   emit_operand(rax, dst, 2);
 4013   emit_int16(imm16);
 4014 }
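// The 0x66 operand-size override makes 0xC7 /0 consume a 16-bit immediate. The
// `2` passed to emit_operand is the number of immediate bytes following the
// operand (cf. 4 for imm32, 1 for imm8 elsewhere), so RIP-relative
// displacements are fixed up correctly.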
 4015 
 4016 void Assembler::movw(Register dst, Address src) {
 4017   InstructionMark im(this);
 4018   emit_int8(0x66);
 4019   prefix(src, dst);
 4020   emit_int8((unsigned char)0x8B);
 4021   emit_operand(dst, src, 0);
 4022 }
 4023 
 4024 void Assembler::movw(Address dst, Register src) {
 4025   InstructionMark im(this);
 4026   emit_int8(0x66);
 4027   prefix(dst, src);
 4028   emit_int8((unsigned char)0x89);
 4029   emit_operand(src, dst, 0);
 4030 }
 4031 
 4032 void Assembler::movzbl(Register dst, Address src) { // movzxb
 4033   InstructionMark im(this);
 4034   prefix(src, dst, false, true /* is_map1 */);
 4035   emit_int8((unsigned char)0xB6);
 4036   emit_operand(dst, src, 0);
 4037 }
 4038 
 4039 void Assembler::movzbl(Register dst, Register src) { // movzxb
 4040   NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
 4041   int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true, true /* is_map1 */);
 4042   emit_opcode_prefix_and_encoding((unsigned char)0xB6, 0xC0, encode);
 4043 }
 4044 
 4045 void Assembler::movzwl(Register dst, Address src) { // movzxw
 4046   InstructionMark im(this);
 4047   prefix(src, dst, false, true /* is_map1 */);
 4048   emit_int8((unsigned char)0xB7);
 4049   emit_operand(dst, src, 0);
 4050 }
 4051 
 4052 void Assembler::movzwl(Register dst, Register src) { // movzxw
 4053   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 4054   emit_opcode_prefix_and_encoding((unsigned char)0xB7, 0xC0, encode);
 4055 }
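// The movsx/movzx family above maps onto the two-byte 0F B6/B7/BE/BF opcodes
// (zero- vs sign-extending byte/word loads); e.g. (illustrative)
// movzwl(rax, rcx) emits 0F B7 C1.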
 4056 
 4057 void Assembler::mull(Address src) {
 4058   InstructionMark im(this);
 4059   prefix(src);
 4060   emit_int8((unsigned char)0xF7);
 4061   emit_operand(rsp, src, 0);
 4062 }
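// Note: rsp is not a real operand here - emit_operand only borrows its
// encoding (4) for the ModRM reg field, producing the F7 /4 (mul) form.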
 4063 
 4064 void Assembler::emull(Address src, bool no_flags) {
 4065   InstructionMark im(this);
 4066   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4067   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 4068   evex_prefix_nf(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4069   emit_int8((unsigned char)0xF7);
 4070   emit_operand(rsp, src, 0);
 4071 }
 4072 
 4073 void Assembler::mull(Register src) {
 4074   int encode = prefix_and_encode(src->encoding());
 4075   emit_int16((unsigned char)0xF7, (0xE0 | encode));
 4076 }
 4077 
 4078 void Assembler::emull(Register src, bool no_flags) {
 4079   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4080   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4081   emit_int16((unsigned char)0xF7, (0xE0 | encode));
 4082 }
 4083 
 4084 void Assembler::mulsd(XMMRegister dst, Address src) {
 4085   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 4086   InstructionMark im(this);
 4087   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4088   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 4089   attributes.set_rex_vex_w_reverted();
 4090   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 4091   emit_int8(0x59);
 4092   emit_operand(dst, src, 0);
 4093 }
 4094 
 4095 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
 4096   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 4097   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4098   attributes.set_rex_vex_w_reverted();
 4099   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 4100   emit_int16(0x59, (0xC0 | encode));
 4101 }
 4102 
 4103 void Assembler::mulss(XMMRegister dst, Address src) {
 4104   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 4105   InstructionMark im(this);
 4106   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4107   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 4108   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 4109   emit_int8(0x59);
 4110   emit_operand(dst, src, 0);
 4111 }
 4112 
 4113 void Assembler::mulss(XMMRegister dst, XMMRegister src) {
 4114   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 4115   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4116   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 4117   emit_int16(0x59, (0xC0 | encode));
 4118 }
 4119 
 4120 void Assembler::negl(Register dst) {
 4121   int encode = prefix_and_encode(dst->encoding());
 4122   emit_int16((unsigned char)0xF7, (0xD8 | encode));
 4123 }
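// neg r/m32 is F7 /3; the 0xD8 base above is ModRM 11 011 rrr, so e.g.
// (illustrative) negl(rcx) emits F7 D9.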
 4124 
 4125 void Assembler::enegl(Register dst, Register src, bool no_flags) {
 4126   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4127   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4128   emit_int16((unsigned char)0xF7, (0xD8 | encode));
 4129 }
 4130 
 4131 void Assembler::negl(Address dst) {
 4132   InstructionMark im(this);
 4133   prefix(dst);
 4134   emit_int8((unsigned char)0xF7);
 4135   emit_operand(as_Register(3), dst, 0);
 4136 }
 4137 
 4138 void Assembler::enegl(Register dst, Address src, bool no_flags) {
 4139   InstructionMark im(this);
 4140   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4141   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 4142   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4143   emit_int8((unsigned char)0xF7);
 4144   emit_operand(as_Register(3), src, 0);
 4145 }
 4146 
 4147 void Assembler::nop(uint i) {
 4148 #ifdef ASSERT
 4149   assert(i > 0, " ");
4150   // The fancy nops aren't currently recognized by debuggers, making it a
4151   // pain to disassemble code while debugging. If asserts are on, speed is
4152   // clearly not an issue, so simply use the traditional single-byte nop
4153   // for alignment.
 4154 
 4155   for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
 4156   return;
 4157 
 4158 #endif // ASSERT
 4159 
 4160   if (UseAddressNop && VM_Version::is_intel()) {
 4161     //
4162     // Using multi-byte nops "0x0F 0x1F [address]" for Intel
 4163     //  1: 0x90
 4164     //  2: 0x66 0x90
 4165     //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
 4166     //  4: 0x0F 0x1F 0x40 0x00
 4167     //  5: 0x0F 0x1F 0x44 0x00 0x00
 4168     //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
 4169     //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
 4170     //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4171     //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4172     // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4173     // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4174 
4175     // The remaining encoding is Intel-specific - don't use consecutive address nops
 4176 
 4177     // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4178     // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4179     // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4180     // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4181 
4182     while (i >= 15) {
 4183       // For Intel don't generate consecutive address nops (mix with regular nops)
 4184       i -= 15;
 4185       emit_int24(0x66, 0x66, 0x66);
 4186       addr_nop_8();
 4187       emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
 4188     }
 4189     switch (i) {
 4190       case 14:
 4191         emit_int8(0x66); // size prefix
 4192       case 13:
 4193         emit_int8(0x66); // size prefix
 4194       case 12:
 4195         addr_nop_8();
 4196         emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
 4197         break;
 4198       case 11:
 4199         emit_int8(0x66); // size prefix
 4200       case 10:
 4201         emit_int8(0x66); // size prefix
 4202       case 9:
 4203         emit_int8(0x66); // size prefix
 4204       case 8:
 4205         addr_nop_8();
 4206         break;
 4207       case 7:
 4208         addr_nop_7();
 4209         break;
 4210       case 6:
 4211         emit_int8(0x66); // size prefix
 4212       case 5:
 4213         addr_nop_5();
 4214         break;
 4215       case 4:
 4216         addr_nop_4();
 4217         break;
 4218       case 3:
 4219         // Don't use "0x0F 0x1F 0x00" - need patching safe padding
 4220         emit_int8(0x66); // size prefix
 4221       case 2:
 4222         emit_int8(0x66); // size prefix
 4223       case 1:
4224         emit_int8((unsigned char)0x90); // nop
 4226         break;
 4227       default:
 4228         assert(i == 0, " ");
 4229     }
 4230     return;
 4231   }
 4232   if (UseAddressNop && VM_Version::is_amd_family()) {
 4233     //
4234     // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
 4235     //  1: 0x90
 4236     //  2: 0x66 0x90
 4237     //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
 4238     //  4: 0x0F 0x1F 0x40 0x00
 4239     //  5: 0x0F 0x1F 0x44 0x00 0x00
 4240     //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
 4241     //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
 4242     //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4243     //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4244     // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4245     // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4246 
4247     // The remaining encoding is AMD-specific - use consecutive address nops
 4248 
 4249     // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
 4250     // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
 4251     // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
 4252     // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
 4253     // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4254     //     Size prefixes (0x66) are added for larger sizes
 4255 
4256     while (i >= 22) {
 4257       i -= 11;
 4258       emit_int24(0x66, 0x66, 0x66);
 4259       addr_nop_8();
 4260     }
4261     // Generate the first nop for sizes 12 through 21
 4262     switch (i) {
 4263       case 21:
 4264         i -= 1;
 4265         emit_int8(0x66); // size prefix
 4266       case 20:
 4267       case 19:
 4268         i -= 1;
 4269         emit_int8(0x66); // size prefix
 4270       case 18:
 4271       case 17:
 4272         i -= 1;
 4273         emit_int8(0x66); // size prefix
 4274       case 16:
 4275       case 15:
 4276         i -= 8;
 4277         addr_nop_8();
 4278         break;
 4279       case 14:
 4280       case 13:
 4281         i -= 7;
 4282         addr_nop_7();
 4283         break;
 4284       case 12:
 4285         i -= 6;
 4286         emit_int8(0x66); // size prefix
 4287         addr_nop_5();
 4288         break;
 4289       default:
 4290         assert(i < 12, " ");
 4291     }
 4292 
4293     // Generate the second nop for sizes 1 through 11
 4294     switch (i) {
 4295       case 11:
 4296         emit_int8(0x66); // size prefix
 4297       case 10:
 4298         emit_int8(0x66); // size prefix
 4299       case 9:
 4300         emit_int8(0x66); // size prefix
 4301       case 8:
 4302         addr_nop_8();
 4303         break;
 4304       case 7:
 4305         addr_nop_7();
 4306         break;
 4307       case 6:
 4308         emit_int8(0x66); // size prefix
 4309       case 5:
 4310         addr_nop_5();
 4311         break;
 4312       case 4:
 4313         addr_nop_4();
 4314         break;
 4315       case 3:
 4316         // Don't use "0x0F 0x1F 0x00" - need patching safe padding
 4317         emit_int8(0x66); // size prefix
 4318       case 2:
 4319         emit_int8(0x66); // size prefix
 4320       case 1:
4321         emit_int8((unsigned char)0x90); // nop
 4323         break;
 4324       default:
 4325         assert(i == 0, " ");
 4326     }
 4327     return;
 4328   }
 4329 
 4330   if (UseAddressNop && VM_Version::is_zx()) {
 4331     //
4332     // Using multi-byte nops "0x0F 0x1F [address]" for ZX
 4333     //  1: 0x90
 4334     //  2: 0x66 0x90
 4335     //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
 4336     //  4: 0x0F 0x1F 0x40 0x00
 4337     //  5: 0x0F 0x1F 0x44 0x00 0x00
 4338     //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
 4339     //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
 4340     //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4341     //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4342     // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4343     // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
 4344 
4345     // The remaining encoding is ZX-specific - don't use consecutive address nops
 4346 
 4347     // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4348     // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4349     // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4350     // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
 4351 
 4352     while (i >= 15) {
 4353       // For ZX don't generate consecutive address nops (mix with regular nops)
 4354       i -= 15;
 4355       emit_int24(0x66, 0x66, 0x66);
 4356       addr_nop_8();
 4357       emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
 4358     }
 4359     switch (i) {
 4360       case 14:
 4361         emit_int8(0x66); // size prefix
 4362       case 13:
 4363         emit_int8(0x66); // size prefix
 4364       case 12:
 4365         addr_nop_8();
 4366         emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
 4367         break;
 4368       case 11:
 4369         emit_int8(0x66); // size prefix
 4370       case 10:
 4371         emit_int8(0x66); // size prefix
 4372       case 9:
 4373         emit_int8(0x66); // size prefix
 4374       case 8:
 4375         addr_nop_8();
 4376         break;
 4377       case 7:
 4378         addr_nop_7();
 4379         break;
 4380       case 6:
 4381         emit_int8(0x66); // size prefix
 4382       case 5:
 4383         addr_nop_5();
 4384         break;
 4385       case 4:
 4386         addr_nop_4();
 4387         break;
 4388       case 3:
 4389         // Don't use "0x0F 0x1F 0x00" - need patching safe padding
 4390         emit_int8(0x66); // size prefix
 4391       case 2:
 4392         emit_int8(0x66); // size prefix
 4393       case 1:
4394         emit_int8((unsigned char)0x90); // nop
 4396         break;
 4397       default:
 4398         assert(i == 0, " ");
 4399     }
 4400     return;
 4401   }
 4402 
 4403   // Using nops with size prefixes "0x66 0x90".
 4404   // From AMD Optimization Guide:
 4405   //  1: 0x90
 4406   //  2: 0x66 0x90
 4407   //  3: 0x66 0x66 0x90
 4408   //  4: 0x66 0x66 0x66 0x90
 4409   //  5: 0x66 0x66 0x90 0x66 0x90
 4410   //  6: 0x66 0x66 0x90 0x66 0x66 0x90
 4411   //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
 4412   //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
 4413   //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
 4414   // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
 4415   //
 4416   while (i > 12) {
 4417     i -= 4;
 4418     emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
 4419   }
 4420   // 1 - 12 nops
 4421   if (i > 8) {
 4422     if (i > 9) {
 4423       i -= 1;
 4424       emit_int8(0x66);
 4425     }
 4426     i -= 3;
 4427     emit_int24(0x66, 0x66, (unsigned char)0x90);
 4428   }
 4429   // 1 - 8 nops
 4430   if (i > 4) {
 4431     if (i > 6) {
 4432       i -= 1;
 4433       emit_int8(0x66);
 4434     }
 4435     i -= 3;
 4436     emit_int24(0x66, 0x66, (unsigned char)0x90);
 4437   }
 4438   switch (i) {
 4439     case 4:
 4440       emit_int8(0x66);
 4441     case 3:
 4442       emit_int8(0x66);
 4443     case 2:
 4444       emit_int8(0x66);
 4445     case 1:
 4446       emit_int8((unsigned char)0x90);
 4447       break;
 4448     default:
 4449       assert(i == 0, " ");
 4450   }
 4451 }
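// Examples (illustrative, product build): with UseAddressNop on an Intel CPU,
// nop(7) emits the single address nop 0F 1F 80 00 00 00 00, while nop(3)
// emits the patching-safe 66 66 90, per the tables above.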
 4452 
 4453 void Assembler::notl(Register dst) {
 4454   int encode = prefix_and_encode(dst->encoding());
 4455   emit_int16((unsigned char)0xF7, (0xD0 | encode));
 4456 }
 4457 
 4458 void Assembler::enotl(Register dst, Register src) {
 4459   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4460   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
 4461   emit_int16((unsigned char)0xF7, (0xD0 | encode));
 4462 }
 4463 
 4464 void Assembler::orw(Register dst, Register src) {
 4465   (void)prefix_and_encode(dst->encoding(), src->encoding());
 4466   emit_arith(0x0B, 0xC0, dst, src);
 4467 }
 4468 
 4469 void Assembler::eorw(Register dst, Register src1, Register src2, bool no_flags) {
 4470   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4471   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4472   emit_arith(0x0B, 0xC0, src1, src2);
 4473 }
 4474 
 4475 void Assembler::orl(Address dst, int32_t imm32) {
 4476   InstructionMark im(this);
 4477   prefix(dst);
 4478   emit_arith_operand(0x81, rcx, dst, imm32);
 4479 }
 4480 
 4481 void Assembler::eorl(Register dst, Address src, int32_t imm32, bool no_flags) {
 4482   InstructionMark im(this);
 4483   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4484   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 4485   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4486   emit_arith_operand(0x81, rcx, src, imm32);
 4487 }
 4488 
 4489 void Assembler::orl(Register dst, int32_t imm32) {
 4490   prefix(dst);
 4491   emit_arith(0x81, 0xC8, dst, imm32);
 4492 }
 4493 
 4494 void Assembler::eorl(Register dst, Register src, int32_t imm32, bool no_flags) {
 4495   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4496   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4497   emit_arith(0x81, 0xC8, src, imm32);
 4498 }
 4499 
 4500 void Assembler::orl(Register dst, Address src) {
 4501   InstructionMark im(this);
 4502   prefix(src, dst);
 4503   emit_int8(0x0B);
 4504   emit_operand(dst, src, 0);
 4505 }
 4506 
 4507 void Assembler::eorl(Register dst, Register src1, Address src2, bool no_flags) {
 4508   InstructionMark im(this);
 4509   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4510   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
 4511   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4512   emit_int8(0x0B);
 4513   emit_operand(src1, src2, 0);
 4514 }
 4515 
 4516 void Assembler::orl(Register dst, Register src) {
 4517   (void) prefix_and_encode(dst->encoding(), src->encoding());
 4518   emit_arith(0x0B, 0xC0, dst, src);
 4519 }
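// emit_arith folds the register pair into ModRM; e.g. (illustrative)
// orl(rax, rbx) emits 0B C3 (opcode 0x0B /r with reg = rax, rm = rbx).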
 4520 
 4521 void Assembler::eorl(Register dst, Register src1, Register src2, bool no_flags) {
 4522   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4523   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4524   // opcode matches gcc
 4525   emit_arith(0x09, 0xC0, src1, src2);
 4526 }
 4527 
 4528 void Assembler::orl(Address dst, Register src) {
 4529   InstructionMark im(this);
 4530   prefix(dst, src);
 4531   emit_int8(0x09);
 4532   emit_operand(src, dst, 0);
 4533 }
 4534 
 4535 void Assembler::eorl(Register dst, Address src1, Register src2, bool no_flags) {
 4536   InstructionMark im(this);
 4537   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4538   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 4539   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4540   emit_int8(0x09);
 4541   emit_operand(src2, src1, 0);
 4542 }
 4543 
 4544 void Assembler::orb(Address dst, int imm8) {
 4545   InstructionMark im(this);
 4546   prefix(dst);
 4547   emit_int8((unsigned char)0x80);
 4548   emit_operand(rcx, dst, 1);
 4549   emit_int8(imm8);
 4550 }
 4551 
 4552 void Assembler::eorb(Register dst, Address src, int imm8, bool no_flags) {
 4553   InstructionMark im(this);
 4554   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4555   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
 4556   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4557   emit_int8((unsigned char)0x80);
 4558   emit_operand(rcx, src, 1);
 4559   emit_int8(imm8);
 4560 }
 4561 
 4562 void Assembler::orb(Address dst, Register src) {
 4563   InstructionMark im(this);
 4564   prefix(dst, src, true);
 4565   emit_int8(0x08);
 4566   emit_operand(src, dst, 0);
 4567 }
 4568 
 4569 void Assembler::eorb(Register dst, Address src1, Register src2, bool no_flags) {
 4570   InstructionMark im(this);
 4571   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4572   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
 4573   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 4574   emit_int8(0x08);
 4575   emit_operand(src2, src1, 0);
 4576 }
 4577 
 4578 void Assembler::packsswb(XMMRegister dst, XMMRegister src) {
 4579   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 4580   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4581   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4582   emit_int16(0x63, (0xC0 | encode));
 4583 }
 4584 
 4585 void Assembler::vpacksswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4586   assert(UseAVX > 0, "some form of AVX must be enabled");
 4587   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4588   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4589   emit_int16(0x63, (0xC0 | encode));
 4590 }
 4591 
 4592 void Assembler::packssdw(XMMRegister dst, XMMRegister src) {
 4593   assert(VM_Version::supports_sse2(), "");
 4594   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4595   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4596   emit_int16(0x6B, (0xC0 | encode));
 4597 }
 4598 
 4599 void Assembler::vpackssdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4600   assert(UseAVX > 0, "some form of AVX must be enabled");
 4601   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4602   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4603   emit_int16(0x6B, (0xC0 | encode));
 4604 }
 4605 
 4606 void Assembler::packuswb(XMMRegister dst, Address src) {
 4607   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4608   assert((UseAVX > 0), "SSE form requires a 16-byte-aligned address");
 4609   InstructionMark im(this);
 4610   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4611   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 4612   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4613   emit_int8(0x67);
 4614   emit_operand(dst, src, 0);
 4615 }
 4616 
 4617 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
 4618   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 4619   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4620   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4621   emit_int16(0x67, (0xC0 | encode));
 4622 }
 4623 
 4624 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4625   assert(UseAVX > 0, "some form of AVX must be enabled");
 4626   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4627   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4628   emit_int16(0x67, (0xC0 | encode));
 4629 }
 4630 
 4631 void Assembler::packusdw(XMMRegister dst, XMMRegister src) {
 4632   assert(VM_Version::supports_sse4_1(), "");
 4633   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4634   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4635   emit_int16(0x2B, (0xC0 | encode));
 4636 }
 4637 
 4638 void Assembler::vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4639   assert(UseAVX > 0, "some form of AVX must be enabled");
 4640   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4641   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4642   emit_int16(0x2B, (0xC0 | encode));
 4643 }
 4644 
 4645 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
 4646   assert(VM_Version::supports_avx2(), "");
 4647   assert(vector_len != AVX_128bit, "");
 4648   // VEX.256.66.0F3A.W1 00 /r ib
 4649   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4650   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4651   emit_int24(0x00, (0xC0 | encode), imm8);
 4652 }
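// Each 2-bit field of imm8 selects the source qword for the corresponding
// destination lane; e.g. (illustrative) imm8 == 0x1B (0b00011011) reverses
// the four qwords of a 256-bit vector.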
 4653 
 4654 void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4655   assert(vector_len == AVX_256bit ? VM_Version::supports_avx512vl() :
 4656          vector_len == AVX_512bit ? VM_Version::supports_evex()     : false, "not supported");
 4657   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4658   attributes.set_is_evex_instruction();
 4659   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4660   emit_int16(0x36, (0xC0 | encode));
 4661 }
 4662 
 4663 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4664   assert(VM_Version::supports_avx512_vbmi(), "");
 4665   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4666   attributes.set_is_evex_instruction();
 4667   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4668   emit_int16((unsigned char)0x8D, (0xC0 | encode));
 4669 }
 4670 
 4671 void Assembler::vpermb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 4672   assert(VM_Version::supports_avx512_vbmi(), "");
 4673   InstructionMark im(this);
 4674   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4675   attributes.set_is_evex_instruction();
 4676   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4677   emit_int8((unsigned char)0x8D);
 4678   emit_operand(dst, src, 0);
 4679 }
 4680 
 4681 void Assembler::vpermw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4682   assert(vector_len == AVX_128bit ? VM_Version::supports_avx512vlbw() :
 4683          vector_len == AVX_256bit ? VM_Version::supports_avx512vlbw() :
 4684          vector_len == AVX_512bit ? VM_Version::supports_avx512bw()   : false, "not supported");
 4685   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 4686   attributes.set_is_evex_instruction();
 4687   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4688   emit_int16((unsigned char)0x8D, (0xC0 | encode));
 4689 }
 4690 
 4691 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4692   assert((vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
 4693          (vector_len == AVX_512bit && VM_Version::supports_evex()), "");
 4694   // VEX.NDS.256.66.0F38.W0 36 /r
 4695   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4696   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4697   emit_int16(0x36, (0xC0 | encode));
 4698 }
 4699 
 4700 void Assembler::vpermd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 4701   assert((vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
 4702          (vector_len == AVX_512bit && VM_Version::supports_evex()), "");
 4703   // VEX.NDS.256.66.0F38.W0 36 /r
 4704   InstructionMark im(this);
 4705   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4706   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4707   emit_int8(0x36);
 4708   emit_operand(dst, src, 0);
 4709 }
 4710 
 4711 void Assembler::vpermps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4712   assert((vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
 4713          (vector_len == AVX_512bit && VM_Version::supports_evex()), "");
 4714   // VEX.NDS.XXX.66.0F38.W0 16 /r
 4715   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4716   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4717   emit_int16(0x16, (0xC0 | encode));
 4718 }
 4719 
4720 void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
 4721   assert(VM_Version::supports_avx2(), "");
 4722   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4723   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4724   emit_int24(0x46, (0xC0 | encode), imm8);
 4725 }
 4726 
 4727 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
 4728   assert(VM_Version::supports_avx(), "");
 4729   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4730   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4731   emit_int24(0x06, (0xC0 | encode), imm8);
 4732 }
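// imm8 picks a 128-bit half per destination lane: bits 1:0 select the low
// lane's source (0/1 = nds low/high, 2/3 = src low/high), bits 5:4 the high
// lane's, and bits 3/7 zero the lane instead. E.g. (illustrative) imm8 == 0x31
// puts nds's high half in the low lane and src's high half in the high lane.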
 4733 
 4734 void Assembler::vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
 4735   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 4736   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 4737   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4738   emit_int24(0x04, (0xC0 | encode), imm8);
 4739 }
 4740 
 4741 void Assembler::vpermilps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4742   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 4743   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4744   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4745   emit_int16(0x0C, (0xC0 | encode));
 4746 }
 4747 
 4748 void Assembler::vpermilpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
 4749   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
 4750   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(),/* legacy_mode */ false,/* no_mask_reg */ true, /* uses_vl */ false);
 4751   attributes.set_rex_vex_w_reverted();
 4752   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4753   emit_int24(0x05, (0xC0 | encode), imm8);
 4754 }
 4755 
 4756 void Assembler::vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
 4757   assert(vector_len <= AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex(), "");
 4758   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */false, /* no_mask_reg */ true, /* uses_vl */ false);
 4759   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4760   emit_int24(0x01, (0xC0 | encode), imm8);
 4761 }
 4762 
 4763 void Assembler::evpmultishiftqb(XMMRegister dst, XMMRegister ctl, XMMRegister src, int vector_len) {
 4764   assert(VM_Version::supports_avx512_vbmi(), "");
 4765   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4766   attributes.set_is_evex_instruction();
 4767   int encode = vex_prefix_and_encode(dst->encoding(), ctl->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4768   emit_int16((unsigned char)0x83, (unsigned char)(0xC0 | encode));
 4769 }
 4770 
 4771 void Assembler::pause() {
 4772   emit_int16((unsigned char)0xF3, (unsigned char)0x90);
 4773 }
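
// Usage sketch (illustrative only, not emitted by this file): pause encodes as
// F3 90 and is typically placed in spin-wait loops to reduce power and memory
// order mis-speculation; with a MacroAssembler-style emitter one might write:
//   Label spin;
//   __ bind(spin);
//   __ pause();
//   __ decrementl(rcx);                  // hypothetical spin counter
//   __ jccb(Assembler::notZero, spin);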
 4774 
 4775 void Assembler::ud2() {
 4776   emit_int16(0x0F, 0x0B);
 4777 }
 4778 
 4779 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
 4780   assert(VM_Version::supports_sse4_2(), "");
 4781   assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs as BASE or INDEX of address operand");
 4782   InstructionMark im(this);
 4783   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4784   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4785   emit_int8(0x61);
 4786   emit_operand(dst, src, 1);
 4787   emit_int8(imm8);
 4788 }
 4789 
 4790 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
 4791   assert(VM_Version::supports_sse4_2(), "");
 4792   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4793   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4794   emit_int24(0x61, (0xC0 | encode), imm8);
 4795 }
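
// Usage note (sketch): pcmpestri takes its string lengths implicitly in
// rax/rdx and returns the result index in rcx. A caller might set up
// (the mode byte 0x0D below is only an example value):
//   __ movl(rax, 16);                    // length of the xmm0 string
//   __ movl(rdx, 16);                    // length of the xmm1 string
//   __ pcmpestri(xmm0, xmm1, 0x0D);
//   // rcx now holds the index of the first match, or the element count if none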
 4796 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4798 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
 4799   assert(VM_Version::supports_sse2(), "");
 4800   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4801   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4802   emit_int16(0x74, (0xC0 | encode));
 4803 }
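
// Worked example (illustrative): pcmpeqb(xmm0, xmm1) compares all 16 byte
// lanes; if xmm0 = {1, 2, 3, ...} and xmm1 = {1, 9, 3, ...}, then xmm0 becomes
// {0xFF, 0x00, 0xFF, ...}: equal lanes turn all-ones, non-equal lanes zero.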
 4804 
 4805 void Assembler::vpcmpCCbwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
 4806   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
 4807   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
 4808   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4809   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4810   emit_int16(cond_encoding, (0xC0 | encode));
 4811 }
 4812 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4814 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4815   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
 4816   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
 4817   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4818   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4819   emit_int16(0x74, (0xC0 | encode));
 4820 }
 4821 
 4822 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
 4823   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
 4824   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
 4825   InstructionMark im(this);
 4826   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4827   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4828   emit_int8(0x74);
 4829   emit_operand(dst, src2, 0);
 4830 }
 4831 
// In this context, kdst receives the mask that identifies the lanes that compare equal
 4833 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
 4834   assert(VM_Version::supports_avx512bw(), "");
 4835   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4836   attributes.set_is_evex_instruction();
 4837   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4838   emit_int16(0x74, (0xC0 | encode));
 4839 }
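
// Caller-side sketch (assumed pattern): the k-register result is usually moved
// to a GPR to count or locate the matching lanes, e.g. for a 512-bit compare:
//   evpcmpeqb(k2, xmm0, xmm1, AVX_512bit);
//   kmovql(rax, k2);                     // one bit per equal byte lane
//   popcntq(rcx, rax);                   // rcx = number of equal lanes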
 4840 
 4841 void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
 4842   assert(VM_Version::supports_avx512vlbw(), "");
 4843   InstructionMark im(this);
 4844   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4845   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4846   attributes.set_is_evex_instruction();
 4847   int dst_enc = kdst->encoding();
 4848   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4849   emit_int8(0x64);
 4850   emit_operand(as_Register(dst_enc), src, 0);
 4851 }
 4852 
 4853 void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
 4854   assert(VM_Version::supports_avx512vlbw(), "");
 4855   InstructionMark im(this);
 4856   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 4857   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4858   attributes.reset_is_clear_context();
 4859   attributes.set_embedded_opmask_register_specifier(mask);
 4860   attributes.set_is_evex_instruction();
 4861   int dst_enc = kdst->encoding();
 4862   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4863   emit_int8(0x64);
 4864   emit_operand(as_Register(dst_enc), src, 0);
 4865 }
 4866 
 4867 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
 4868   assert(VM_Version::supports_avx512vlbw(), "");
 4869   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4870   attributes.set_is_evex_instruction();
 4871   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4872   emit_int24(0x3E, (0xC0 | encode), vcc);
 4873 }
 4874 
 4875 void Assembler::evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
 4876   assert(VM_Version::supports_avx512vl(), "");
 4877   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4878   attributes.set_is_evex_instruction();
 4879   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4880   emit_int24(0x1E, (0xC0 | encode), vcc);
 4881 }
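
// Predicate note (sketch): the trailing immediate is the ComparisonPredicate
// (eq/lt/le/false/neq/nlt/nle/true). Assuming the usual enum values,
//   evpcmpuq(k1, xmm2, xmm3, Assembler::lt, AVX_512bit);
// sets a bit in k1 for every 64-bit lane where xmm2 < xmm3, compared unsigned.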
 4882 
 4883 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
 4884   assert(VM_Version::supports_avx512vlbw(), "");
 4885   InstructionMark im(this);
 4886   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4887   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4888   attributes.set_is_evex_instruction();
 4889   int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 4891   emit_int8(0x3E);
 4892   emit_operand(as_Register(dst_enc), src, 1);
 4893   emit_int8(vcc);
 4894 }
 4895 
 4896 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
 4897   assert(VM_Version::supports_avx512bw(), "");
 4898   InstructionMark im(this);
 4899   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4900   attributes.set_is_evex_instruction();
 4901   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4902   int dst_enc = kdst->encoding();
 4903   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4904   emit_int8(0x74);
 4905   emit_operand(as_Register(dst_enc), src, 0);
 4906 }
 4907 
 4908 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
 4909   assert(VM_Version::supports_avx512vlbw(), "");
 4910   InstructionMark im(this);
 4911   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 4912   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4913   attributes.reset_is_clear_context();
 4914   attributes.set_embedded_opmask_register_specifier(mask);
 4915   attributes.set_is_evex_instruction();
 4916   vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4917   emit_int8(0x74);
 4918   emit_operand(as_Register(kdst->encoding()), src, 0);
 4919 }
 4920 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4922 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
 4923   assert(VM_Version::supports_sse2(), "");
 4924   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4925   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4926   emit_int16(0x75, (0xC0 | encode));
 4927 }
 4928 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4930 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4931   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
 4932   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
 4933   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4934   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4935   emit_int16(0x75, (0xC0 | encode));
 4936 }
 4937 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4939 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 4940   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
 4941   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
 4942   InstructionMark im(this);
 4943   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4944   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4945   emit_int8(0x75);
 4946   emit_operand(dst, src, 0);
 4947 }
 4948 
// In this context, kdst receives the mask that identifies the lanes that compare equal
 4950 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
 4951   assert(VM_Version::supports_avx512bw(), "");
 4952   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4953   attributes.set_is_evex_instruction();
 4954   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4955   emit_int16(0x75, (0xC0 | encode));
 4956 }
 4957 
 4958 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
 4959   assert(VM_Version::supports_avx512bw(), "");
 4960   InstructionMark im(this);
 4961   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4962   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 4963   attributes.set_is_evex_instruction();
 4964   int dst_enc = kdst->encoding();
 4965   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4966   emit_int8(0x75);
 4967   emit_operand(as_Register(dst_enc), src, 0);
 4968 }
 4969 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4971 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
 4972   assert(VM_Version::supports_sse2(), "");
 4973   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4974   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4975   emit_int16(0x76, (0xC0 | encode));
 4976 }
 4977 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 4979 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 4980   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
 4981   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
 4982   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4983   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4984   emit_int16(0x76, (0xC0 | encode));
 4985 }
 4986 
// In this context, kdst receives the mask that identifies the lanes that compare equal
 4988 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
 4989   assert(VM_Version::supports_evex(), "");
 4990   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 4991   attributes.set_is_evex_instruction();
 4992   attributes.reset_is_clear_context();
 4993   attributes.set_embedded_opmask_register_specifier(mask);
 4994   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4995   emit_int16(0x76, (0xC0 | encode));
 4996 }
 4997 
 4998 void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
 4999   assert(VM_Version::supports_evex(), "");
 5000   InstructionMark im(this);
 5001   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5002   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 5003   attributes.set_is_evex_instruction();
 5004   attributes.reset_is_clear_context();
 5005   attributes.set_embedded_opmask_register_specifier(mask);
 5006   int dst_enc = kdst->encoding();
 5007   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5008   emit_int8(0x76);
 5009   emit_operand(as_Register(dst_enc), src, 0);
 5010 }
 5011 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 5013 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
 5014   assert(VM_Version::supports_sse4_1(), "");
 5015   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5016   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5017   emit_int16(0x29, (0xC0 | encode));
 5018 }
 5019 
 5020 void Assembler::evpcmpeqq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
 5021   assert(VM_Version::supports_evex(), "");
 5022   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5023   attributes.set_is_evex_instruction();
 5024   attributes.reset_is_clear_context();
 5025   attributes.set_embedded_opmask_register_specifier(mask);
 5026   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5027   emit_int16(0x29, (0xC0 | encode));
 5028 }
 5029 
 5030 void Assembler::vpcmpCCq(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, int vector_len) {
 5031   assert(VM_Version::supports_avx(), "");
 5032   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5033   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5034   emit_int16(cond_encoding, (0xC0 | encode));
 5035 }
 5036 
// In this context, the dst vector contains the lanes that compare equal; non-equal lanes are zeroed in dst
 5038 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 5039   assert(VM_Version::supports_avx(), "");
 5040   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5041   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5042   emit_int16(0x29, (0xC0 | encode));
 5043 }
 5044 
// In this context, kdst receives the mask that identifies the lanes that compare equal
 5046 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
 5047   assert(VM_Version::supports_evex(), "");
 5048   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5049   attributes.reset_is_clear_context();
 5050   attributes.set_is_evex_instruction();
 5051   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5052   emit_int16(0x29, (0xC0 | encode));
 5053 }
 5054 
// In this context, kdst receives the mask that identifies the lanes that compare equal
 5056 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
 5057   assert(VM_Version::supports_evex(), "");
 5058   InstructionMark im(this);
 5059   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5060   attributes.reset_is_clear_context();
 5061   attributes.set_is_evex_instruction();
 5062   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 5063   int dst_enc = kdst->encoding();
 5064   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5065   emit_int8(0x29);
 5066   emit_operand(as_Register(dst_enc), src, 0);
 5067 }
 5068 
 5069 void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
 5070   assert(VM_Version::supports_sse4_1(), "");
 5071   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5072   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5073   emit_int16(0x37, (0xC0 | encode));
 5074 }
 5075 
 5076 void Assembler::pmovmskb(Register dst, XMMRegister src) {
 5077   assert(VM_Version::supports_sse2(), "");
 5078   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5079   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5080   emit_int16((unsigned char)0xD7, (0xC0 | encode));
 5081 }
 5082 
 5083 void Assembler::vpmovmskb(Register dst, XMMRegister src, int vec_enc) {
 5084   assert((VM_Version::supports_avx() && vec_enc == AVX_128bit) ||
 5085          (VM_Version::supports_avx2() && vec_enc  == AVX_256bit), "");
 5086   InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5087   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5088   emit_int16((unsigned char)0xD7, (0xC0 | encode));
 5089 }
 5090 
 5091 void Assembler::vmovmskps(Register dst, XMMRegister src, int vec_enc) {
 5092   assert(VM_Version::supports_avx(), "");
 5093   InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5094   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 5095   emit_int16(0x50, (0xC0 | encode));
 5096 }
 5097 
 5098 void Assembler::vmovmskpd(Register dst, XMMRegister src, int vec_enc) {
 5099   assert(VM_Version::supports_avx(), "");
 5100   InstructionAttr attributes(vec_enc, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 5101   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5102   emit_int16(0x50, (0xC0 | encode));
 5103 }
 5104 
 5105 
 5106 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
 5107   assert(VM_Version::supports_sse4_1(), "");
 5108   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5109   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5110   emit_int24(0x16, (0xC0 | encode), imm8);
 5111 }
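
// Round-trip sketch: pextrd(rax, xmm0, 2) copies dword lane 2 of xmm0 into
// eax, and pinsrd(xmm0, rax, 2) (defined below) writes it back; only the low
// two bits of imm8 select the lane, higher bits are ignored by the hardware.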
 5112 
 5113 void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
 5114   assert(VM_Version::supports_sse4_1(), "");
 5115   InstructionMark im(this);
 5116   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5117   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 5118   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5119   emit_int8(0x16);
 5120   emit_operand(src, dst, 1);
 5121   emit_int8(imm8);
 5122 }
 5123 
 5124 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
 5125   assert(VM_Version::supports_sse4_1(), "");
 5126   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5127   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5128   emit_int24(0x16, (0xC0 | encode), imm8);
 5129 }
 5130 
 5131 void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
 5132   assert(VM_Version::supports_sse4_1(), "");
 5133   InstructionMark im(this);
 5134   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5135   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 5136   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5137   emit_int8(0x16);
 5138   emit_operand(src, dst, 1);
 5139   emit_int8(imm8);
 5140 }
 5141 
 5142 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
 5143   assert(VM_Version::supports_sse2(), "");
 5144   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5145   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5146   emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8);
 5147 }
 5148 
 5149 void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
 5150   assert(VM_Version::supports_sse4_1(), "");
 5151   InstructionMark im(this);
 5152   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5153   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
 5154   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5155   emit_int8(0x15);
 5156   emit_operand(src, dst, 1);
 5157   emit_int8(imm8);
 5158 }
 5159 
 5160 void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
 5161   assert(VM_Version::supports_sse4_1(), "");
 5162   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5163   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5164   emit_int24(0x14, (0xC0 | encode), imm8);
 5165 }
 5166 
 5167 void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
 5168   assert(VM_Version::supports_sse4_1(), "");
 5169   InstructionMark im(this);
 5170   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5171   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
 5172   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5173   emit_int8(0x14);
 5174   emit_operand(src, dst, 1);
 5175   emit_int8(imm8);
 5176 }
 5177 
 5178 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
 5179   assert(VM_Version::supports_sse4_1(), "");
 5180   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5181   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5182   emit_int24(0x22, (0xC0 | encode), imm8);
 5183 }
 5184 
 5185 void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
 5186   assert(VM_Version::supports_sse4_1(), "");
 5187   InstructionMark im(this);
 5188   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5189   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 5190   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5191   emit_int8(0x22);
 5192   emit_operand(dst, src, 1);
 5193   emit_int8(imm8);
 5194 }
 5195 
 5196 void Assembler::vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
 5197   assert(VM_Version::supports_avx(), "");
 5198   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5199   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5200   emit_int24(0x22, (0xC0 | encode), imm8);
 5201 }
 5202 
 5203 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
 5204   assert(VM_Version::supports_sse4_1(), "");
 5205   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5206   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5207   emit_int24(0x22, (0xC0 | encode), imm8);
 5208 }
 5209 
 5210 void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
 5211   assert(VM_Version::supports_sse4_1(), "");
 5212   InstructionMark im(this);
 5213   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5214   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 5215   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5216   emit_int8(0x22);
 5217   emit_operand(dst, src, 1);
 5218   emit_int8(imm8);
 5219 }
 5220 
 5221 void Assembler::vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
 5222   assert(VM_Version::supports_avx(), "");
 5223   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
 5224   int encode = vex_prefix_and_encode(dst->encoding(),  nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5225   emit_int24(0x22, (0xC0 | encode), imm8);
 5226 }
 5227 
 5228 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
 5229   assert(VM_Version::supports_sse2(), "");
 5230   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5231   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 5232   emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
 5233 }
 5234 
 5235 void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
 5236   assert(VM_Version::supports_sse2(), "");
 5237   InstructionMark im(this);
 5238   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5239   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
 5240   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5241   emit_int8((unsigned char)0xC4);
 5242   emit_operand(dst, src, 1);
 5243   emit_int8(imm8);
 5244 }
 5245 
 5246 void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
 5247   assert(VM_Version::supports_avx(), "");
 5248   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5249   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
 5250   emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
 5251 }
 5252 
 5253 void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
 5254   assert(VM_Version::supports_sse4_1(), "");
 5255   InstructionMark im(this);
 5256   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5257   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
 5258   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5259   emit_int8(0x20);
 5260   emit_operand(dst, src, 1);
 5261   emit_int8(imm8);
 5262 }
 5263 
 5264 void Assembler::pinsrb(XMMRegister dst, Register src, int imm8) {
 5265   assert(VM_Version::supports_sse4_1(), "");
 5266   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5267   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5268   emit_int24(0x20, (0xC0 | encode), imm8);
 5269 }
 5270 
 5271 void Assembler::vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
 5272   assert(VM_Version::supports_avx(), "");
 5273   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
 5274   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 5275   emit_int24(0x20, (0xC0 | encode), imm8);
 5276 }
 5277 
 5278 void Assembler::insertps(XMMRegister dst, XMMRegister src, int imm8) {
 5279   assert(VM_Version::supports_sse4_1(), "");
 5280   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 5281   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5282   emit_int24(0x21, (0xC0 | encode), imm8);
 5283 }
 5284 
 5285 void Assembler::vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
 5286   assert(VM_Version::supports_avx(), "");
 5287   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 5288   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5289   emit_int24(0x21, (0xC0 | encode), imm8);
 5290 }
 5291 
 5292 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
 5293   assert(VM_Version::supports_sse4_1(), "");
 5294   InstructionMark im(this);
 5295   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5296   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 5297   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5298   emit_int8(0x30);
 5299   emit_operand(dst, src, 0);
 5300 }
 5301 
 5302 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
 5303   assert(VM_Version::supports_sse4_1(), "");
 5304   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5305   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5306   emit_int16(0x30, (0xC0 | encode));
 5307 }
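
// Worked example (illustrative): pmovzxbw(xmm0, xmm1) zero-extends the low
// 8 bytes of xmm1 into the 8 word lanes of xmm0, so bytes {0x01, 0x80, ...}
// become words {0x0001, 0x0080, ...}; contrast pmovsxbw below, which
// sign-extends 0x80 to 0xFF80.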
 5308 
 5309 void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
 5310   assert(VM_Version::supports_sse4_1(), "");
 5311   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5312   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5313   emit_int16(0x20, (0xC0 | encode));
 5314 }
 5315 
 5316 void Assembler::pmovzxdq(XMMRegister dst, XMMRegister src) {
 5317   assert(VM_Version::supports_sse4_1(), "");
 5318   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5319   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5320   emit_int16(0x35, (0xC0 | encode));
 5321 }
 5322 
 5323 void Assembler::pmovsxbd(XMMRegister dst, XMMRegister src) {
 5324   assert(VM_Version::supports_sse4_1(), "");
 5325   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5326   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5327   emit_int16(0x21, (0xC0 | encode));
 5328 }
 5329 
 5330 void Assembler::pmovzxbd(XMMRegister dst, XMMRegister src) {
 5331   assert(VM_Version::supports_sse4_1(), "");
 5332   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5333   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5334   emit_int16(0x31, (0xC0 | encode));
 5335 }
 5336 
 5337 void Assembler::pmovsxbq(XMMRegister dst, XMMRegister src) {
 5338   assert(VM_Version::supports_sse4_1(), "");
 5339   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5340   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5341   emit_int16(0x22, (0xC0 | encode));
 5342 }
 5343 
 5344 void Assembler::pmovsxwd(XMMRegister dst, XMMRegister src) {
 5345   assert(VM_Version::supports_sse4_1(), "");
 5346   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5347   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5348   emit_int16(0x23, (0xC0 | encode));
 5349 }
 5350 
 5351 void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
 5352   assert(VM_Version::supports_avx(), "");
 5353   InstructionMark im(this);
 5354   assert(dst != xnoreg, "sanity");
 5355   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5356   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 5357   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5358   emit_int8(0x30);
 5359   emit_operand(dst, src, 0);
 5360 }
 5361 
 5362 void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
 5366   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5367   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5368   emit_int16(0x30, (unsigned char) (0xC0 | encode));
 5369 }
 5370 
 5371 void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
 5375   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5376   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5377   emit_int16(0x20, (0xC0 | encode));
 5378 }
 5379 
 5380 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
 5381   assert(VM_Version::supports_avx512vlbw(), "");
 5382   assert(dst != xnoreg, "sanity");
 5383   InstructionMark im(this);
 5384   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 5385   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 5386   attributes.set_embedded_opmask_register_specifier(mask);
 5387   attributes.set_is_evex_instruction();
 5388   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5389   emit_int8(0x30);
 5390   emit_operand(dst, src, 0);
 5391 }
 5392 
 5393 void Assembler::evpmovzxbd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
 5394   assert(VM_Version::supports_avx512vl(), "");
 5395   assert(dst != xnoreg, "sanity");
 5396   InstructionMark im(this);
 5397   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit); // byte-to-dword widening reads a quarter-width memory operand
 5399   attributes.set_embedded_opmask_register_specifier(mask);
 5400   attributes.set_is_evex_instruction();
 5401   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5402   emit_int8(0x31);
 5403   emit_operand(dst, src, 0);
 5404 }
 5405 
 5406 void Assembler::evpmovzxbd(XMMRegister dst, Address src, int vector_len) {
 5407   evpmovzxbd(dst, k0, src, vector_len);
 5408 }
 5409 
 5410 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 5411   assert(VM_Version::supports_evex(), "");
 5412   // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r
 5413   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5414   attributes.set_is_evex_instruction();
 5415   attributes.set_embedded_opmask_register_specifier(mask);
 5416   if (merge) {
 5417     attributes.reset_is_clear_context();
 5418   }
 5419   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5420   emit_int16((unsigned char)0xDB, (0xC0 | encode));
 5421 }
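
// Masking sketch (caller-side, values are examples): merge == false requests
// zeroing semantics, so lanes whose k-bit is clear become zero; merge == true
// keeps dst's previous contents in those lanes:
//   evpandd(xmm0, k1, xmm1, xmm2, /* merge */ true, AVX_512bit);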
 5422 
 5423 void Assembler::vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len) {
 5424   assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
 5425   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5426   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5427   emit_int16(0x35, (0xC0 | encode));
 5428 }
 5429 
 5430 void Assembler::vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len) {
 5431   assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
 5432   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5433   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5434   emit_int16(0x31, (0xC0 | encode));
 5435 }
 5436 
 5437 void Assembler::vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len) {
 5438   assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
 5439   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5440   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5441   emit_int16(0x32, (0xC0 | encode));
 5442 }
 5443 
 5444 void Assembler::vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len) {
 5445   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5446          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5447              VM_Version::supports_evex(), "");
 5448   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5449   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5450   emit_int16(0x21, (0xC0 | encode));
 5451 }
 5452 
 5453 void Assembler::vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len) {
 5454   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5455          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5456              VM_Version::supports_evex(), "");
 5457   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5458   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5459   emit_int16(0x22, (0xC0 | encode));
 5460 }
 5461 
 5462 void Assembler::vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len) {
 5463   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5464          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5465              VM_Version::supports_evex(), "");
 5466   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5467   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5468   emit_int16(0x23, (0xC0 | encode));
 5469 }
 5470 
 5471 void Assembler::vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len) {
 5472   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5473          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5474              VM_Version::supports_evex(), "");
 5475   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5476   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5477   emit_int16(0x24, (0xC0 | encode));
 5478 }
 5479 
 5480 void Assembler::vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len) {
 5481   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5482          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5483              VM_Version::supports_evex(), "");
 5484   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5485   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5486   emit_int16(0x25, (0xC0 | encode));
 5487 }
 5488 
 5489 void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
 5490   assert(VM_Version::supports_avx512vlbw(), "");
 5491   assert(src != xnoreg, "sanity");
 5492   InstructionMark im(this);
 5493   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5494   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 5495   attributes.set_is_evex_instruction();
 5496   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 5497   emit_int8(0x30);
 5498   emit_operand(src, dst, 0);
 5499 }
 5500 
 5501 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
 5502   assert(VM_Version::supports_avx512vlbw(), "");
 5503   assert(src != xnoreg, "sanity");
 5504   InstructionMark im(this);
 5505   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5506   attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
 5507   attributes.reset_is_clear_context();
 5508   attributes.set_embedded_opmask_register_specifier(mask);
 5509   attributes.set_is_evex_instruction();
 5510   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 5511   emit_int8(0x30);
 5512   emit_operand(src, dst, 0);
 5513 }
 5514 
 5515 void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) {
 5516   assert(VM_Version::supports_evex(), "");
 5517   assert(src != xnoreg, "sanity");
 5518   InstructionMark im(this);
 5519   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5520   attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit);
 5521   attributes.set_is_evex_instruction();
 5522   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 5523   emit_int8(0x31);
 5524   emit_operand(src, dst, 0);
 5525 }
 5526 
 5527 void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, "");
 5531   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5532   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5533   emit_int16(0x33, (0xC0 | encode));
 5534 }
 5535 
 5536 void Assembler::vpmovzxwq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, "");
 5540   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5541   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5542   emit_int16(0x34, (0xC0 | encode));
 5543 }
 5544 
 5545 void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) {
 5546   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5547   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5548   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5549   emit_int16((unsigned char)0xF5, (0xC0 | encode));
 5550 }
 5551 
 5552 void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 5553   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5554     (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5555     (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), "");
 5556   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5557   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5558   emit_int16((unsigned char)0xF5, (0xC0 | encode));
 5559 }
 5560 
 5561 void Assembler::vpmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
 5565   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5566   int encode = simd_prefix_and_encode(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5567   emit_int16(0x04, (0xC0 | encode));
 5568 }
 5569 
 5570 void Assembler::vpmadd52luq(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
 5572 
 5573   InstructionMark im(this);
 5574   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5575 
 5576   if (VM_Version::supports_avx512ifma()) {
 5577     attributes.set_is_evex_instruction();
 5578     attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 5579   }
 5580   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5581   emit_int8((unsigned char)0xB4);
 5582   emit_operand(dst, src2, 0);
 5583 }
 5584 
 5585 void Assembler::vpmadd52luq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
 5587 
 5588   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5589 
 5590   if (VM_Version::supports_avx512ifma()) {
 5591     attributes.set_is_evex_instruction();
 5592   }
 5593   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5594   emit_int16((unsigned char)0xB4, (0xC0 | encode));
 5595 }
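
// Dispatch sketch: the same mnemonic is emitted VEX-encoded on AVX-IFMA parts
// and EVEX-encoded when AVX512-IFMA is available; a caller would typically
// pick the widest supported vector length (assumed selection logic):
//   int vlen = VM_Version::supports_avx512ifma() ? AVX_512bit : AVX_256bit;
//   vpmadd52luq(xmm0, xmm1, xmm2, vlen); // dst += low 52 bits of 52x52 products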
 5596 
 5597 void Assembler::evpmadd52luq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
 5598   evpmadd52luq(dst, k0, src1, src2, false, vector_len);
 5599 }
 5600 
 5601 void Assembler::evpmadd52luq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
 5602   assert(VM_Version::supports_avx512ifma(), "");
 5603   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5604   attributes.set_is_evex_instruction();
 5605   attributes.set_embedded_opmask_register_specifier(mask);
 5606   if (merge) {
 5607     attributes.reset_is_clear_context();
 5608   }
 5609 
 5610   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5611   emit_int16((unsigned char)0xB4, (0xC0 | encode));
 5612 }
 5613 
 5614 void Assembler::vpmadd52huq(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
 5616 
 5617   InstructionMark im(this);
 5618   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5619 
 5620   if (VM_Version::supports_avx512ifma()) {
 5621     attributes.set_is_evex_instruction();
 5622     attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 5623   }
 5624   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5625   emit_int8((unsigned char)0xB5);
 5626   emit_operand(dst, src2, 0);
 5627 }
 5628 
 5629 void Assembler::vpmadd52huq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert((VM_Version::supports_avxifma() && vector_len <= AVX_256bit) || (VM_Version::supports_avx512ifma() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl())), "");
 5631 
 5632   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5633 
 5634   if (VM_Version::supports_avx512ifma()) {
 5635     attributes.set_is_evex_instruction();
 5636   }
 5637   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5638   emit_int16((unsigned char)0xB5, (0xC0 | encode));
 5639 }
 5640 
 5641 void Assembler::evpmadd52huq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
 5642   evpmadd52huq(dst, k0, src1, src2, false, vector_len);
 5643 }
 5644 
 5645 void Assembler::evpmadd52huq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
 5646   assert(VM_Version::supports_avx512ifma(), "");
 5647   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5648   attributes.set_is_evex_instruction();
 5649   attributes.set_embedded_opmask_register_specifier(mask);
 5650   if (merge) {
 5651     attributes.reset_is_clear_context();
 5652   }
 5653 
 5654   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5655   emit_int16((unsigned char)0xB5, (0xC0 | encode));
 5656 }
 5657 
 5658 void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 5659   assert(VM_Version::supports_evex(), "");
 5660   assert(VM_Version::supports_avx512_vnni(), "must support vnni");
 5661   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5662   attributes.set_is_evex_instruction();
 5663   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5664   emit_int16(0x52, (0xC0 | encode));
 5665 }
 5666 
 5667 // generic
 5668 void Assembler::pop(Register dst) {
 5669   int encode = prefix_and_encode(dst->encoding());
 5670   emit_int8(0x58 | encode);
 5671 }
 5672 
 5673 void Assembler::popcntl(Register dst, Address src) {
 5674   assert(VM_Version::supports_popcnt(), "must support");
 5675   InstructionMark im(this);
 5676   emit_int8((unsigned char)0xF3);
 5677   prefix(src, dst, false, true /* is_map1 */);
 5678   emit_int8((unsigned char)0xB8);
 5679   emit_operand(dst, src, 0);
 5680 }
 5681 
 5682 void Assembler::epopcntl(Register dst, Address src, bool no_flags) {
 5683   assert(VM_Version::supports_popcnt(), "must support");
 5684   InstructionMark im(this);
 5685   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 5686   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 5687   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 5688   emit_int8((unsigned char)0x88);
 5689   emit_operand(dst, src, 0);
 5690 }
 5691 
 5692 void Assembler::popcntl(Register dst, Register src) {
 5693   assert(VM_Version::supports_popcnt(), "must support");
 5694   emit_int8((unsigned char)0xF3);
 5695   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 5696   emit_opcode_prefix_and_encoding((unsigned char)0xB8, 0xC0, encode);
 5697 }
 5698 
 5699 void Assembler::epopcntl(Register dst, Register src, bool no_flags) {
 5700   assert(VM_Version::supports_popcnt(), "must support");
 5701   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 5702   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 5703   emit_int16((unsigned char)0x88, (0xC0 | encode));
 5704 }
 5705 
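      // AVX512 vector population count. The byte/word forms (opcode 0x54)
      // require AVX512_BITALG, the dword/quadword forms (0x55) require
      // AVX512_VPOPCNTDQ; EVEX.W selects the wider element size of each pair.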
 5706 void Assembler::evpopcntb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 5707   assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
 5708   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 5709   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5710   attributes.set_embedded_opmask_register_specifier(mask);
 5711   attributes.set_is_evex_instruction();
 5712   if (merge) {
 5713     attributes.reset_is_clear_context();
 5714   }
 5715   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5716   emit_int16(0x54, (0xC0 | encode));
 5717 }
 5718 
 5719 void Assembler::evpopcntw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 5720   assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
 5721   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 5722   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5723   attributes.set_is_evex_instruction();
 5724   attributes.set_embedded_opmask_register_specifier(mask);
 5725   if (merge) {
 5726     attributes.reset_is_clear_context();
 5727   }
 5728   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5729   emit_int16(0x54, (0xC0 | encode));
 5730 }
 5731 
 5732 void Assembler::evpopcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 5733   assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
 5734   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 5735   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5736   attributes.set_is_evex_instruction();
 5737   attributes.set_embedded_opmask_register_specifier(mask);
 5738   if (merge) {
 5739     attributes.reset_is_clear_context();
 5740   }
 5741   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5742   emit_int16(0x55, (0xC0 | encode));
 5743 }
 5744 
 5745 void Assembler::evpopcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 5746   assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
 5747   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 5748   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5749   attributes.set_is_evex_instruction();
 5750   attributes.set_embedded_opmask_register_specifier(mask);
 5751   if (merge) {
 5752     attributes.reset_is_clear_context();
 5753   }
 5754   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5755   emit_int16(0x55, (0xC0 | encode));
 5756 }
 5757 
 5758 void Assembler::popf() {
 5759   emit_int8((unsigned char)0x9D);
 5760 }
 5761 
 5762 #ifndef _LP64 // no 32bit push/pop on amd64
 5763 void Assembler::popl(Address dst) {
 5764   // NOTE: this will adjust the stack by 8 bytes on 64-bit
 5765   InstructionMark im(this);
 5766   prefix(dst);
 5767   emit_int8((unsigned char)0x8F);
 5768   emit_operand(rax, dst, 0);
 5769 }
 5770 #endif
 5771 
 5772 void Assembler::prefetchnta(Address src) {
 5773   NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 5774   InstructionMark im(this);
 5775   prefix(src, true /* is_map1 */);
 5776   emit_int8(0x18);
 5777   emit_operand(rax, src, 0); // 0, src
 5778 }
 5779 
 5780 void Assembler::prefetchr(Address src) {
 5781   assert(VM_Version::supports_3dnow_prefetch(), "must support");
 5782   InstructionMark im(this);
 5783   prefix(src, true /* is_map1 */);
 5784   emit_int8(0x0D);
 5785   emit_operand(rax, src, 0); // 0, src
 5786 }
 5787 
 5788 void Assembler::prefetcht0(Address src) {
 5789   NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 5790   InstructionMark im(this);
 5791   prefix(src, true /* is_map1 */);
 5792   emit_int8(0x18);
 5793   emit_operand(rcx, src, 0); // 1, src
 5794 }
 5795 
 5796 void Assembler::prefetcht1(Address src) {
 5797   NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 5798   InstructionMark im(this);
 5799   prefix(src, true /* is_map1 */);
 5800   emit_int8(0x18);
 5801   emit_operand(rdx, src, 0); // 2, src
 5802 }
 5803 
 5804 void Assembler::prefetcht2(Address src) {
 5805   NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 5806   InstructionMark im(this);
 5807   prefix(src, true /* is_map1 */);
 5808   emit_int8(0x18);
 5809   emit_operand(rbx, src, 0); // 3, src
 5810 }
 5811 
 5812 void Assembler::prefetchw(Address src) {
 5813   assert(VM_Version::supports_3dnow_prefetch(), "must support");
 5814   InstructionMark im(this);
 5815   prefix(src, true /* is_map1 */);
 5816   emit_int8(0x0D);
 5817   emit_operand(rcx, src, 0); // 1, src
 5818 }
 5819 
 5820 void Assembler::prefix(Prefix p) {
 5821   emit_int8(p);
 5822 }
 5823 
 5824 void Assembler::prefix16(int prefix) {
 5825   assert(UseAPX, "APX features not enabled");
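        // Emits the two prefix bytes high byte first; e.g. prefix16(0xD50B)
        // emits 0xD5 (the APX REX2 prefix) followed by its 0x0B payload byte.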
 5826   emit_int8((prefix & 0xff00) >> 8);
 5827   emit_int8(prefix & 0xff);
 5828 }
 5829 
 5830 void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
 5831   assert(VM_Version::supports_ssse3(), "");
 5832   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5833   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5834   emit_int16(0x00, (0xC0 | encode));
 5835 }
 5836 
 5837 void Assembler::evpshufb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 5838   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 5839   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 5840   attributes.set_is_evex_instruction();
 5841   attributes.set_embedded_opmask_register_specifier(mask);
 5842   if (merge) {
 5843     attributes.reset_is_clear_context();
 5844   }
 5845   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5846   emit_int16(0x00, (0xC0 | encode));
 5847 }
 5848 
 5849 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 5850   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
 5851          vector_len == AVX_256bit? VM_Version::supports_avx2() :
 5852          vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
 5853   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5854   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5855   emit_int16(0x00, (0xC0 | encode));
 5856 }
 5857 
 5858 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 5859   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5860          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5861          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
 5862   InstructionMark im(this);
 5863   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5864   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 5865   simd_prefix(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5866   emit_int8(0x00);
 5867   emit_operand(dst, src, 0);
 5868 }
 5869 
 5870 void Assembler::pshufb(XMMRegister dst, Address src) {
 5871   assert(VM_Version::supports_ssse3(), "");
 5872   InstructionMark im(this);
 5873   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5874   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 5875   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 5876   emit_int8(0x00);
 5877   emit_operand(dst, src, 0);
 5878 }
 5879 
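      // imm8 holds four 2-bit source-dword selectors, lowest pair first;
      // e.g. pshufd(dst, src, 0x1B) reverses the four dwords of src into dst.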
 5880 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
 5881   assert(isByte(mode), "invalid value");
 5882   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5883   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 5884   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5885   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5886   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
 5887 }
 5888 
 5889 void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
 5890   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
 5891          (vector_len == AVX_256bit? VM_Version::supports_avx2() :
 5892          (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
 5893   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5894   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5895   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5896   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
 5897 }
 5898 
 5899 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
 5900   assert(isByte(mode), "invalid value");
 5901   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5902   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 5903   InstructionMark im(this);
 5904   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5905   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 5906   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5907   emit_int8(0x70);
 5908   emit_operand(dst, src, 1);
 5909   emit_int8(mode & 0xFF);
 5910 }
 5911 
 5912 void Assembler::pshufhw(XMMRegister dst, XMMRegister src, int mode) {
 5913   assert(isByte(mode), "invalid value");
 5914   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5915   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5916   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 5917   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
 5918 }
 5919 
 5920 void Assembler::vpshufhw(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
 5921   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5922          (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5923          (vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false)), "");
 5924   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5925   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5926   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 5927   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
 5928 }
 5929 
 5930 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
 5931   assert(isByte(mode), "invalid value");
 5932   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5933   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5934   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 5935   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
 5936 }
 5937 
 5938 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
 5939   assert(isByte(mode), "invalid value");
 5940   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5941   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 5942   InstructionMark im(this);
 5943   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5944   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 5945   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 5946   emit_int8(0x70);
 5947   emit_operand(dst, src, 1);
 5948   emit_int8(mode & 0xFF);
 5949 }
 5950 
 5951 void Assembler::vpshuflw(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
 5952   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 5953          (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 5954          (vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : false)), "");
 5955   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5956   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 5957   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 5958   emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
 5959 }
 5960 
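      // VSHUFI64X2 assembles dst from 128-bit lanes: the lower half of dst is
      // picked from nds and the upper half from src, per the imm8 2-bit fields.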
 5961 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
 5962   assert(VM_Version::supports_evex(), "requires EVEX support");
 5963   assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
 5964   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5965   attributes.set_is_evex_instruction();
 5966   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 5967   emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
 5968 }
 5969 
 5970 void Assembler::shufpd(XMMRegister dst, XMMRegister src, int imm8) {
 5971   assert(isByte(imm8), "invalid value");
 5972   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5973   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5974   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5975   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
 5976 }
 5977 
 5978 void Assembler::vshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
 5979   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5980   attributes.set_rex_vex_w_reverted();
 5981   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 5982   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
 5983 }
 5984 
 5985 void Assembler::shufps(XMMRegister dst, XMMRegister src, int imm8) {
 5986   assert(isByte(imm8), "invalid value");
 5987   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 5988   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5989   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 5990   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
 5991 }
 5992 
 5993 void Assembler::vshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
 5994   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 5995   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 5996   emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
 5997 }
 5998 
 5999 void Assembler::psrldq(XMMRegister dst, int shift) {
 6000   // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
 6001   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6002   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
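        // XMM3 is for /3 encoding: 66 0F 73 /3 ib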
 6003   int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6004   emit_int24(0x73, (0xC0 | encode), shift);
 6005 }
 6006 
 6007 void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 6008   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 6009          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 6010          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
 6011   InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 6012   int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6013   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
 6014 }
 6015 
 6016 void Assembler::pslldq(XMMRegister dst, int shift) {
 6017   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
 6018   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6019   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 6020   // XMM7 is for /7 encoding: 66 0F 73 /7 ib
 6021   int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6022   emit_int24(0x73, (0xC0 | encode), shift);
 6023 }
 6024 
 6025 void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 6026   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 6027          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 6028          vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
 6029   InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 6030   int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6031   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
 6032 }
 6033 
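      // PTEST sets ZF if (src & dst) == 0 and CF if (src & ~dst) == 0,
      // without writing any destination register.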
 6034 void Assembler::ptest(XMMRegister dst, Address src) {
 6035   assert(VM_Version::supports_sse4_1(), "");
 6036   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 6037   assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs");
 6038   InstructionMark im(this);
 6039   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6040   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6041   emit_int8(0x17);
 6042   emit_operand(dst, src, 0);
 6043 }
 6044 
 6045 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
 6046   assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), "");
 6047   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6048   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6049   emit_int8(0x17);
 6050   emit_int8((0xC0 | encode));
 6051 }
 6052 
 6053 void Assembler::vptest(XMMRegister dst, Address src) {
 6054   assert(VM_Version::supports_avx(), "");
 6055   assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs");
 6056   InstructionMark im(this);
 6057   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6058   assert(dst != xnoreg, "sanity");
 6059   // swap src<->dst for encoding
 6060   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6061   emit_int8(0x17);
 6062   emit_operand(dst, src, 0);
 6063 }
 6064 
 6065 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
 6066   assert(VM_Version::supports_avx(), "");
 6067   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6068   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6069   emit_int16(0x17, (0xC0 | encode));
 6070 }
 6071 
 6072 void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
 6073   assert(VM_Version::supports_avx(), "");
 6074   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6075   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6076   emit_int16(0x17, (0xC0 | encode));
 6077 }
 6078 
 6079 void Assembler::vtestps(XMMRegister dst, XMMRegister src, int vector_len) {
 6080   assert(VM_Version::supports_avx(), "");
 6081   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6082   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6083   emit_int16(0x0E, (0xC0 | encode));
 6084 }
 6085 
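      // VPTESTM sets opmask bit i iff (nds[i] & src[i]) != 0; the VPTESTNM
      // form below sets it iff that AND is zero.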
 6086 void Assembler::evptestmb(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 6087   assert(vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : VM_Version::supports_avx512vlbw(), "");
 6088   // Encoding: EVEX.NDS.XXX.66.0F38.W0 26 /r
 6089   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6090   attributes.set_is_evex_instruction();
 6091   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6092   emit_int16(0x26, (0xC0 | encode));
 6093 }
 6094 
 6095 void Assembler::evptestmd(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 6096   assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx512vl(), "");
 6097   // Encoding: EVEX.NDS.XXX.66.0F38.W0 27 /r
 6098   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6099   attributes.set_is_evex_instruction();
 6100   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 6101   emit_int16(0x27, (0xC0 | encode));
 6102 }
 6103 
 6104 void Assembler::evptestnmd(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 6105   assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx512vl(), "");
 6106   // Encoding: EVEX.NDS.XXX.F3.0F38.W0 27 /r
 6107   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6108   attributes.set_is_evex_instruction();
 6109   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 6110   emit_int16(0x27, (0xC0 | encode));
 6111 }
 6112 
 6113 void Assembler::punpcklbw(XMMRegister dst, Address src) {
 6114   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6115   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 6116   InstructionMark im(this);
 6117   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
 6118   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 6119   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6120   emit_int8(0x60);
 6121   emit_operand(dst, src, 0);
 6122 }
 6123 
 6124 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
 6125   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6126   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
 6127   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6128   emit_int16(0x60, (0xC0 | encode));
 6129 }
 6130 
 6131 void Assembler::punpckldq(XMMRegister dst, Address src) {
 6132   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6133   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 6134   InstructionMark im(this);
 6135   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6136   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 6137   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6138   emit_int8(0x62);
 6139   emit_operand(dst, src, 0);
 6140 }
 6141 
 6142 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
 6143   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6144   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6145   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6146   emit_int16(0x62, (0xC0 | encode));
 6147 }
 6148 
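      // PUNPCKLQDQ interleaves the low quadwords: dst[63:0] is unchanged and
      // dst[127:64] = src[63:0].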
 6149 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
 6150   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6151   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6152   attributes.set_rex_vex_w_reverted();
 6153   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6154   emit_int16(0x6C, (0xC0 | encode));
 6155 }
 6156 
 6157 void Assembler::evpunpcklqdq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
 6158   evpunpcklqdq(dst, k0, src1, src2, false, vector_len);
 6159 }
 6160 
 6161 void Assembler::evpunpcklqdq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
 6162   assert(VM_Version::supports_evex(), "requires AVX512F");
 6163   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 6164   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 6165   attributes.set_is_evex_instruction();
 6166   attributes.set_embedded_opmask_register_specifier(mask);
 6167   if (merge) {
 6168     attributes.reset_is_clear_context();
 6169   }
 6170 
 6171   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6172   emit_int16(0x6C, (0xC0 | encode));
 6173 }
 6174 
 6175 void Assembler::evpunpckhqdq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
 6176   evpunpckhqdq(dst, k0, src1, src2, false, vector_len);
 6177 }
 6178 
 6179 void Assembler::evpunpckhqdq(XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len) {
 6180   assert(VM_Version::supports_evex(), "requires AVX512F");
 6181   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 6182   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 6183   attributes.set_is_evex_instruction();
 6184   attributes.set_embedded_opmask_register_specifier(mask);
 6185   if (merge) {
 6186     attributes.reset_is_clear_context();
 6187   }
 6188 
 6189   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 6190   emit_int16(0x6D, (0xC0 | encode));
 6191 }
 6192 
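      // APX PUSH2/POP2 transfer a pair of GPRs with a single instruction. The
      // second register travels in EVEX.VVVV; with_ppx sets the W bit, which
      // carries the PPX (push-pop acceleration) hint.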
 6193 #ifdef _LP64
 6194 void Assembler::push2(Register src1, Register src2, bool with_ppx) {
 6195   assert(VM_Version::supports_apx_f(), "requires APX");
 6196   InstructionAttr attributes(0, /* rex_w */ with_ppx, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6197   /* EVEX.BASE */
 6198   int src_enc = src1->encoding();
 6199   /* EVEX.VVVV */
 6200   int nds_enc = src2->encoding();
 6201 
 6202   bool vex_b = (src_enc & 8) == 8;
 6203   bool evex_v = (nds_enc >= 16);
 6204   bool evex_b = (src_enc >= 16);
 6205 
 6206   // EVEX.ND = 1;
 6207   attributes.set_extended_context();
 6208   attributes.set_is_evex_instruction();
 6209   set_attributes(&attributes);
 6210 
 6211   evex_prefix(0, vex_b, 0, 0, evex_b, evex_v, false /*eevex_x*/, nds_enc, VEX_SIMD_NONE, /* map4 */ VEX_OPCODE_0F_3C);
 6212   emit_int16(0xFF, (0xC0 | (0x6 << 3) | (src_enc & 7)));
 6213 }
 6214 
 6215 void Assembler::pop2(Register src1, Register src2, bool with_ppx) {
 6216   assert(VM_Version::supports_apx_f(), "requires APX");
 6217   InstructionAttr attributes(0, /* rex_w */ with_ppx, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6218   /* EVEX.BASE */
 6219   int src_enc = src1->encoding();
 6220   /* EVEX.VVVV */
 6221   int nds_enc = src2->encoding();
 6222 
 6223   bool vex_b = (src_enc & 8) == 8;
 6224   bool evex_v = (nds_enc >= 16);
 6225   bool evex_b = (src_enc >= 16);
 6226 
 6227   // EVEX.ND = 1;
 6228   attributes.set_extended_context();
 6229   attributes.set_is_evex_instruction();
 6230   set_attributes(&attributes);
 6231 
 6232   evex_prefix(0, vex_b, 0, 0, evex_b, evex_v, false /*eevex_x*/, nds_enc, VEX_SIMD_NONE, /* map4 */ VEX_OPCODE_0F_3C);
 6233   emit_int16(0x8F, (0xC0 | (src_enc & 7)));
 6234 }
 6235 
 6236 void Assembler::push2p(Register src1, Register src2) {
 6237   push2(src1, src2, true);
 6238 }
 6239 
 6240 void Assembler::pop2p(Register src1, Register src2) {
 6241   pop2(src1, src2, true);
 6242 }
 6243 
 6244 void Assembler::pushp(Register src) {
 6245   assert(VM_Version::supports_apx_f(), "requires APX");
 6246   int encode = prefixq_and_encode_rex2(src->encoding());
 6247   emit_int8(0x50 | encode);
 6248 }
 6249 
 6250 void Assembler::popp(Register dst) {
 6251   assert(VM_Version::supports_apx_f(), "requires APX");
 6252   int encode = prefixq_and_encode_rex2(dst->encoding());
 6253   emit_int8((unsigned char)0x58 | encode);
 6254 }
 6255 #endif //_LP64
 6256 
 6257 
 6258 void Assembler::push(int32_t imm32) {
 6259   // in 64bits we push 64bits onto the stack but only
 6260   // take a 32bit immediate
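        // e.g. push(0x12345678) emits 68 78 56 34 12 (little-endian immediate)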
 6261   emit_int8(0x68);
 6262   emit_int32(imm32);
 6263 }
 6264 
 6265 void Assembler::push(Register src) {
 6266   int encode = prefix_and_encode(src->encoding());
 6267   emit_int8(0x50 | encode);
 6268 }
 6269 
 6270 void Assembler::pushf() {
 6271   emit_int8((unsigned char)0x9C);
 6272 }
 6273 
 6274 #ifndef _LP64 // no 32bit push/pop on amd64
 6275 void Assembler::pushl(Address src) {
 6276   // Note: this would push 64 bits on 64-bit
 6277   InstructionMark im(this);
 6278   prefix(src);
 6279   emit_int8((unsigned char)0xFF);
 6280   emit_operand(rsi, src, 0);
 6281 }
 6282 #endif
 6283 
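      // Group-2 rotates share opcodes D1/C1/D3 and are distinguished by the
      // ModRM reg field: /0 ROL, /1 ROR, /2 RCL, /3 RCR; hence the
      // 0xC0/0xC8/0xD0 ModRM bases below.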
 6284 void Assembler::rcll(Register dst, int imm8) {
 6285   assert(isShiftCount(imm8), "illegal shift count");
 6286   int encode = prefix_and_encode(dst->encoding());
 6287   if (imm8 == 1) {
 6288     emit_int16((unsigned char)0xD1, (0xD0 | encode));
 6289   } else {
 6290     emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
 6291   }
 6292 }
 6293 
 6294 void Assembler::ercll(Register dst, Register src, int imm8) {
 6295   assert(isShiftCount(imm8), "illegal shift count");
 6296   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6297   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
 6298   if (imm8 == 1) {
 6299     emit_int16((unsigned char)0xD1, (0xD0 | encode));
 6300   } else {
 6301     emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
 6302   }
 6303 }
 6304 
 6305 void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
 6306   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 6307   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6308   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 6309   emit_int16(0x53, (0xC0 | encode));
 6310 }
 6311 
 6312 void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
 6313   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 6314   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6315   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 6316   emit_int16(0x53, (0xC0 | encode));
 6317 }
 6318 
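      // RDTSC (0F 31) reads the 64-bit time-stamp counter into EDX:EAX.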
 6319 void Assembler::rdtsc() {
 6320   emit_int16(0x0F, 0x31);
 6321 }
 6322 
 6323 // copies data from [rsi/esi] to [rdi/edi] using rcx pointer-sized words
 6324 // generic
 6325 void Assembler::rep_mov() {
 6326   // REP
 6327   // MOVSQ
 6328   LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);)
 6329   NOT_LP64( emit_int16((unsigned char)0xF3,        (unsigned char)0xA5);)
 6330 }
 6331 
 6332 // sets rcx bytes at [rdi/edi] to the value in al
 6333 void Assembler::rep_stosb() {
 6334   // REP
 6335   // STOSB
 6336   LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);)
 6337   NOT_LP64( emit_int16((unsigned char)0xF3,        (unsigned char)0xAA);)
 6338 }
 6339 
 6340 // sets rcx pointer-sized words at [rdi/edi] to the value in rax
 6341 // generic
 6342 void Assembler::rep_stos() {
 6343   // REP
 6344   // LP64:STOSQ, LP32:STOSD
 6345   LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);)
 6346   NOT_LP64( emit_int16((unsigned char)0xF3,        (unsigned char)0xAB);)
 6347 }
 6348 
 6349 // scans rcx pointer-sized words at [rdi/edi] for an occurrence of rax
 6350 // generic
 6351 void Assembler::repne_scan() { // repne_scan
 6352   // SCASQ
 6353   LP64_ONLY(emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);)
 6354   NOT_LP64( emit_int16((unsigned char)0xF2,        (unsigned char)0xAF);)
 6355 }
 6356 
 6357 #ifdef _LP64
 6358 // scans rcx 4-byte words at [rdi] for an occurrence of eax
 6359 // generic
 6360 void Assembler::repne_scanl() { // repne_scan
 6361   // SCASL
 6362   emit_int16((unsigned char)0xF2, (unsigned char)0xAF);
 6363 }
 6364 #endif
 6365 
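      // RET is C3; RET imm16 (C2 iw) additionally pops imm16 bytes of
      // caller-pushed arguments.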
 6366 void Assembler::ret(int imm16) {
 6367   if (imm16 == 0) {
 6368     emit_int8((unsigned char)0xC3);
 6369   } else {
 6370     emit_int8((unsigned char)0xC2);
 6371     emit_int16(imm16);
 6372   }
 6373 }
 6374 
 6375 void Assembler::roll(Register dst, int imm8) {
 6376   assert(isShiftCount(imm8), "illegal shift count");
 6377   int encode = prefix_and_encode(dst->encoding());
 6378   if (imm8 == 1) {
 6379     emit_int16((unsigned char)0xD1, (0xC0 | encode));
 6380   } else {
 6381     emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
 6382   }
 6383 }
 6384 
 6385 void Assembler::eroll(Register dst, Register src, int imm8, bool no_flags) {
 6386   assert(isShiftCount(imm8), "illegal shift count");
 6387   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6388   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6389   if (imm8 == 1) {
 6390     emit_int16((unsigned char)0xD1, (0xC0 | encode));
 6391   } else {
 6392     emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
 6393   }
 6394 }
 6395 
 6396 void Assembler::roll(Register dst) {
 6397   int encode = prefix_and_encode(dst->encoding());
 6398   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 6399 }
 6400 
 6401 void Assembler::eroll(Register dst, Register src, bool no_flags) {
 6402   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6403   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6404   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 6405 }
 6406 
 6407 void Assembler::rorl(Register dst, int imm8) {
 6408   assert(isShiftCount(imm8), "illegal shift count");
 6409   int encode = prefix_and_encode(dst->encoding());
 6410   if (imm8 == 1) {
 6411     emit_int16((unsigned char)0xD1, (0xC8 | encode));
 6412   } else {
 6413     emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
 6414   }
 6415 }
 6416 
 6417 void Assembler::erorl(Register dst, Register src, int imm8, bool no_flags) {
 6418   assert(isShiftCount(imm8), "illegal shift count");
 6419   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6420   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6421   if (imm8 == 1) {
 6422     emit_int16((unsigned char)0xD1, (0xC8 | encode));
 6423   } else {
 6424     emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
 6425   }
 6426 }
 6427 
 6428 void Assembler::rorl(Register dst) {
 6429   int encode = prefix_and_encode(dst->encoding());
 6430   emit_int16((unsigned char)0xD3, (0xC8 | encode));
 6431 }
 6432 
 6433 void Assembler::erorl(Register dst, Register src, bool no_flags) {
 6434   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6435   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6436   emit_int16((unsigned char)0xD3, (0xC8 | encode));
 6437 }
 6438 
 6439 #ifdef _LP64
 6440 void Assembler::rorq(Register dst) {
 6441   int encode = prefixq_and_encode(dst->encoding());
 6442   emit_int16((unsigned char)0xD3, (0xC8 | encode));
 6443 }
 6444 
 6445 void Assembler::erorq(Register dst, Register src, bool no_flags) {
 6446   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6447   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6448   emit_int16((unsigned char)0xD3, (0xC8 | encode));
 6449 }
 6450 
 6451 void Assembler::rorq(Register dst, int imm8) {
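        // 64-bit rotate counts go up to 63, twice the range isShiftCount()
        // accepts, so the count is validated as imm8 >> 1.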
 6452   assert(isShiftCount(imm8 >> 1), "illegal shift count");
 6453   int encode = prefixq_and_encode(dst->encoding());
 6454   if (imm8 == 1) {
 6455     emit_int16((unsigned char)0xD1, (0xC8 | encode));
 6456   } else {
 6457     emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
 6458   }
 6459 }
 6460 
 6461 void Assembler::erorq(Register dst, Register src, int imm8, bool no_flags) {
 6462   assert(isShiftCount(imm8 >> 1), "illegal shift count");
 6463   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6464   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6465   if (imm8 == 1) {
 6466     emit_int16((unsigned char)0xD1, (0xC8 | encode));
 6467   } else {
 6468     emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
 6469   }
 6470 }
 6471 
 6472 void Assembler::rolq(Register dst) {
 6473   int encode = prefixq_and_encode(dst->encoding());
 6474   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 6475 }
 6476 
 6477 void Assembler::erolq(Register dst, Register src, bool no_flags) {
 6478   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6479   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6480   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 6481 }
 6482 
 6483 void Assembler::rolq(Register dst, int imm8) {
 6484   assert(isShiftCount(imm8 >> 1), "illegal shift count");
 6485   int encode = prefixq_and_encode(dst->encoding());
 6486   if (imm8 == 1) {
 6487     emit_int16((unsigned char)0xD1, (0xC0 | encode));
 6488   } else {
 6489     emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
 6490   }
 6491 }
 6492 
 6493 void Assembler::erolq(Register dst, Register src, int imm8, bool no_flags) {
 6494   assert(isShiftCount(imm8 >> 1), "illegal shift count");
 6495   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6496   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6497   if (imm8 == 1) {
 6498     emit_int16((unsigned char)0xD1, (0xC0 | encode));
 6499   } else {
 6500     emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
 6501   }
 6502 }
 6503 #endif
 6504 
 6505 void Assembler::sahf() {
 6506 #ifdef _LP64
 6507   // Not supported in 64bit mode
 6508   ShouldNotReachHere();
 6509 #endif
 6510   emit_int8((unsigned char)0x9E);
 6511 }
 6512 
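      // Group-2 shifts use ModRM reg-field extensions /4 (SHL/SAL), /5 (SHR)
      // and /7 (SAR); as_Register(N) supplies that /N digit for memory forms.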
 6513 void Assembler::sall(Address dst, int imm8) {
 6514   InstructionMark im(this);
 6515   assert(isShiftCount(imm8), "illegal shift count");
 6516   prefix(dst);
 6517   if (imm8 == 1) {
 6518     emit_int8((unsigned char)0xD1);
 6519     emit_operand(as_Register(4), dst, 0);
 6520   }
 6521   else {
 6522     emit_int8((unsigned char)0xC1);
 6523     emit_operand(as_Register(4), dst, 1);
 6524     emit_int8(imm8);
 6525   }
 6526 }
 6527 
 6528 void Assembler::esall(Register dst, Address src, int imm8, bool no_flags) {
 6529   InstructionMark im(this);
 6530   assert(isShiftCount(imm8), "illegal shift count");
 6531   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6532   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 6533   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6534   if (imm8 == 1) {
 6535     emit_int8((unsigned char)0xD1);
 6536     emit_operand(as_Register(4), src, 0);
 6537   }
 6538   else {
 6539     emit_int8((unsigned char)0xC1);
 6540     emit_operand(as_Register(4), src, 1);
 6541     emit_int8(imm8);
 6542   }
 6543 }
 6544 
 6545 void Assembler::sall(Address dst) {
 6546   InstructionMark im(this);
 6547   prefix(dst);
 6548   emit_int8((unsigned char)0xD3);
 6549   emit_operand(as_Register(4), dst, 0);
 6550 }
 6551 
 6552 void Assembler::esall(Register dst, Address src, bool no_flags) {
 6553   InstructionMark im(this);
 6554   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6555   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 6556   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6557   emit_int8((unsigned char)0xD3);
 6558   emit_operand(as_Register(4), src, 0);
 6559 }
 6560 
 6561 void Assembler::sall(Register dst, int imm8) {
 6562   assert(isShiftCount(imm8), "illegal shift count");
 6563   int encode = prefix_and_encode(dst->encoding());
 6564   if (imm8 == 1) {
 6565     emit_int16((unsigned char)0xD1, (0xE0 | encode));
 6566   } else {
 6567     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
 6568   }
 6569 }
 6570 
 6571 void Assembler::esall(Register dst, Register src, int imm8, bool no_flags) {
 6572   assert(isShiftCount(imm8), "illegal shift count");
 6573   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6574   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6575   if (imm8 == 1) {
 6576     emit_int16((unsigned char)0xD1, (0xE0 | encode));
 6577   } else {
 6578     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
 6579   }
 6580 }
 6581 
 6582 void Assembler::sall(Register dst) {
 6583   int encode = prefix_and_encode(dst->encoding());
 6584   emit_int16((unsigned char)0xD3, (0xE0 | encode));
 6585 }
 6586 
 6587 void Assembler::esall(Register dst, Register src, bool no_flags) {
 6588   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6589   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6590   emit_int16((unsigned char)0xD3, (0xE0 | encode));
 6591 }
 6592 
 6593 void Assembler::sarl(Address dst, int imm8) {
 6594   assert(isShiftCount(imm8), "illegal shift count");
 6595   InstructionMark im(this);
 6596   prefix(dst);
 6597   if (imm8 == 1) {
 6598     emit_int8((unsigned char)0xD1);
 6599     emit_operand(as_Register(7), dst, 0);
 6600   }
 6601   else {
 6602     emit_int8((unsigned char)0xC1);
 6603     emit_operand(as_Register(7), dst, 1);
 6604     emit_int8(imm8);
 6605   }
 6606 }
 6607 
 6608 void Assembler::esarl(Register dst, Address src, int imm8, bool no_flags) {
 6609   assert(isShiftCount(imm8), "illegal shift count");
 6610   InstructionMark im(this);
 6611   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6612   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 6613   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6614   if (imm8 == 1) {
 6615     emit_int8((unsigned char)0xD1);
 6616     emit_operand(as_Register(7), src, 0);
 6617   }
 6618   else {
 6619     emit_int8((unsigned char)0xC1);
 6620     emit_operand(as_Register(7), src, 1);
 6621     emit_int8(imm8);
 6622   }
 6623 }
 6624 
 6625 void Assembler::sarl(Address dst) {
 6626   InstructionMark im(this);
 6627   prefix(dst);
 6628   emit_int8((unsigned char)0xD3);
 6629   emit_operand(as_Register(7), dst, 0);
 6630 }
 6631 
 6632 void Assembler::esarl(Register dst, Address src, bool no_flags) {
 6633   InstructionMark im(this);
 6634   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6635   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 6636   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6637   emit_int8((unsigned char)0xD3);
 6638   emit_operand(as_Register(7), src, 0);
 6639 }
 6640 
 6641 void Assembler::sarl(Register dst, int imm8) {
 6642   int encode = prefix_and_encode(dst->encoding());
 6643   assert(isShiftCount(imm8), "illegal shift count");
 6644   if (imm8 == 1) {
 6645     emit_int16((unsigned char)0xD1, (0xF8 | encode));
 6646   } else {
 6647     emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
 6648   }
 6649 }
 6650 
 6651 void Assembler::esarl(Register dst, Register src, int imm8, bool no_flags) {
 6652   assert(isShiftCount(imm8), "illegal shift count");
 6653   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6654   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6655   if (imm8 == 1) {
 6656     emit_int16((unsigned char)0xD1, (0xF8 | encode));
 6657   } else {
 6658     emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
 6659   }
 6660 }
 6661 
 6662 void Assembler::sarl(Register dst) {
 6663   int encode = prefix_and_encode(dst->encoding());
 6664   emit_int16((unsigned char)0xD3, (0xF8 | encode));
 6665 }
 6666 
 6667 void Assembler::esarl(Register dst, Register src, bool no_flags) {
 6668   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6669   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6670   emit_int16((unsigned char)0xD3, (0xF8 | encode));
 6671 }
 6672 
 6673 void Assembler::sbbl(Address dst, int32_t imm32) {
 6674   InstructionMark im(this);
 6675   prefix(dst);
 6676   emit_arith_operand(0x81, rbx, dst, imm32);
 6677 }
 6678 
 6679 void Assembler::sbbl(Register dst, int32_t imm32) {
 6680   prefix(dst);
 6681   emit_arith(0x81, 0xD8, dst, imm32);
 6682 }
 6683 
 6684 void Assembler::sbbl(Register dst, Address src) {
 6685   InstructionMark im(this);
 6686   prefix(src, dst);
 6687   emit_int8(0x1B);
 6688   emit_operand(dst, src, 0);
 6689 }
 6690 
 6691 void Assembler::sbbl(Register dst, Register src) {
 6692   (void) prefix_and_encode(dst->encoding(), src->encoding());
 6693   emit_arith(0x1B, 0xC0, dst, src);
 6694 }
 6695 
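      // SETcc is 0F 90+cc /r and writes only the low byte of dst.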
 6696 void Assembler::setb(Condition cc, Register dst) {
 6697   assert(0 <= cc && cc < 16, "illegal cc");
 6698   int encode = prefix_and_encode(dst->encoding(), true, true /* is_map1 */);
 6699   emit_opcode_prefix_and_encoding((unsigned char)0x90 | cc, 0xC0, encode);
 6700 }
 6701 
 6702 void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
 6703   assert(VM_Version::supports_ssse3(), "");
 6704   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 6705   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 6706   emit_int24(0x0F, (0xC0 | encode), imm8);
 6707 }
 6708 
 6709 void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
 6710   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
 6711          vector_len == AVX_256bit? VM_Version::supports_avx2() :
 6712          0, "");
 6713   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 6714   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 6715   emit_int24(0x0F, (0xC0 | encode), imm8);
 6716 }
 6717 
 6718 void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 6719   assert(VM_Version::supports_evex(), "");
 6720   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 6721   attributes.set_is_evex_instruction();
 6722   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 6723   emit_int24(0x3, (0xC0 | encode), imm8);
 6724 }
 6725 
 6726 void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
 6727   assert(VM_Version::supports_sse4_1(), "");
 6728   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6729   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 6730   emit_int24(0x0E, (0xC0 | encode), imm8);
 6731 }
 6732 
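      // The SHA-NI instructions are legacy-SSE encoded only (NP 0F 38 /
      // NP 0F 3A, no VEX or EVEX forms), hence the plain rex_prefix_and_encode.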
 6733 void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
 6734   assert(VM_Version::supports_sha(), "");
 6735   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
 6736   emit_int24((unsigned char)0xCC, (0xC0 | encode), (unsigned char)imm8);
 6737 }
 6738 
 6739 void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
 6740   assert(VM_Version::supports_sha(), "");
 6741   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
 6742   emit_int16((unsigned char)0xC8, (0xC0 | encode));
 6743 }
 6744 
 6745 void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
 6746   assert(VM_Version::supports_sha(), "");
 6747   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
 6748   emit_int16((unsigned char)0xC9, (0xC0 | encode));
 6749 }
 6750 
 6751 void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
 6752   assert(VM_Version::supports_sha(), "");
 6753   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
 6754   emit_int16((unsigned char)0xCA, (0xC0 | encode));
 6755 }
 6756 
 6757 // xmm0 is implicit additional source to this instruction.
 6758 void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
 6759   assert(VM_Version::supports_sha(), "");
 6760   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
 6761   emit_int16((unsigned char)0xCB, (0xC0 | encode));
 6762 }
 6763 
 6764 void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
 6765   assert(VM_Version::supports_sha(), "");
 6766   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
 6767   emit_int16((unsigned char)0xCC, (0xC0 | encode));
 6768 }
 6769 
 6770 void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
 6771   assert(VM_Version::supports_sha(), "");
 6772   int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
 6773   emit_int16((unsigned char)0xCD, (0xC0 | encode));
 6774 }
 6775 
 6776 void Assembler::shll(Register dst, int imm8) {
 6777   assert(isShiftCount(imm8), "illegal shift count");
 6778   int encode = prefix_and_encode(dst->encoding());
 6779   if (imm8 == 1) {
 6780     emit_int16((unsigned char)0xD1, (0xE0 | encode));
 6781   } else {
 6782     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
 6783   }
 6784 }
 6785 
 6786 void Assembler::eshll(Register dst, Register src, int imm8, bool no_flags) {
 6787   assert(isShiftCount(imm8), "illegal shift count");
 6788   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6789   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6790   if (imm8 == 1) {
 6791     emit_int16((unsigned char)0xD1, (0xE0 | encode));
 6792   } else {
 6793     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
 6794   }
 6795 }
 6796 
 6797 void Assembler::shll(Register dst) {
 6798   int encode = prefix_and_encode(dst->encoding());
 6799   emit_int16((unsigned char)0xD3, (0xE0 | encode));
 6800 }
 6801 
 6802 void Assembler::eshll(Register dst, Register src, bool no_flags) {
 6803   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6804   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6805   emit_int16((unsigned char)0xD3, (0xE0 | encode));
 6806 }
 6807 
 6808 void Assembler::shrl(Register dst, int imm8) {
 6809   assert(isShiftCount(imm8), "illegal shift count");
 6810   int encode = prefix_and_encode(dst->encoding());
 6811   if (imm8 == 1) {
 6812     emit_int16((unsigned char)0xD1, (0xE8 | encode));
  6813   } else {
 6815     emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
 6816   }
 6817 }
 6818 
 6819 void Assembler::eshrl(Register dst, Register src, int imm8, bool no_flags) {
 6820   assert(isShiftCount(imm8), "illegal shift count");
 6821   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6822   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6823   if (imm8 == 1) {
 6824     emit_int16((unsigned char)0xD1, (0xE8 | encode));
  6825   } else {
 6827     emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
 6828   }
 6829 }
 6830 
 6831 void Assembler::shrl(Register dst) {
 6832   int encode = prefix_and_encode(dst->encoding());
 6833   emit_int16((unsigned char)0xD3, (0xE8 | encode));
 6834 }
 6835 
 6836 void Assembler::eshrl(Register dst, Register src, bool no_flags) {
 6837   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6838   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6839   emit_int16((unsigned char)0xD3, (0xE8 | encode));
 6840 }
 6841 
 6842 void Assembler::shrl(Address dst) {
 6843   InstructionMark im(this);
 6844   prefix(dst);
 6845   emit_int8((unsigned char)0xD3);
 6846   emit_operand(as_Register(5), dst, 0);
 6847 }
 6848 
 6849 void Assembler::eshrl(Register dst, Address src, bool no_flags) {
 6850   InstructionMark im(this);
 6851   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6852   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 6853   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6854   emit_int8((unsigned char)0xD3);
 6855   emit_operand(as_Register(5), src, 0);
 6856 }
 6857 
 6858 void Assembler::shrl(Address dst, int imm8) {
 6859   InstructionMark im(this);
 6860   assert(isShiftCount(imm8), "illegal shift count");
 6861   prefix(dst);
 6862   if (imm8 == 1) {
 6863     emit_int8((unsigned char)0xD1);
 6864     emit_operand(as_Register(5), dst, 0);
  6865   } else {
 6867     emit_int8((unsigned char)0xC1);
 6868     emit_operand(as_Register(5), dst, 1);
 6869     emit_int8(imm8);
 6870   }
 6871 }
 6872 
 6873 void Assembler::eshrl(Register dst, Address src, int imm8, bool no_flags) {
 6874   InstructionMark im(this);
 6875   assert(isShiftCount(imm8), "illegal shift count");
 6876   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6877   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 6878   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6879   if (imm8 == 1) {
 6880     emit_int8((unsigned char)0xD1);
 6881     emit_operand(as_Register(5), src, 0);
  6882   } else {
 6884     emit_int8((unsigned char)0xC1);
 6885     emit_operand(as_Register(5), src, 1);
 6886     emit_int8(imm8);
 6887   }
 6888 }
 6889 
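       // shld/shrd are double-precision shifts: dst shifts while the vacated
       // bits are filled from src, e.g. a 64-bit right shift composed from two
       // 32-bit halves (lo/hi are placeholder registers):
       //   __ shrdl(lo, hi, 8);  // lo = low dword of ((hi:lo) >> 8)
       //   __ shrl(hi, 8);       // hi >>= 8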
 6890 void Assembler::shldl(Register dst, Register src) {
 6891   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
 6892   emit_opcode_prefix_and_encoding((unsigned char)0xA5, 0xC0, encode);
 6893 }
 6894 
 6895 void Assembler::eshldl(Register dst, Register src1, Register src2, bool no_flags) {
 6896   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6897   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6898   emit_int16(0xA5, (0xC0 | encode));
 6899 }
 6900 
 6901 void Assembler::shldl(Register dst, Register src, int8_t imm8) {
 6902   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
 6903   emit_opcode_prefix_and_encoding((unsigned char)0xA4, 0xC0, encode, imm8);
 6904 }
 6905 
 6906 void Assembler::eshldl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
 6907   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6908   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6909   emit_int24(0x24, (0xC0 | encode), imm8);
 6910 }
 6911 
 6912 void Assembler::shrdl(Register dst, Register src) {
 6913   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
 6914   emit_opcode_prefix_and_encoding((unsigned char)0xAD, 0xC0, encode);
 6915 }
 6916 
 6917 void Assembler::eshrdl(Register dst, Register src1, Register src2, bool no_flags) {
 6918   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6919   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6920   emit_int16(0xAD, (0xC0 | encode));
 6921 }
 6922 
 6923 void Assembler::shrdl(Register dst, Register src, int8_t imm8) {
 6924   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
 6925   emit_opcode_prefix_and_encoding((unsigned char)0xAC, 0xC0, encode, imm8);
 6926 }
 6927 
 6928 void Assembler::eshrdl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
 6929   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6930   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6931   emit_int24(0x2C, (0xC0 | encode), imm8);
 6932 }
 6933 
 6934 #ifdef _LP64
 6935 void Assembler::shldq(Register dst, Register src, int8_t imm8) {
 6936   int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
 6937   emit_opcode_prefix_and_encoding((unsigned char)0xA4, 0xC0, encode, imm8);
 6938 }
 6939 
 6940 void Assembler::eshldq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
 6941   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6942   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6943   emit_int24(0x24, (0xC0 | encode), imm8);
 6944 }
 6945 
 6946 void Assembler::shrdq(Register dst, Register src, int8_t imm8) {
 6947   int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
 6948   emit_opcode_prefix_and_encoding((unsigned char)0xAC, 0xC0, encode, imm8);
 6949 }
 6950 
 6951 void Assembler::eshrdq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
 6952   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6953   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 6954   emit_int24(0x2C, (0xC0 | encode), imm8);
 6955 }
 6956 #endif
 6957 
  6958 // copies a single doubleword from [esi] to [edi]
 6959 void Assembler::smovl() {
 6960   emit_int8((unsigned char)0xA5);
 6961 }
 6962 
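       // The rmode immediate follows the SSE4.1 ROUNDSD layout: bits 1:0 pick
       // the rounding mode (00 nearest-even, 01 down, 10 up, 11 truncate),
       // bit 2 defers to MXCSR.RC instead, and bit 3 suppresses the precision
       // (inexact) exception. For example, a floor that cannot trap:
       //   __ roundsd(xmm0, xmm1, 0x9);  // 1001b: suppress inexact, round down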
 6963 void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
 6964   assert(VM_Version::supports_sse4_1(), "");
 6965   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6966   int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 6967   emit_int24(0x0B, (0xC0 | encode), (unsigned char)rmode);
 6968 }
 6969 
 6970 void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
 6971   assert(VM_Version::supports_sse4_1(), "");
 6972   InstructionMark im(this);
 6973   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 6974   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 6975   emit_int8(0x0B);
 6976   emit_operand(dst, src, 1);
 6977   emit_int8((unsigned char)rmode);
 6978 }
 6979 
 6980 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
 6981   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6982   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6983   attributes.set_rex_vex_w_reverted();
 6984   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 6985   emit_int16(0x51, (0xC0 | encode));
 6986 }
 6987 
 6988 void Assembler::sqrtsd(XMMRegister dst, Address src) {
 6989   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 6990   InstructionMark im(this);
 6991   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 6992   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 6993   attributes.set_rex_vex_w_reverted();
 6994   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 6995   emit_int8(0x51);
 6996   emit_operand(dst, src, 0);
 6997 }
 6998 
 6999 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
 7000   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7001   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7002   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7003   emit_int16(0x51, (0xC0 | encode));
 7004 }
 7005 
 7006 void Assembler::std() {
 7007   emit_int8((unsigned char)0xFD);
 7008 }
 7009 
 7010 void Assembler::sqrtss(XMMRegister dst, Address src) {
 7011   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7012   InstructionMark im(this);
 7013   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7014   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7015   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7016   emit_int8(0x51);
 7017   emit_operand(dst, src, 0);
 7018 }
 7019 
 7020 void Assembler::stmxcsr(Address dst) {
 7021   // This instruction should be SSE encoded with the REX2 prefix when an
 7022   // extended GPR is present. To be consistent when UseAPX is enabled, use
 7023   // this encoding even when an extended GPR is not used.
  7024   if (UseAVX > 0 && !UseAPX) {
 7025     assert(VM_Version::supports_avx(), "");
 7026     InstructionMark im(this);
 7027     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 7028     vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7029     emit_int8((unsigned char)0xAE);
 7030     emit_operand(as_Register(3), dst, 0);
 7031   } else {
 7032     NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7033     InstructionMark im(this);
 7034     prefix(dst, true /* is_map1 */);
 7035     emit_int8((unsigned char)0xAE);
 7036     emit_operand(as_Register(3), dst, 0);
 7037   }
 7038 }
 7039 
 7040 void Assembler::subl(Address dst, int32_t imm32) {
 7041   InstructionMark im(this);
 7042   prefix(dst);
 7043   emit_arith_operand(0x81, rbp, dst, imm32);
 7044 }
 7045 
 7046 void Assembler::esubl(Register dst, Address src, int32_t imm32, bool no_flags) {
 7047   InstructionMark im(this);
 7048   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7049   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7050   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7051   emit_arith_operand(0x81, rbp, src, imm32);
 7052 }
 7053 
 7054 void Assembler::subl(Address dst, Register src) {
 7055   InstructionMark im(this);
 7056   prefix(dst, src);
 7057   emit_int8(0x29);
 7058   emit_operand(src, dst, 0);
 7059 }
 7060 
 7061 void Assembler::esubl(Register dst, Address src1, Register src2, bool no_flags) {
 7062   InstructionMark im(this);
 7063   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7064   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7065   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7066   emit_int8(0x29);
 7067   emit_operand(src2, src1, 0);
 7068 }
 7069 
 7070 void Assembler::subl(Register dst, int32_t imm32) {
 7071   prefix(dst);
 7072   emit_arith(0x81, 0xE8, dst, imm32);
 7073 }
 7074 
 7075 void Assembler::esubl(Register dst, Register src, int32_t imm32, bool no_flags) {
 7076   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7077   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7078   emit_arith(0x81, 0xE8, src, imm32);
 7079 }
 7080 
 7081 // Force generation of a 4 byte immediate value even if it fits into 8bit
 7082 void Assembler::subl_imm32(Register dst, int32_t imm32) {
 7083   prefix(dst);
 7084   emit_arith_imm32(0x81, 0xE8, dst, imm32);
 7085 }
 7086 
 7087 void Assembler::esubl_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
 7088   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7089   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7090   emit_arith_imm32(0x81, 0xE8, src, imm32);
 7091 }
 7092 
 7093 void Assembler::subl(Register dst, Address src) {
 7094   InstructionMark im(this);
 7095   prefix(src, dst);
 7096   emit_int8(0x2B);
 7097   emit_operand(dst, src, 0);
 7098 }
 7099 
 7100 void Assembler::esubl(Register dst, Register src1, Address src2, bool no_flags) {
 7101   InstructionMark im(this);
 7102   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7103   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7104   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7105   emit_int8(0x2B);
 7106   emit_operand(src1, src2, 0);
 7107 }
 7108 
 7109 void Assembler::subl(Register dst, Register src) {
 7110   (void) prefix_and_encode(dst->encoding(), src->encoding());
 7111   emit_arith(0x2B, 0xC0, dst, src);
 7112 }
 7113 
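       // NDD subtract: the result goes to dst while both sources are preserved.
       // The 0x29 (MR-form) opcode below is the one gcc emits for sub; the
       // parameter names are ordered so that a call still reads dst = arg2 - arg3.
       // A sketch, assuming UseAPX (register names illustrative):
       //   __ esubl(r10, rax, rcx, /* no_flags */ true); // r10d = eax - ecx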
 7114 void Assembler::esubl(Register dst, Register src2, Register src1, bool no_flags) {
 7115   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7116   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7117   // opcode matches gcc
 7118   emit_arith(0x29, 0xC0, src1, src2);
 7119 }
 7120 
 7121 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
 7122   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7123   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7124   attributes.set_rex_vex_w_reverted();
 7125   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7126   emit_int16(0x5C, (0xC0 | encode));
 7127 }
 7128 
 7129 void Assembler::subsd(XMMRegister dst, Address src) {
 7130   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7131   InstructionMark im(this);
 7132   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7133   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 7134   attributes.set_rex_vex_w_reverted();
 7135   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7136   emit_int8(0x5C);
 7137   emit_operand(dst, src, 0);
 7138 }
 7139 
 7140 void Assembler::subss(XMMRegister dst, XMMRegister src) {
 7141   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7142   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false);
 7143   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7144   emit_int16(0x5C, (0xC0 | encode));
 7145 }
 7146 
 7147 void Assembler::subss(XMMRegister dst, Address src) {
 7148   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7149   InstructionMark im(this);
 7150   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7151   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7152   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7153   emit_int8(0x5C);
 7154   emit_operand(dst, src, 0);
 7155 }
 7156 
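       // When dst == rax, use_ral picks between the short AL form (A8 ib) and
       // the F6 /0 form whose ModRM byte 0xC4 (with no REX prefix) addresses
       // AH, i.e. it tests bits 8..15 of rax rather than the low byte.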
 7157 void Assembler::testb(Register dst, int imm8, bool use_ral) {
 7158   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
 7159   if (dst == rax) {
 7160     if (use_ral) {
 7161       emit_int8((unsigned char)0xA8);
 7162       emit_int8(imm8);
 7163     } else {
 7164       emit_int8((unsigned char)0xF6);
 7165       emit_int8((unsigned char)0xC4);
 7166       emit_int8(imm8);
 7167     }
 7168   } else {
 7169     (void) prefix_and_encode(dst->encoding(), true);
 7170     emit_arith_b(0xF6, 0xC0, dst, imm8);
 7171   }
 7172 }
 7173 
 7174 void Assembler::testb(Address dst, int imm8) {
 7175   InstructionMark im(this);
 7176   prefix(dst);
 7177   emit_int8((unsigned char)0xF6);
 7178   emit_operand(rax, dst, 1);
 7179   emit_int8(imm8);
 7180 }
 7181 
 7182 void Assembler::testl(Address dst, int32_t imm32) {
 7183   InstructionMark im(this);
 7184   prefix(dst);
 7185   emit_int8((unsigned char)0xF7);
 7186   emit_operand(as_Register(0), dst, 4);
 7187   emit_int32(imm32);
 7188 }
 7189 
 7190 void Assembler::testl(Register dst, int32_t imm32) {
 7191   // not using emit_arith because test
 7192   // doesn't support sign-extension of
 7193   // 8bit operands
 7194   if (dst == rax) {
 7195     emit_int8((unsigned char)0xA9);
 7196     emit_int32(imm32);
 7197   } else {
 7198     int encode = dst->encoding();
 7199     encode = prefix_and_encode(encode);
 7200     emit_int16((unsigned char)0xF7, (0xC0 | encode));
 7201     emit_int32(imm32);
 7202   }
 7203 }
 7204 
 7205 void Assembler::testl(Register dst, Register src) {
 7206   (void) prefix_and_encode(dst->encoding(), src->encoding());
 7207   emit_arith(0x85, 0xC0, dst, src);
 7208 }
 7209 
 7210 void Assembler::testl(Register dst, Address src) {
 7211   InstructionMark im(this);
 7212   prefix(src, dst);
 7213   emit_int8((unsigned char)0x85);
 7214   emit_operand(dst, src, 0);
 7215 }
 7216 
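       // tzcnt counts trailing zero bits and, unlike bsf, is well defined for
       // a zero input (it returns the operand width). Its F3 0F BC encoding
       // decodes as plain bsf on pre-BMI1 parts, hence the supports_bmi1()
       // guard below.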
 7217 void Assembler::tzcntl(Register dst, Register src) {
 7218   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7219   emit_int8((unsigned char)0xF3);
 7220   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 7221   emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
 7222 }
 7223 
 7224 void Assembler::etzcntl(Register dst, Register src, bool no_flags) {
 7225   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7226   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7227   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7228   emit_int16((unsigned char)0xF4, (0xC0 | encode));
 7229 }
 7230 
 7231 void Assembler::tzcntl(Register dst, Address src) {
 7232   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7233   InstructionMark im(this);
 7234   emit_int8((unsigned char)0xF3);
 7235   prefix(src, dst, false, true /* is_map1 */);
 7236   emit_int8((unsigned char)0xBC);
 7237   emit_operand(dst, src, 0);
 7238 }
 7239 
 7240 void Assembler::etzcntl(Register dst, Address src, bool no_flags) {
 7241   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7242   InstructionMark im(this);
 7243   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7244   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7245   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7246   emit_int8((unsigned char)0xF4);
 7247   emit_operand(dst, src, 0);
 7248 }
 7249 
 7250 void Assembler::tzcntq(Register dst, Register src) {
 7251   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7252   emit_int8((unsigned char)0xF3);
 7253   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
 7254   emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
 7255 }
 7256 
 7257 void Assembler::etzcntq(Register dst, Register src, bool no_flags) {
 7258   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7259   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7260   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7261   emit_int16((unsigned char)0xF4, (0xC0 | encode));
 7262 }
 7263 
 7264 void Assembler::tzcntq(Register dst, Address src) {
 7265   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7266   InstructionMark im(this);
 7267   emit_int8((unsigned char)0xF3);
 7268   prefixq(src, dst, true /* is_map1 */);
 7269   emit_int8((unsigned char)0xBC);
 7270   emit_operand(dst, src, 0);
 7271 }
 7272 
 7273 void Assembler::etzcntq(Register dst, Address src, bool no_flags) {
 7274   assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
 7275   InstructionMark im(this);
 7276   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7277   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
 7278   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7279   emit_int8((unsigned char)0xF4);
 7280   emit_operand(dst, src, 0);
 7281 }
 7282 
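       // ucomisd/ucomiss do an unordered scalar compare reported in ZF/PF/CF;
       // PF = 1 signals an unordered result (a NaN operand), and only a
       // signaling NaN raises #I. Typical pattern (is_nan is an illustrative
       // label):
       //   __ ucomisd(xmm0, xmm1);
       //   __ jcc(Assembler::parity, is_nan);  // either input was NaN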
 7283 void Assembler::ucomisd(XMMRegister dst, Address src) {
 7284   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7285   InstructionMark im(this);
 7286   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7287   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 7288   attributes.set_rex_vex_w_reverted();
 7289   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7290   emit_int8(0x2E);
 7291   emit_operand(dst, src, 0);
 7292 }
 7293 
 7294 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
 7295   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7296   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7297   attributes.set_rex_vex_w_reverted();
 7298   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7299   emit_int16(0x2E, (0xC0 | encode));
 7300 }
 7301 
 7302 void Assembler::ucomiss(XMMRegister dst, Address src) {
 7303   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7304   InstructionMark im(this);
 7305   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7306   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7307   simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7308   emit_int8(0x2E);
 7309   emit_operand(dst, src, 0);
 7310 }
 7311 
 7312 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
 7313   NOT_LP64(assert(VM_Version::supports_sse(), ""));
 7314   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7315   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7316   emit_int16(0x2E, (0xC0 | encode));
 7317 }
 7318 
 7319 void Assembler::xabort(int8_t imm8) {
 7320   emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (imm8 & 0xFF));
 7321 }
 7322 
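       // xadd swaps its operands and stores their sum in the destination: the
       // register ends up with the memory operand's old value and memory holds
       // the sum, so with lock() this is an atomic fetch-and-add:
       //   __ lock();
       //   __ xaddl(Address(rsp, 0), rax);  // eax = old [rsp]; [rsp] += old eax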
 7323 void Assembler::xaddb(Address dst, Register src) {
 7324   InstructionMark im(this);
 7325   prefix(dst, src, true, true /* is_map1 */);
 7326   emit_int8((unsigned char)0xC0);
 7327   emit_operand(src, dst, 0);
 7328 }
 7329 
 7330 void Assembler::xaddw(Address dst, Register src) {
 7331   InstructionMark im(this);
 7332   emit_int8(0x66);
 7333   prefix(dst, src, false, true /* is_map1 */);
 7334   emit_int8((unsigned char)0xC1);
 7335   emit_operand(src, dst, 0);
 7336 }
 7337 
 7338 void Assembler::xaddl(Address dst, Register src) {
 7339   InstructionMark im(this);
 7340   prefix(dst, src, false, true /* is_map1 */);
 7341   emit_int8((unsigned char)0xC1);
 7342   emit_operand(src, dst, 0);
 7343 }
 7344 
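       // xbegin branches to the abort handler via a rel32 measured from the
       // end of its 6-byte encoding; an unbound label gets a zero displacement
       // that is patched once the label is bound.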
 7345 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
 7346   InstructionMark im(this);
 7347   relocate(rtype);
 7348   if (abort.is_bound()) {
 7349     address entry = target(abort);
 7350     assert(entry != nullptr, "abort entry null");
 7351     int offset = checked_cast<int>(entry - pc());
 7352     emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
  7353     emit_int32(offset - 6); // rel32: 2 opcode bytes + 4 displacement bytes
 7354   } else {
 7355     abort.add_patch_at(code(), locator());
 7356     emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
 7357     emit_int32(0);
 7358   }
 7359 }
 7360 
 7361 void Assembler::xchgb(Register dst, Address src) { // xchg
 7362   InstructionMark im(this);
 7363   prefix(src, dst, true);
 7364   emit_int8((unsigned char)0x86);
 7365   emit_operand(dst, src, 0);
 7366 }
 7367 
 7368 void Assembler::xchgw(Register dst, Address src) { // xchg
 7369   InstructionMark im(this);
 7370   emit_int8(0x66);
 7371   prefix(src, dst);
 7372   emit_int8((unsigned char)0x87);
 7373   emit_operand(dst, src, 0);
 7374 }
 7375 
 7376 void Assembler::xchgl(Register dst, Address src) { // xchg
 7377   InstructionMark im(this);
 7378   prefix(src, dst);
 7379   emit_int8((unsigned char)0x87);
 7380   emit_operand(dst, src, 0);
 7381 }
 7382 
 7383 void Assembler::xchgl(Register dst, Register src) {
 7384   int encode = prefix_and_encode(dst->encoding(), src->encoding());
 7385   emit_int16((unsigned char)0x87, (0xC0 | encode));
 7386 }
 7387 
 7388 void Assembler::xend() {
 7389   emit_int24(0x0F, 0x01, (unsigned char)0xD5);
 7390 }
 7391 
 7392 void Assembler::xgetbv() {
 7393   emit_int24(0x0F, 0x01, (unsigned char)0xD0);
 7394 }
 7395 
 7396 void Assembler::xorl(Address dst, int32_t imm32) {
 7397   InstructionMark im(this);
 7398   prefix(dst);
 7399   emit_arith_operand(0x81, as_Register(6), dst, imm32);
 7400 }
 7401 
 7402 void Assembler::exorl(Register dst, Address src, int32_t imm32, bool no_flags) {
 7403   InstructionMark im(this);
 7404   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7405   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7406   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7407   emit_arith_operand(0x81, as_Register(6), src, imm32);
 7408 }
 7409 
 7410 void Assembler::xorl(Register dst, int32_t imm32) {
 7411   prefix(dst);
 7412   emit_arith(0x81, 0xF0, dst, imm32);
 7413 }
 7414 
 7415 void Assembler::exorl(Register dst, Register src, int32_t imm32, bool no_flags) {
 7416   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7417   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7418   emit_arith(0x81, 0xF0, src, imm32);
 7419 }
 7420 
 7421 void Assembler::xorl(Register dst, Address src) {
 7422   InstructionMark im(this);
 7423   prefix(src, dst);
 7424   emit_int8(0x33);
 7425   emit_operand(dst, src, 0);
 7426 }
 7427 
 7428 void Assembler::exorl(Register dst, Register src1, Address src2, bool no_flags) {
 7429   InstructionMark im(this);
 7430   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7431   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7432   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7433   emit_int8(0x33);
 7434   emit_operand(src1, src2, 0);
 7435 }
 7436 
 7437 void Assembler::xorl(Register dst, Register src) {
 7438   (void) prefix_and_encode(dst->encoding(), src->encoding());
 7439   emit_arith(0x33, 0xC0, dst, src);
 7440 }
 7441 
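       // NDD xor: dst = src1 ^ src2 with both sources preserved; with no_flags
       // set it doubles as a flag-preserving zeroing idiom (sketch, assuming
       // UseAPX):
       //   __ exorl(rax, rcx, rcx, /* no_flags */ true); // eax = 0, RFLAGS kept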
 7442 void Assembler::exorl(Register dst, Register src1, Register src2, bool no_flags) {
 7443   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7444   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7445   // opcode matches gcc
 7446   emit_arith(0x31, 0xC0, src1, src2);
 7447 }
 7448 
 7449 void Assembler::xorl(Address dst, Register src) {
 7450   InstructionMark im(this);
 7451   prefix(dst, src);
 7452   emit_int8(0x31);
 7453   emit_operand(src, dst, 0);
 7454 }
 7455 
 7456 void Assembler::exorl(Register dst, Address src1, Register src2, bool no_flags) {
 7457   InstructionMark im(this);
 7458   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7459   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
 7460   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7461   emit_int8(0x31);
 7462   emit_operand(src2, src1, 0);
 7463 }
 7464 
 7465 void Assembler::xorb(Register dst, Address src) {
 7466   InstructionMark im(this);
 7467   prefix(src, dst);
 7468   emit_int8(0x32);
 7469   emit_operand(dst, src, 0);
 7470 }
 7471 
 7472 void Assembler::exorb(Register dst, Register src1, Address src2, bool no_flags) {
 7473   InstructionMark im(this);
 7474   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7475   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
 7476   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7477   emit_int8(0x32);
 7478   emit_operand(src1, src2, 0);
 7479 }
 7480 
 7481 void Assembler::xorb(Address dst, Register src) {
 7482   InstructionMark im(this);
 7483   prefix(dst, src, true);
 7484   emit_int8(0x30);
 7485   emit_operand(src, dst, 0);
 7486 }
 7487 
 7488 void Assembler::exorb(Register dst, Address src1, Register src2, bool no_flags) {
 7489   InstructionMark im(this);
 7490   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7491   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
 7492   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7493   emit_int8(0x30);
 7494   emit_operand(src2, src1, 0);
 7495 }
 7496 
 7497 void Assembler::xorw(Register dst, Address src) {
 7498   InstructionMark im(this);
 7499   emit_int8(0x66);
 7500   prefix(src, dst);
 7501   emit_int8(0x33);
 7502   emit_operand(dst, src, 0);
 7503 }
 7504 
 7505 void Assembler::exorw(Register dst, Register src1, Address src2, bool no_flags) {
 7506   InstructionMark im(this);
  7507   // The operand-size override is supplied by the EVEX.pp field (VEX_SIMD_66); no legacy 0x66 prefix is emitted.
  7508   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  7509   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_16bit);
 7510   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C, &attributes, no_flags);
 7511   emit_int8(0x33);
 7512   emit_operand(src1, src2, 0);
 7513 }
 7514 
  7515 // AVX 3-operand scalar floating-point arithmetic instructions
 7516 
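       // For these scalar forms dst[low] = nds[low] op src[low]; bits 127:64
       // of dst are copied from nds and the lanes above that are zeroed, e.g.:
       //   __ vaddsd(xmm0, xmm1, xmm2); // xmm0[63:0] = xmm1[63:0] + xmm2[63:0]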
 7517 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
 7518   assert(VM_Version::supports_avx(), "");
 7519   InstructionMark im(this);
 7520   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7521   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 7522   attributes.set_rex_vex_w_reverted();
 7523   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7524   emit_int8(0x58);
 7525   emit_operand(dst, src, 0);
 7526 }
 7527 
 7528 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7529   assert(VM_Version::supports_avx(), "");
 7530   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7531   attributes.set_rex_vex_w_reverted();
 7532   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7533   emit_int16(0x58, (0xC0 | encode));
 7534 }
 7535 
 7536 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
 7537   assert(VM_Version::supports_avx(), "");
 7538   InstructionMark im(this);
 7539   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7540   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7541   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7542   emit_int8(0x58);
 7543   emit_operand(dst, src, 0);
 7544 }
 7545 
 7546 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7547   assert(VM_Version::supports_avx(), "");
 7548   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7549   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7550   emit_int16(0x58, (0xC0 | encode));
 7551 }
 7552 
 7553 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
 7554   assert(VM_Version::supports_avx(), "");
 7555   InstructionMark im(this);
 7556   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7557   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 7558   attributes.set_rex_vex_w_reverted();
 7559   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7560   emit_int8(0x5E);
 7561   emit_operand(dst, src, 0);
 7562 }
 7563 
 7564 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7565   assert(VM_Version::supports_avx(), "");
 7566   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7567   attributes.set_rex_vex_w_reverted();
 7568   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7569   emit_int16(0x5E, (0xC0 | encode));
 7570 }
 7571 
 7572 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
 7573   assert(VM_Version::supports_avx(), "");
 7574   InstructionMark im(this);
 7575   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7576   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7577   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7578   emit_int8(0x5E);
 7579   emit_operand(dst, src, 0);
 7580 }
 7581 
 7582 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7583   assert(VM_Version::supports_avx(), "");
 7584   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7585   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7586   emit_int16(0x5E, (0xC0 | encode));
 7587 }
 7588 
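       // FMA operand-order suffixes: 231 computes dst = src1 * src2 + dst and
       // 213 computes dst = src1 * dst + src2; the vfnmadd forms negate the
       // product before adding. For example:
       //   __ vfmadd231sd(xmm0, xmm1, xmm2); // xmm0 = xmm1 * xmm2 + xmm0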
 7589 void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
 7590   assert(VM_Version::supports_fma(), "");
 7591   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7592   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7593   emit_int16((unsigned char)0xB9, (0xC0 | encode));
 7594 }
 7595 
  7596 void Assembler::evfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2, EvexRoundPrefix rmode) { // rmode selects the EVEX embedded rounding mode
 7597   assert(VM_Version::supports_evex(), "");
 7598   InstructionAttr attributes(rmode, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7599   attributes.set_extended_context();
 7600   attributes.set_is_evex_instruction();
 7601   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7602   emit_int16((unsigned char)0xAD, (0xC0 | encode));
 7603 }
 7604 
 7605 void Assembler::vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
 7606   assert(VM_Version::supports_fma(), "");
 7607   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7608   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7609   emit_int16((unsigned char)0xAD, (0xC0 | encode));
 7610 }
 7611 
 7612 void Assembler::vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
 7613   assert(VM_Version::supports_fma(), "");
 7614   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7615   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7616   emit_int16((unsigned char)0xBD, (0xC0 | encode));
 7617 }
 7618 
 7619 void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
 7620   assert(VM_Version::supports_fma(), "");
 7621   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7622   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7623   emit_int16((unsigned char)0xB9, (0xC0 | encode));
 7624 }
 7625 
 7626 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
 7627   assert(VM_Version::supports_avx(), "");
 7628   InstructionMark im(this);
 7629   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7630   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 7631   attributes.set_rex_vex_w_reverted();
 7632   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7633   emit_int8(0x59);
 7634   emit_operand(dst, src, 0);
 7635 }
 7636 
 7637 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7638   assert(VM_Version::supports_avx(), "");
 7639   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7640   attributes.set_rex_vex_w_reverted();
 7641   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7642   emit_int16(0x59, (0xC0 | encode));
 7643 }
 7644 
 7645 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
 7646   assert(VM_Version::supports_avx(), "");
 7647   InstructionMark im(this);
 7648   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7649   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7650   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7651   emit_int8(0x59);
 7652   emit_operand(dst, src, 0);
 7653 }
 7654 
 7655 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7656   assert(VM_Version::supports_avx(), "");
 7657   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7658   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7659   emit_int16(0x59, (0xC0 | encode));
 7660 }
 7661 
 7662 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
 7663   assert(VM_Version::supports_avx(), "");
 7664   InstructionMark im(this);
 7665   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7666   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 7667   attributes.set_rex_vex_w_reverted();
 7668   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7669   emit_int8(0x5C);
 7670   emit_operand(dst, src, 0);
 7671 }
 7672 
 7673 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7674   assert(VM_Version::supports_avx(), "");
 7675   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7676   attributes.set_rex_vex_w_reverted();
 7677   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 7678   emit_int16(0x5C, (0xC0 | encode));
 7679 }
 7680 
 7681 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
 7682   assert(VM_Version::supports_avx(), "");
 7683   InstructionMark im(this);
 7684   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7685   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7686   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7687   emit_int8(0x5C);
 7688   emit_operand(dst, src, 0);
 7689 }
 7690 
 7691 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 7692   assert(VM_Version::supports_avx(), "");
 7693   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 7694   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
 7695   emit_int16(0x5C, (0xC0 | encode));
 7696 }
 7697 
 7698 //====================VECTOR ARITHMETIC=====================================
 7699 
  7700 // Floating-point vector arithmetic
 7701 
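       // vector_len selects the operand width (Assembler::AVX_128bit,
       // AVX_256bit or AVX_512bit); the 512-bit forms require EVEX. E.g.:
       //   __ vaddpd(xmm0, xmm1, xmm2, Assembler::AVX_256bit); // 4 packed doubles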
 7702 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
 7703   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7704   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7705   attributes.set_rex_vex_w_reverted();
 7706   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7707   emit_int16(0x58, (0xC0 | encode));
 7708 }
 7709 
 7710 void Assembler::addpd(XMMRegister dst, Address src) {
 7711   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7712   InstructionMark im(this);
 7713   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7714   attributes.set_rex_vex_w_reverted();
 7715   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7716   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7717   emit_int8(0x58);
 7718   emit_operand(dst, src, 0);
 7719 }
 7720 
 7721 
 7722 void Assembler::addps(XMMRegister dst, XMMRegister src) {
 7723   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7724   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7725   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7726   emit_int16(0x58, (0xC0 | encode));
 7727 }
 7728 
 7729 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7730   assert(VM_Version::supports_avx(), "");
 7731   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7732   attributes.set_rex_vex_w_reverted();
 7733   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7734   emit_int16(0x58, (0xC0 | encode));
 7735 }
 7736 
 7737 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7738   assert(VM_Version::supports_avx(), "");
 7739   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7740   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7741   emit_int16(0x58, (0xC0 | encode));
 7742 }
 7743 
 7744 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7745   assert(VM_Version::supports_avx(), "");
 7746   InstructionMark im(this);
 7747   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7748   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7749   attributes.set_rex_vex_w_reverted();
 7750   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7751   emit_int8(0x58);
 7752   emit_operand(dst, src, 0);
 7753 }
 7754 
 7755 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7756   assert(VM_Version::supports_avx(), "");
 7757   InstructionMark im(this);
 7758   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7759   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7760   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7761   emit_int8(0x58);
 7762   emit_operand(dst, src, 0);
 7763 }
 7764 
 7765 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
 7766   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7767   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7768   attributes.set_rex_vex_w_reverted();
 7769   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7770   emit_int16(0x5C, (0xC0 | encode));
 7771 }
 7772 
 7773 void Assembler::subps(XMMRegister dst, XMMRegister src) {
 7774   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7775   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7776   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7777   emit_int16(0x5C, (0xC0 | encode));
 7778 }
 7779 
 7780 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7781   assert(VM_Version::supports_avx(), "");
 7782   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7783   attributes.set_rex_vex_w_reverted();
 7784   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7785   emit_int16(0x5C, (0xC0 | encode));
 7786 }
 7787 
 7788 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7789   assert(VM_Version::supports_avx(), "");
 7790   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7791   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7792   emit_int16(0x5C, (0xC0 | encode));
 7793 }
 7794 
 7795 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7796   assert(VM_Version::supports_avx(), "");
 7797   InstructionMark im(this);
 7798   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7799   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7800   attributes.set_rex_vex_w_reverted();
 7801   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7802   emit_int8(0x5C);
 7803   emit_operand(dst, src, 0);
 7804 }
 7805 
 7806 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7807   assert(VM_Version::supports_avx(), "");
 7808   InstructionMark im(this);
 7809   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7810   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7811   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7812   emit_int8(0x5C);
 7813   emit_operand(dst, src, 0);
 7814 }
 7815 
 7816 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
 7817   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7818   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7819   attributes.set_rex_vex_w_reverted();
 7820   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7821   emit_int16(0x59, (0xC0 | encode));
 7822 }
 7823 
 7824 void Assembler::mulpd(XMMRegister dst, Address src) {
 7825   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7826   InstructionMark im(this);
 7827   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7828   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7829   attributes.set_rex_vex_w_reverted();
 7830   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7831   emit_int8(0x59);
 7832   emit_operand(dst, src, 0);
 7833 }
 7834 
 7835 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
 7836   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 7837   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7838   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7839   emit_int16(0x59, (0xC0 | encode));
 7840 }
 7841 
 7842 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7843   assert(VM_Version::supports_avx(), "");
 7844   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7845   attributes.set_rex_vex_w_reverted();
 7846   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7847   emit_int16(0x59, (0xC0 | encode));
 7848 }
 7849 
 7850 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7851   assert(VM_Version::supports_avx(), "");
 7852   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7853   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7854   emit_int16(0x59, (0xC0 | encode));
 7855 }
 7856 
 7857 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7858   assert(VM_Version::supports_avx(), "");
 7859   InstructionMark im(this);
 7860   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7861   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7862   attributes.set_rex_vex_w_reverted();
 7863   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7864   emit_int8(0x59);
 7865   emit_operand(dst, src, 0);
 7866 }
 7867 
 7868 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7869   assert(VM_Version::supports_avx(), "");
 7870   InstructionMark im(this);
 7871   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7872   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 7873   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 7874   emit_int8(0x59);
 7875   emit_operand(dst, src, 0);
 7876 }
 7877 
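       // Packed FMA with the same 231 ordering as the scalar forms, applied
       // lanewise:
       //   __ vfmadd231pd(xmm0, xmm1, xmm2, Assembler::AVX_256bit); // xmm0 += xmm1 * xmm2 per lane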
void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2, 0);
}

void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2, 0);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src, 0);
}

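// The rounding immediates used by the round*/rndscale* emitters below share
// the same low four bits: imm[1:0] selects the mode (00 nearest-even, 01
// toward -inf, 10 toward +inf, 11 toward zero), imm[2] defers to MXCSR.RC
// instead, and imm[3] suppresses the inexact exception; vrndscale* also use
// imm[7:4] as the scale (number of fraction bits to keep). A hedged sketch
// truncating each double in a register (hypothetical operands):
//
//   __ vroundpd(xmm0, xmm0, 0x0B /* toward zero, inexact suppressed */, Assembler::AVX_256bit);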
void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x09, (0xC0 | encode), (rmode));
}

void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(!needs_eevex(src.base(), src.index()), "does not support extended gprs as BASE or INDEX of address operand");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src, 1);
  emit_int8((rmode));
}

void Assembler::vroundsd(XMMRegister dst, XMMRegister src, XMMRegister src2, int32_t rmode) {
  assert(VM_Version::supports_avx(), "");
  assert(rmode <= 0x0f, "rmode 0x%x", rmode);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0B, (0xC0 | encode), (rmode));
}

void Assembler::vrndscalesd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int32_t rmode) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0B, (0xC0 | encode), (rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x09, (0xC0 | encode), (rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src, 1);
  emit_int8((rmode));
}

void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src, 0);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

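// Note the operand roles in andnpd: the *destination* is complemented,
// i.e. dst = ~dst & src, matching the SSE ANDNPD definition, so the mask to
// invert goes in dst. A sketch with hypothetical registers:
//
//   __ andnpd(xmm1, xmm0);   // xmm1 = ~mask & value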
void Assembler::andnpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x55, (0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src, 0);
}

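// unpcklpd/unpckhpd interleave one double from each operand: unpcklpd leaves
// dst[63:0] in place and loads dst[127:64] from src[63:0], while unpckhpd
// moves dst[127:64] down into dst[63:0] and loads dst[127:64] from
// src[127:64]; unpckhpd(dst, dst) is thus a common idiom for bringing the
// high double of a register into scalar position.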
void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x15, (0xC0 | encode));
}

void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x14, (0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src, 0);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src, 0);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src, 0);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src, 0);
}

// Integer vector arithmetic
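// The horizontal adds below sum adjacent element pairs within each 128-bit
// lane: the low half of a lane takes the pairwise sums from nds, the high
// half those from src. A worked 128-bit vphaddd example (hypothetical
// values):
//
//   nds = { a0, a1, a2, a3 },  src = { b0, b1, b2, b3 }
//   dst = { a0+a1, a2+a3, b0+b1, b2+b3 }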
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x01, (0xC0 | encode));
}

void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x02, (0xC0 | encode));
}

void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (0xC0 | encode));
}

void Assembler::paddd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src, 0);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x01, (0xC0 | encode));
}

void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x02, (0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (0xC0 | encode));
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src, 0);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src, 0);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src, 0);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src, 0);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));  // match the other psub* emitters
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}

void Assembler::vpsubusb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD8, (0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src, 0);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src, 0);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src, 0);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src, 0);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

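// pmuludq and vpmuludq are widening multiplies: only the even-indexed
// unsigned 32-bit elements (0, 2, ...) of each operand participate, and each
// pair yields a full 64-bit product, e.g. for the 128-bit form:
//
//   dst = { (uint64_t)dst[0] * src[0], (uint64_t)dst[2] * src[2] }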
void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF4, (0xC0 | encode));
}

void Assembler::vpmulhuw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((vector_len == AVX_128bit && VM_Version::supports_avx()) ||
         (vector_len == AVX_256bit && VM_Version::supports_avx2()) ||
         (vector_len == AVX_512bit && VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE4, (0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

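// Unlike the b/w/d element sizes, a packed low 64x64-bit multiply does not
// exist in AVX/AVX2; VPMULLQ is an AVX-512DQ instruction, hence the
// EVEX-only emitters below.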
void Assembler::evpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF4, (0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src, 0);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src, 0);
}

void Assembler::evpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src, 0);
}

// Min, max
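// Naming follows the ISA mnemonics: the s/u infix picks signed vs. unsigned
// element compares (pminsb = signed bytes, pminub = unsigned bytes) and the
// final letter the element width (b/w/d/q); the 64-bit q variants are
// EVEX-only (AVX-512F).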
void Assembler::pminsb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::vpminub(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDA, (0xC0 | encode));
}

void Assembler::pminsw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEA, (0xC0 | encode));
}

void Assembler::vpminsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEA, (0xC0 | encode));
}

void Assembler::pminsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::vpminsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::vpminsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX512F");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::minps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vminps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::minpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));  // minpd is an SSE2 instruction
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vminpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::pmaxsb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3C, (0xC0 | encode));
}

void Assembler::vpmaxsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3C, (0xC0 | encode));
}

void Assembler::pmaxsw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEE, (0xC0 | encode));
}

void Assembler::vpmaxsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEE, (0xC0 | encode));
}

void Assembler::pmaxsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::vpmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
        (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_evex()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::vpmaxsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX512F");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::maxps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));  // maxpd is an SSE2 instruction
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // pass dst (not xnoreg) as nds, matching minpd and the other packed SSE forms
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

// Shift packed integers left by specified number of bits.
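// Two forms follow: an immediate count, encoded as 66 0F 71/72/73 /6 ib
// where the opcode extension travels in the ModRM reg field (hence xmm6 is
// passed as the "register" operand purely to plant /6 there; the logical and
// arithmetic right shifts later in this file use xmm2 (/2) and xmm4 (/4) the
// same way), and a variable count taken from an XMM register. For these left
// shifts a count larger than the element width zeroes the destination. A
// hedged sketch (hypothetical register):
//
//   __ pslld(xmm0, 3);   // each dword in xmm0 <<= 3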
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

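// In the XMM-count forms below, the scalar count is the unsigned value of
// the low 64 bits of 'shift' and is applied uniformly to every element
// (unlike the per-element AVX2 vpsllv* family emitted elsewhere in this
// file).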
void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

 8774 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8775   assert(UseAVX > 0, "requires some form of AVX");
 8776   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8777   // XMM6 is for /6 encoding: 66 0F 71 /6 ib
 8778   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8779   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
 8780 }
 8781 
 8782 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8783   assert(UseAVX > 0, "requires some form of AVX");
 8784   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8785   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8786   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
 8787   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8788   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 8789 }
 8790 
 8791 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8792   assert(UseAVX > 0, "requires some form of AVX");
 8793   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8794   attributes.set_rex_vex_w_reverted();
 8795   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
 8796   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8797   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
 8798 }
 8799 
 8800 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8801   assert(UseAVX > 0, "requires some form of AVX");
 8802   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8803   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8804   emit_int16((unsigned char)0xF1, (0xC0 | encode));
 8805 }
 8806 
 8807 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8808   assert(UseAVX > 0, "requires some form of AVX");
 8809   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8810   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8811   emit_int16((unsigned char)0xF2, (0xC0 | encode));
 8812 }
 8813 
 8814 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8815   assert(UseAVX > 0, "requires some form of AVX");
 8816   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8817   attributes.set_rex_vex_w_reverted();
 8818   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8819   emit_int16((unsigned char)0xF3, (0xC0 | encode));
 8820 }
 8821 
 8822 // Shift packed integers logically right by specified number of bits.
 8823 void Assembler::psrlw(XMMRegister dst, int shift) {
 8824   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8825   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8826   // XMM2 is for /2 encoding: 66 0F 71 /2 ib
 8827   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8828   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
 8829 }
 8830 
 8831 void Assembler::psrld(XMMRegister dst, int shift) {
 8832   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8833   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8834   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
 8835   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8836   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 8837 }
 8838 
 8839 void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse with the psrldq SSE2 instruction, which shifts
  // the whole 128-bit value in an xmm register right by a number of bytes.
 8842   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8843   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8844   attributes.set_rex_vex_w_reverted();
 8845   // XMM2 is for /2 encoding: 66 0F 73 /2 ib
 8846   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8847   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
 8848 }
 8849 
 8850 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
 8851   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8852   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8853   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8854   emit_int16((unsigned char)0xD1, (0xC0 | encode));
 8855 }
 8856 
 8857 void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
 8858   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8859   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8860   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8861   emit_int16((unsigned char)0xD2, (0xC0 | encode));
 8862 }
 8863 
 8864 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
 8865   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8866   InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8867   attributes.set_rex_vex_w_reverted();
 8868   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8869   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 8870 }
 8871 
 8872 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8873   assert(UseAVX > 0, "requires some form of AVX");
 8874   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8875   // XMM2 is for /2 encoding: 66 0F 71 /2 ib
 8876   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8877   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
 8878 }
 8879 
 8880 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8881   assert(UseAVX > 0, "requires some form of AVX");
 8882   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8883   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
 8884   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8885   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 8886 }
 8887 
 8888 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8889   assert(UseAVX > 0, "requires some form of AVX");
 8890   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8891   attributes.set_rex_vex_w_reverted();
 8892   // XMM2 is for /2 encoding: 66 0F 73 /2 ib
 8893   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8894   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
 8895 }
 8896 
 8897 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8898   assert(UseAVX > 0, "requires some form of AVX");
 8899   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8900   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8901   emit_int16((unsigned char)0xD1, (0xC0 | encode));
 8902 }
 8903 
 8904 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8905   assert(UseAVX > 0, "requires some form of AVX");
 8906   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8907   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8908   emit_int16((unsigned char)0xD2, (0xC0 | encode));
 8909 }
 8910 
 8911 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8912   assert(UseAVX > 0, "requires some form of AVX");
 8913   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8914   attributes.set_rex_vex_w_reverted();
 8915   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8916   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 8917 }
 8918 
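// Variable per-word shifts from AVX512BW (EVEX.66.0F38.W1): opcode 0x10 is
// vpsrlvw and 0x12 is vpsllvw; each word of dst is shifted by the count held
// in the corresponding word of src.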
 8919 void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 8920   assert(VM_Version::supports_avx512bw(), "");
 8921   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8922   attributes.set_is_evex_instruction();
 8923   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8924   emit_int16(0x10, (0xC0 | encode));
 8925 }
 8926 
 8927 void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 8928   assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8930   attributes.set_is_evex_instruction();
 8931   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8932   emit_int16(0x12, (0xC0 | encode));
 8933 }
 8934 
 8935 // Shift packed integers arithmetically right by specified number of bits.
 8936 void Assembler::psraw(XMMRegister dst, int shift) {
 8937   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8938   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8939   // XMM4 is for /4 encoding: 66 0F 71 /4 ib
 8940   int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8941   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
 8942 }
 8943 
 8944 void Assembler::psrad(XMMRegister dst, int shift) {
 8945   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8946   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8947   // XMM4 is for /4 encoding: 66 0F 72 /4 ib
 8948   int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 8952 }
 8953 
 8954 void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
 8955   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8956   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8957   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8958   emit_int16((unsigned char)0xE1, (0xC0 | encode));
 8959 }
 8960 
 8961 void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
 8962   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 8963   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8964   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8965   emit_int16((unsigned char)0xE2, (0xC0 | encode));
 8966 }
 8967 
 8968 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8969   assert(UseAVX > 0, "requires some form of AVX");
 8970   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8971   // XMM4 is for /4 encoding: 66 0F 71 /4 ib
 8972   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8973   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
 8974 }
 8975 
 8976 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8977   assert(UseAVX > 0, "requires some form of AVX");
 8978   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
 8980   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8981   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 8982 }
 8983 
 8984 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8985   assert(UseAVX > 0, "requires some form of AVX");
 8986   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8987   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8988   emit_int16((unsigned char)0xE1, (0xC0 | encode));
 8989 }
 8990 
 8991 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 8992   assert(UseAVX > 0, "requires some form of AVX");
 8993   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8994   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8995   emit_int16((unsigned char)0xE2, (0xC0 | encode));
 8996 }
 8997 
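// An arithmetic right shift of packed quadwords has no SSE/VEX encoding; it is
// only available as the EVEX-encoded vpsraq forms below (66 0F W1 72 /4 ib for
// the immediate count, 66 0F W1 E2 for the xmm count).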
 8998 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 8999   assert(UseAVX > 2, "requires AVX512");
  assert(VM_Version::supports_avx512vl() || vector_len == AVX_512bit, "requires AVX512VL");
 9001   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9002   attributes.set_is_evex_instruction();
 9003   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 9005 }
 9006 
 9007 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9008   assert(UseAVX > 2, "requires AVX512");
  assert(VM_Version::supports_avx512vl() || vector_len == AVX_512bit, "requires AVX512VL");
 9010   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9011   attributes.set_is_evex_instruction();
 9012   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9013   emit_int16((unsigned char)0xE2, (0xC0 | encode));
 9014 }
 9015 
// Logical operations on packed integers.
 9017 void Assembler::pand(XMMRegister dst, XMMRegister src) {
 9018   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 9019   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9020   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9021   emit_int16((unsigned char)0xDB, (0xC0 | encode));
 9022 }
 9023 
 9024 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9025   assert(UseAVX > 0, "requires some form of AVX");
 9026   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9027   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9028   emit_int16((unsigned char)0xDB, (0xC0 | encode));
 9029 }
 9030 
 9031 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 9032   assert(UseAVX > 0, "requires some form of AVX");
 9033   InstructionMark im(this);
 9034   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9035   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9036   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9037   emit_int8((unsigned char)0xDB);
 9038   emit_operand(dst, src, 0);
 9039 }
 9040 
 9041 void Assembler::evpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9042   evpandq(dst, k0, nds, src, false, vector_len);
 9043 }
 9044 
 9045 void Assembler::evpandq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 9046   evpandq(dst, k0, nds, src, false, vector_len);
 9047 }
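// The unmasked evpandq wrappers above (and the evporq ones further down)
// simply delegate to the masked forms with k0, which encodes "no mask"; the
// merge flag is then irrelevant and passed as false.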
 9048 
// Variable shift packed integers logically left.
 9050 void Assembler::vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9051   assert(UseAVX > 1, "requires AVX2");
 9052   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9053   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9054   emit_int16(0x47, (0xC0 | encode));
 9055 }
 9056 
 9057 void Assembler::vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9058   assert(UseAVX > 1, "requires AVX2");
 9059   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9060   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9061   emit_int16(0x47, (0xC0 | encode));
 9062 }
 9063 
// Variable shift packed integers logically right.
 9065 void Assembler::vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9066   assert(UseAVX > 1, "requires AVX2");
 9067   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9068   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9069   emit_int16(0x45, (0xC0 | encode));
 9070 }
 9071 
 9072 void Assembler::vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9073   assert(UseAVX > 1, "requires AVX2");
 9074   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9075   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9076   emit_int16(0x45, (0xC0 | encode));
 9077 }
 9078 
// Variable shift packed integers arithmetically right.
 9080 void Assembler::vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9081   assert(UseAVX > 1, "requires AVX2");
 9082   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9083   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9084   emit_int16(0x46, (0xC0 | encode));
 9085 }
 9086 
 9087 void Assembler::evpsravw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9088   assert(VM_Version::supports_avx512bw(), "");
 9089   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9090   attributes.set_is_evex_instruction();
 9091   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9092   emit_int16(0x11, (0xC0 | encode));
 9093 }
 9094 
 9095 void Assembler::evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9096   assert(UseAVX > 2, "requires AVX512");
 9097   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 9098   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9099   attributes.set_is_evex_instruction();
 9100   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9101   emit_int16(0x46, (0xC0 | encode));
 9102 }
 9103 
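// Concatenate-and-shift (funnel shift) forms from AVX512_VBMI2. Roughly, per
// dword: vpshldvd leaves in dst the high half of the double-width value
// (dst:src) shifted left by the per-element count in shift, while vpshrdvd
// leaves the low half of (src:dst) shifted right.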
 9104 void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9105   assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
 9106   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9107   attributes.set_is_evex_instruction();
 9108   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9109   emit_int16(0x71, (0xC0 | encode));
 9110 }
 9111 
 9112 void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9113   assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
 9114   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9115   attributes.set_is_evex_instruction();
 9116   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9117   emit_int16(0x73, (0xC0 | encode));
 9118 }
 9119 
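// Note which operand gets inverted: pandn computes dst = ~dst & src, and
// vpandn computes dst = ~nds & src.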
 9120 void Assembler::pandn(XMMRegister dst, XMMRegister src) {
 9121   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 9122   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9123   attributes.set_rex_vex_w_reverted();
 9124   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9125   emit_int16((unsigned char)0xDF, (0xC0 | encode));
 9126 }
 9127 
 9128 void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9129   assert(UseAVX > 0, "requires some form of AVX");
 9130   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9131   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9132   emit_int16((unsigned char)0xDF, (0xC0 | encode));
 9133 }
 9134 
 9135 void Assembler::por(XMMRegister dst, XMMRegister src) {
 9136   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 9137   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9138   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9139   emit_int16((unsigned char)0xEB, (0xC0 | encode));
 9140 }
 9141 
 9142 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9143   assert(UseAVX > 0, "requires some form of AVX");
 9144   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9145   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9146   emit_int16((unsigned char)0xEB, (0xC0 | encode));
 9147 }
 9148 
 9149 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 9150   assert(UseAVX > 0, "requires some form of AVX");
 9151   InstructionMark im(this);
 9152   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9153   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9154   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9155   emit_int8((unsigned char)0xEB);
 9156   emit_operand(dst, src, 0);
 9157 }
 9158 
 9159 void Assembler::evporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9160   evporq(dst, k0, nds, src, false, vector_len);
 9161 }
 9162 
 9163 void Assembler::evporq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 9164   evporq(dst, k0, nds, src, false, vector_len);
 9165 }
 9166 
 9167 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9168   assert(VM_Version::supports_evex(), "");
 9169   // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
 9170   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9171   attributes.set_is_evex_instruction();
 9172   attributes.set_embedded_opmask_register_specifier(mask);
 9173   if (merge) {
 9174     attributes.reset_is_clear_context();
 9175   }
 9176   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9177   emit_int16((unsigned char)0xEB, (0xC0 | encode));
 9178 }
 9179 
 9180 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9181   assert(VM_Version::supports_evex(), "");
 9182   // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r
 9183   InstructionMark im(this);
 9184   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9185   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9186   attributes.set_is_evex_instruction();
 9187   attributes.set_embedded_opmask_register_specifier(mask);
 9188   if (merge) {
 9189     attributes.reset_is_clear_context();
 9190   }
 9191   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9192   emit_int8((unsigned char)0xEB);
 9193   emit_operand(dst, src, 0);
 9194 }
 9195 
 9196 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
 9197   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 9198   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9199   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9200   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 9201 }
 9202 
 9203 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9204   assert(UseAVX > 0, "requires some form of AVX");
 9205   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 9206          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 9207          vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, "");
 9208   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9209   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9210   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 9211 }
 9212 
 9213 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 9214   assert(UseAVX > 0, "requires some form of AVX");
 9215   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
 9216          vector_len == AVX_256bit ? VM_Version::supports_avx2() :
 9217          vector_len == AVX_512bit ? VM_Version::supports_evex() : 0, "");
 9218   InstructionMark im(this);
 9219   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9220   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9221   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9222   emit_int8((unsigned char)0xEF);
 9223   emit_operand(dst, src, 0);
 9224 }
 9225 
 9226 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
 9228   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9229   attributes.set_rex_vex_w_reverted();
 9230   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9231   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 9232 }
 9233 
 9234 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9235   // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
 9236   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9237   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9238   attributes.set_is_evex_instruction();
 9239   attributes.set_embedded_opmask_register_specifier(mask);
 9240   if (merge) {
 9241     attributes.reset_is_clear_context();
 9242   }
 9243   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9244   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 9245 }
 9246 
 9247 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9248   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9249   InstructionMark im(this);
 9250   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9251   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
 9252   attributes.set_is_evex_instruction();
 9253   attributes.set_embedded_opmask_register_specifier(mask);
 9254   if (merge) {
 9255     attributes.reset_is_clear_context();
 9256   }
 9257   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9258   emit_int8((unsigned char)0xEF);
 9259   emit_operand(dst, src, 0);
 9260 }
 9261 
 9262 void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9263   // Encoding: EVEX.NDS.XXX.66.0F.W1 EF /r
 9264   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9265   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9266   attributes.set_is_evex_instruction();
 9267   attributes.set_embedded_opmask_register_specifier(mask);
 9268   if (merge) {
 9269     attributes.reset_is_clear_context();
 9270   }
 9271   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9272   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 9273 }
 9274 
 9275 void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9276   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9277   InstructionMark im(this);
 9278   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9279   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
 9280   attributes.set_is_evex_instruction();
 9281   attributes.set_embedded_opmask_register_specifier(mask);
 9282   if (merge) {
 9283     attributes.reset_is_clear_context();
 9284   }
 9285   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9286   emit_int8((unsigned char)0xEF);
 9287   emit_operand(dst, src, 0);
 9288 }
 9289 
 9290 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9291   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9292   InstructionMark im(this);
 9293   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9294   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
 9295   attributes.set_is_evex_instruction();
 9296   attributes.set_embedded_opmask_register_specifier(mask);
 9297   if (merge) {
 9298     attributes.reset_is_clear_context();
 9299   }
 9300   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9301   emit_int8((unsigned char)0xDB);
 9302   emit_operand(dst, src, 0);
 9303 }
 9304 
 9305 void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9306   assert(VM_Version::supports_evex(), "requires AVX512F");
 9307   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 9308   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9309   attributes.set_is_evex_instruction();
 9310   attributes.set_embedded_opmask_register_specifier(mask);
 9311   if (merge) {
 9312     attributes.reset_is_clear_context();
 9313   }
 9314   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9315   emit_int16((unsigned char)0xDB, (0xC0 | encode));
 9316 }
 9317 
 9318 void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9319   assert(VM_Version::supports_evex(), "requires AVX512F");
 9320   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 9321   InstructionMark im(this);
 9322   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9323   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
 9324   attributes.set_is_evex_instruction();
 9325   attributes.set_embedded_opmask_register_specifier(mask);
 9326   if (merge) {
 9327     attributes.reset_is_clear_context();
 9328   }
 9329   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9330   emit_int8((unsigned char)0xDB);
 9331   emit_operand(dst, src, 0);
 9332 }
 9333 
 9334 void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9335   assert(VM_Version::supports_evex(), "requires AVX512F");
 9336   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 9337   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9338   attributes.set_is_evex_instruction();
 9339   attributes.set_embedded_opmask_register_specifier(mask);
 9340   if (merge) {
 9341     attributes.reset_is_clear_context();
 9342   }
 9343   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9344   emit_int16((unsigned char)0xEB, (0xC0 | encode));
 9345 }
 9346 
 9347 void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9348   assert(VM_Version::supports_evex(), "requires AVX512F");
 9349   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 9350   InstructionMark im(this);
 9351   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9352   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
 9353   attributes.set_is_evex_instruction();
 9354   attributes.set_embedded_opmask_register_specifier(mask);
 9355   if (merge) {
 9356     attributes.reset_is_clear_context();
 9357   }
 9358   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9359   emit_int8((unsigned char)0xEB);
 9360   emit_operand(dst, src, 0);
 9361 }
 9362 
 9363 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 9364   assert(VM_Version::supports_evex(), "requires EVEX support");
 9365   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9366   attributes.set_is_evex_instruction();
 9367   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9368   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 9369 }
 9370 
 9371 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 9372   assert(VM_Version::supports_evex(), "requires EVEX support");
 9373   assert(dst != xnoreg, "sanity");
 9374   InstructionMark im(this);
 9375   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9376   attributes.set_is_evex_instruction();
 9377   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9378   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9379   emit_int8((unsigned char)0xEF);
 9380   emit_operand(dst, src, 0);
 9381 }
 9382 
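// Packed rotates are AVX-512 only. They share opcode 66 0F 72 with the shifts,
// with the extension in ModRM.reg: /1 (hence the dummy xmm1) rotates left and
// /0 (xmm0) rotates right.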
 9383 void Assembler::evprold(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 9384   assert(VM_Version::supports_evex(), "requires EVEX support");
 9385   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9386   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9387   attributes.set_is_evex_instruction();
 9388   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9389   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 9390 }
 9391 
 9392 void Assembler::evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 9393   assert(VM_Version::supports_evex(), "requires EVEX support");
 9394   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9395   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9396   attributes.set_is_evex_instruction();
 9397   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9398   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 9399 }
 9400 
 9401 void Assembler::evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 9402   assert(VM_Version::supports_evex(), "requires EVEX support");
 9403   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9404   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9405   attributes.set_is_evex_instruction();
 9406   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9407   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 9408 }
 9409 
 9410 void Assembler::evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
 9411   assert(VM_Version::supports_evex(), "requires EVEX support");
 9412   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9413   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9414   attributes.set_is_evex_instruction();
 9415   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9416   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
 9417 }
 9418 
 9419 void Assembler::evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9420   assert(VM_Version::supports_evex(), "requires EVEX support");
 9421   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9422   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9423   attributes.set_is_evex_instruction();
 9424   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9425   emit_int16(0x15, (unsigned char)(0xC0 | encode));
 9426 }
 9427 
 9428 void Assembler::evprolvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9429   assert(VM_Version::supports_evex(), "requires EVEX support");
 9430   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9431   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9432   attributes.set_is_evex_instruction();
 9433   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9434   emit_int16(0x15, (unsigned char)(0xC0 | encode));
 9435 }
 9436 
 9437 void Assembler::evprorvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9438   assert(VM_Version::supports_evex(), "requires EVEX support");
 9439   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9440   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9441   attributes.set_is_evex_instruction();
 9442   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9443   emit_int16(0x14, (unsigned char)(0xC0 | encode));
 9444 }
 9445 
 9446 void Assembler::evprorvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 9447   assert(VM_Version::supports_evex(), "requires EVEX support");
 9448   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9449   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9450   attributes.set_is_evex_instruction();
 9451   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9452   emit_int16(0x14, (unsigned char)(0xC0 | encode));
 9453 }
 9454 
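// Per-element leading-zero count from AVX512CD (EVEX.66.0F38 44); with a mask,
// counts are written only to the selected lanes.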
 9455 void Assembler::evplzcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9456   assert(VM_Version::supports_avx512cd(), "");
 9457   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9458   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9459   attributes.set_is_evex_instruction();
 9460   attributes.set_embedded_opmask_register_specifier(mask);
 9461   if (merge) {
 9462     attributes.reset_is_clear_context();
 9463   }
 9464   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9465   emit_int16(0x44, (0xC0 | encode));
 9466 }
 9467 
 9468 void Assembler::evplzcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9469   assert(VM_Version::supports_avx512cd(), "");
 9470   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9471   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9472   attributes.set_is_evex_instruction();
 9473   attributes.set_embedded_opmask_register_specifier(mask);
 9474   if (merge) {
 9475     attributes.reset_is_clear_context();
 9476   }
 9477   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9478   emit_int16(0x44, (0xC0 | encode));
 9479 }
 9480 
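// vpternlog treats imm8 as a 3-input truth table: result bit = imm8 bit at
// index (dst_bit << 2) | (src2_bit << 1) | src3_bit. For example, imm8 = 0x96
// should yield the three-way XOR of dst, src2 and src3.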
 9481 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
 9482   assert(VM_Version::supports_evex(), "requires EVEX support");
 9483   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9484   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9485   attributes.set_is_evex_instruction();
 9486   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (0xC0 | encode), imm8);
 9490 }
 9491 
 9492 void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) {
 9493   assert(VM_Version::supports_evex(), "requires EVEX support");
 9494   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9495   assert(dst != xnoreg, "sanity");
 9496   InstructionMark im(this);
 9497   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9498   attributes.set_is_evex_instruction();
 9499   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9500   vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9501   emit_int8(0x25);
 9502   emit_operand(dst, src3, 1);
 9503   emit_int8(imm8);
 9504 }
 9505 
 9506 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
 9507   assert(VM_Version::supports_evex(), "requires AVX512F");
 9508   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires AVX512VL");
 9509   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9510   attributes.set_is_evex_instruction();
 9511   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (0xC0 | encode), imm8);
 9515 }
 9516 
 9517 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len) {
 9518   assert(VM_Version::supports_evex(), "requires EVEX support");
 9519   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 9520   assert(dst != xnoreg, "sanity");
 9521   InstructionMark im(this);
 9522   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9523   attributes.set_is_evex_instruction();
 9524   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
 9525   vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9526   emit_int8(0x25);
 9527   emit_operand(dst, src3, 1);
 9528   emit_int8(imm8);
 9529 }
 9530 
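// Expand forms: consecutive source elements are written, in order, into the
// destination lanes selected by mask (opcode 0x88 for the ps/pd forms, 0x62
// for b/w, 0x89 for d/q).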
 9531 void Assembler::evexpandps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9532   assert(VM_Version::supports_evex(), "");
 9533   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9534   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9535   attributes.set_is_evex_instruction();
 9536   attributes.set_embedded_opmask_register_specifier(mask);
 9537   if (merge) {
 9538     attributes.reset_is_clear_context();
 9539   }
 9540   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9541   emit_int16((unsigned char)0x88, (0xC0 | encode));
 9542 }
 9543 
 9544 void Assembler::evexpandpd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9545   assert(VM_Version::supports_evex(), "");
 9546   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9547   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9548   attributes.set_is_evex_instruction();
 9549   attributes.set_embedded_opmask_register_specifier(mask);
 9550   if (merge) {
 9551     attributes.reset_is_clear_context();
 9552   }
 9553   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9554   emit_int16((unsigned char)0x88, (0xC0 | encode));
 9555 }
 9556 
 9557 void Assembler::evpexpandb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9558   assert(VM_Version::supports_avx512_vbmi2(), "");
 9559   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9560   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9561   attributes.set_is_evex_instruction();
 9562   attributes.set_embedded_opmask_register_specifier(mask);
 9563   if (merge) {
 9564     attributes.reset_is_clear_context();
 9565   }
 9566   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9567   emit_int16(0x62, (0xC0 | encode));
 9568 }
 9569 
 9570 void Assembler::evpexpandw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9571   assert(VM_Version::supports_avx512_vbmi2(), "");
 9572   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9573   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9574   attributes.set_is_evex_instruction();
 9575   attributes.set_embedded_opmask_register_specifier(mask);
 9576   if (merge) {
 9577     attributes.reset_is_clear_context();
 9578   }
 9579   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9580   emit_int16(0x62, (0xC0 | encode));
 9581 }
 9582 
 9583 void Assembler::evpexpandd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9584   assert(VM_Version::supports_evex(), "");
 9585   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9586   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9587   attributes.set_is_evex_instruction();
 9588   attributes.set_embedded_opmask_register_specifier(mask);
 9589   if (merge) {
 9590     attributes.reset_is_clear_context();
 9591   }
 9592   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9593   emit_int16((unsigned char)0x89, (0xC0 | encode));
 9594 }
 9595 
 9596 void Assembler::evpexpandq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 9597   assert(VM_Version::supports_evex(), "");
 9598   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9599   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9600   attributes.set_is_evex_instruction();
 9601   attributes.set_embedded_opmask_register_specifier(mask);
 9602   if (merge) {
 9603     attributes.reset_is_clear_context();
 9604   }
 9605   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9606   emit_int16((unsigned char)0x89, (0xC0 | encode));
 9607 }
 9608 
 9609 // vinserti forms
 9610 
 9611 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 9612   assert(VM_Version::supports_avx2(), "");
 9613   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9614   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9615   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9616   // last byte:
 9617   // 0x00 - insert into lower 128 bits
 9618   // 0x01 - insert into upper 128 bits
 9619   emit_int24(0x38, (0xC0 | encode), imm8 & 0x01);
 9620 }
 9621 
 9622 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 9623   assert(VM_Version::supports_avx2(), "");
 9624   assert(dst != xnoreg, "sanity");
 9625   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9626   InstructionMark im(this);
 9627   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9628   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9629   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9630   emit_int8(0x38);
 9631   emit_operand(dst, src, 1);
 9632   // 0x00 - insert into lower 128 bits
 9633   // 0x01 - insert into upper 128 bits
 9634   emit_int8(imm8 & 0x01);
 9635 }
 9636 
 9637 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 9638   assert(VM_Version::supports_evex(), "");
 9639   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9640   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9641   attributes.set_is_evex_instruction();
 9642   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9643   // imm8:
 9644   // 0x00 - insert into q0 128 bits (0..127)
 9645   // 0x01 - insert into q1 128 bits (128..255)
 9646   // 0x02 - insert into q2 128 bits (256..383)
 9647   // 0x03 - insert into q3 128 bits (384..511)
 9648   emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
 9649 }
 9650 
 9651 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 9652   assert(VM_Version::supports_evex(), "");
 9653   assert(dst != xnoreg, "sanity");
 9654   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9655   InstructionMark im(this);
 9656   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9657   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9658   attributes.set_is_evex_instruction();
 9659   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
 9661   emit_operand(dst, src, 1);
 9662   // 0x00 - insert into q0 128 bits (0..127)
 9663   // 0x01 - insert into q1 128 bits (128..255)
 9664   // 0x02 - insert into q2 128 bits (256..383)
 9665   // 0x03 - insert into q3 128 bits (384..511)
 9666   emit_int8(imm8 & 0x03);
 9667 }
 9668 
 9669 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 9670   assert(VM_Version::supports_evex(), "");
 9671   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9672   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9673   attributes.set_is_evex_instruction();
 9674   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
 9676   // 0x00 - insert into lower 256 bits
 9677   // 0x01 - insert into upper 256 bits
 9678   emit_int24(0x3A, (0xC0 | encode), imm8 & 0x01);
 9679 }
 9680 
void Assembler::evinserti64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8, int vector_len) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
}
 9689 
 9691 // vinsertf forms
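// The vinsertf* forms mirror the vinserti* lane selection above; they differ
// only in using the floating-point-domain opcodes (0x18/0x1A vs. 0x38/0x3A).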
 9692 
 9693 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 9694   assert(VM_Version::supports_avx(), "");
 9695   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9696   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9697   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9698   // imm8:
 9699   // 0x00 - insert into lower 128 bits
 9700   // 0x01 - insert into upper 128 bits
 9701   emit_int24(0x18, (0xC0 | encode), imm8 & 0x01);
 9702 }
 9703 
 9704 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 9705   assert(VM_Version::supports_avx(), "");
 9706   assert(dst != xnoreg, "sanity");
 9707   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9708   InstructionMark im(this);
 9709   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9710   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9711   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9712   emit_int8(0x18);
 9713   emit_operand(dst, src, 1);
 9714   // 0x00 - insert into lower 128 bits
 9715   // 0x01 - insert into upper 128 bits
 9716   emit_int8(imm8 & 0x01);
 9717 }
 9718 
 9719 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 9720   assert(VM_Version::supports_evex(), "");
 9721   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9722   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9723   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9724   // imm8:
 9725   // 0x00 - insert into q0 128 bits (0..127)
 9726   // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
 9729   emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
 9730 }
 9731 
 9732 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 9733   assert(VM_Version::supports_evex(), "");
 9734   assert(dst != xnoreg, "sanity");
 9735   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9736   InstructionMark im(this);
 9737   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9738   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9739   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9740   emit_int8(0x18);
 9741   emit_operand(dst, src, 1);
 9742   // 0x00 - insert into q0 128 bits (0..127)
 9743   // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
 9746   emit_int8(imm8 & 0x03);
 9747 }
 9748 
 9749 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 9750   assert(VM_Version::supports_evex(), "");
 9751   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9752   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9753   attributes.set_is_evex_instruction();
 9754   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9755   // imm8:
 9756   // 0x00 - insert into lower 256 bits
 9757   // 0x01 - insert into upper 256 bits
 9758   emit_int24(0x1A, (0xC0 | encode), imm8 & 0x01);
 9759 }
 9760 
 9761 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 9762   assert(VM_Version::supports_evex(), "");
 9763   assert(dst != xnoreg, "sanity");
 9764   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9765   InstructionMark im(this);
 9766   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9767   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
 9768   attributes.set_is_evex_instruction();
 9769   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9770   emit_int8(0x1A);
 9771   emit_operand(dst, src, 1);
 9772   // 0x00 - insert into lower 256 bits
 9773   // 0x01 - insert into upper 256 bits
 9774   emit_int8(imm8 & 0x01);
 9775 }
 9776 
 9777 
 9778 // vextracti forms
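// The vextracti* family is the inverse of vinserti*: imm8 selects which 128- or
// 256-bit lane of src is copied to dst (register or memory). A usage sketch,
// assuming the usual "#define __ masm->" shorthand:
//   __ vextracti128(xmm0, xmm1, 1);   // xmm0 = bits 255:128 of ymm1
//   __ vextracti64x4(xmm0, xmm1, 1);  // ymm0 = bits 511:256 of zmm1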
 9779 
 9780 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9781   assert(VM_Version::supports_avx2(), "");
 9782   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9783   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9784   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9785   // imm8:
 9786   // 0x00 - extract from lower 128 bits
 9787   // 0x01 - extract from upper 128 bits
 9788   emit_int24(0x39, (0xC0 | encode), imm8 & 0x01);
 9789 }
 9790 
 9791 void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
 9792   assert(VM_Version::supports_avx2(), "");
 9793   assert(src != xnoreg, "sanity");
 9794   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9795   InstructionMark im(this);
 9796   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9797   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9798   attributes.reset_is_clear_context();
 9799   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9800   emit_int8(0x39);
 9801   emit_operand(src, dst, 1);
 9802   // 0x00 - extract from lower 128 bits
 9803   // 0x01 - extract from upper 128 bits
 9804   emit_int8(imm8 & 0x01);
 9805 }
 9806 
 9807 void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9808   assert(VM_Version::supports_evex(), "");
 9809   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9810   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9811   attributes.set_is_evex_instruction();
 9812   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9813   // imm8:
 9814   // 0x00 - extract from bits 127:0
 9815   // 0x01 - extract from bits 255:128
 9816   // 0x02 - extract from bits 383:256
 9817   // 0x03 - extract from bits 511:384
 9818   emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
 9819 }
 9820 
 9821 void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
 9822   assert(VM_Version::supports_evex(), "");
 9823   assert(src != xnoreg, "sanity");
 9824   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9825   InstructionMark im(this);
 9826   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9827   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9828   attributes.reset_is_clear_context();
 9829   attributes.set_is_evex_instruction();
 9830   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9831   emit_int8(0x39);
 9832   emit_operand(src, dst, 1);
 9833   // 0x00 - extract from bits 127:0
 9834   // 0x01 - extract from bits 255:128
 9835   // 0x02 - extract from bits 383:256
 9836   // 0x03 - extract from bits 511:384
 9837   emit_int8(imm8 & 0x03);
 9838 }
 9839 
 9840 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9841   assert(VM_Version::supports_avx512dq(), "");
 9842   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9843   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9844   attributes.set_is_evex_instruction();
 9845   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9846   // imm8:
 9847   // 0x00 - extract from bits 127:0
 9848   // 0x01 - extract from bits 255:128
 9849   // 0x02 - extract from bits 383:256
 9850   // 0x03 - extract from bits 511:384
 9851   emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
 9852 }
 9853 
 9854 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9855   assert(VM_Version::supports_evex(), "");
 9856   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9857   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9858   attributes.set_is_evex_instruction();
 9859   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9860   // imm8:
 9861   // 0x00 - extract from lower 256 bits
 9862   // 0x01 - extract from upper 256 bits
 9863   emit_int24(0x3B, (0xC0 | encode), imm8 & 0x01);
 9864 }
 9865 
 9866 void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) {
 9867   assert(VM_Version::supports_evex(), "");
 9868   assert(src != xnoreg, "sanity");
 9869   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9870   InstructionMark im(this);
 9871   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9872   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
 9873   attributes.reset_is_clear_context();
 9874   attributes.set_is_evex_instruction();
 9875   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
 9877   emit_operand(src, dst, 1);
 9878   // 0x00 - extract from lower 256 bits
 9879   // 0x01 - extract from upper 256 bits
 9880   emit_int8(imm8 & 0x01);
 9881 }

// vextractf forms
 9883 
 9884 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9885   assert(VM_Version::supports_avx(), "");
 9886   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9887   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9888   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9889   // imm8:
 9890   // 0x00 - extract from lower 128 bits
 9891   // 0x01 - extract from upper 128 bits
 9892   emit_int24(0x19, (0xC0 | encode), imm8 & 0x01);
 9893 }
 9894 
 9895 void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
 9896   assert(VM_Version::supports_avx(), "");
 9897   assert(src != xnoreg, "sanity");
 9898   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9899   InstructionMark im(this);
 9900   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9901   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9902   attributes.reset_is_clear_context();
 9903   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9904   emit_int8(0x19);
 9905   emit_operand(src, dst, 1);
 9906   // 0x00 - extract from lower 128 bits
 9907   // 0x01 - extract from upper 128 bits
 9908   emit_int8(imm8 & 0x01);
 9909 }
 9910 
 9911 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9912   assert(VM_Version::supports_evex(), "");
 9913   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9914   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9915   attributes.set_is_evex_instruction();
 9916   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9917   // imm8:
 9918   // 0x00 - extract from bits 127:0
 9919   // 0x01 - extract from bits 255:128
 9920   // 0x02 - extract from bits 383:256
 9921   // 0x03 - extract from bits 511:384
 9922   emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
 9923 }
 9924 
 9925 void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
 9926   assert(VM_Version::supports_evex(), "");
 9927   assert(src != xnoreg, "sanity");
 9928   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9929   InstructionMark im(this);
 9930   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9931   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 9932   attributes.reset_is_clear_context();
 9933   attributes.set_is_evex_instruction();
 9934   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9935   emit_int8(0x19);
 9936   emit_operand(src, dst, 1);
 9937   // 0x00 - extract from bits 127:0
 9938   // 0x01 - extract from bits 255:128
 9939   // 0x02 - extract from bits 383:256
 9940   // 0x03 - extract from bits 511:384
 9941   emit_int8(imm8 & 0x03);
 9942 }
 9943 
 9944 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9945   assert(VM_Version::supports_avx512dq(), "");
 9946   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9947   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9948   attributes.set_is_evex_instruction();
 9949   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9950   // imm8:
 9951   // 0x00 - extract from bits 127:0
 9952   // 0x01 - extract from bits 255:128
 9953   // 0x02 - extract from bits 383:256
 9954   // 0x03 - extract from bits 511:384
 9955   emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
 9956 }
 9957 
 9958 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
 9959   assert(VM_Version::supports_evex(), "");
 9960   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9961   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9962   attributes.set_is_evex_instruction();
 9963   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9964   // imm8:
 9965   // 0x00 - extract from lower 256 bits
 9966   // 0x01 - extract from upper 256 bits
 9967   emit_int24(0x1B, (0xC0 | encode), imm8 & 0x01);
 9968 }
 9969 
 9970 void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
 9971   assert(VM_Version::supports_evex(), "");
 9972   assert(src != xnoreg, "sanity");
 9973   assert(imm8 <= 0x01, "imm8: %u", imm8);
 9974   InstructionMark im(this);
 9975   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9976   attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */  EVEX_64bit);
 9977   attributes.reset_is_clear_context();
 9978   attributes.set_is_evex_instruction();
 9979   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9980   emit_int8(0x1B);
 9981   emit_operand(src, dst, 1);
 9982   // 0x00 - extract from lower 256 bits
 9983   // 0x01 - extract from upper 256 bits
 9984   emit_int8(imm8 & 0x01);
 9985 }
 9986 
 9987 void Assembler::extractps(Register dst, XMMRegister src, uint8_t imm8) {
 9988   assert(VM_Version::supports_sse4_1(), "");
 9989   assert(imm8 <= 0x03, "imm8: %u", imm8);
 9990   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
 9991   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes, true);
 9992   // imm8:
 9993   // 0x00 - extract from bits 31:0
 9994   // 0x01 - extract from bits 63:32
 9995   // 0x02 - extract from bits 95:64
 9996   // 0x03 - extract from bits 127:96
 9997   emit_int24(0x17, (0xC0 | encode), imm8 & 0x03);
 9998 }
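
// extractps usage sketch (assuming "#define __ masm->"): move one float lane of
// an XMM register into a GPR, e.g. for a scalar reduction tail:
//   __ extractps(rax, xmm0, 2);  // eax = bits 95:64 of xmm0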
 9999 
// duplicate 1-byte integer data from src into every lane of dest; the
// EVEX-encoded forms require AVX512BW (and AVX512VL below 512 bits)
10001 void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
10002   assert(VM_Version::supports_avx2(), "");
10003   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10004   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10005   emit_int16(0x78, (0xC0 | encode));
10006 }
10007 
10008 void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
10009   assert(VM_Version::supports_avx2(), "");
10010   assert(dst != xnoreg, "sanity");
10011   InstructionMark im(this);
10012   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10013   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
10014   // swap src<->dst for encoding
10015   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10016   emit_int8(0x78);
10017   emit_operand(dst, src, 0);
10018 }
10019 
// duplicate 2-byte integer data from src into every lane of dest; the
// EVEX-encoded forms require AVX512BW (and AVX512VL below 512 bits)
10021 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
10022   assert(VM_Version::supports_avx2(), "");
10023   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10024   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10025   emit_int16(0x79, (0xC0 | encode));
10026 }
10027 
10028 void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
10029   assert(VM_Version::supports_avx2(), "");
10030   assert(dst != xnoreg, "sanity");
10031   InstructionMark im(this);
10032   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10033   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
10034   // swap src<->dst for encoding
10035   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10036   emit_int8(0x79);
10037   emit_operand(dst, src, 0);
10038 }
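
// Broadcast usage sketch (assuming "#define __ masm->" and AVX512BW for the
// 512-bit form): replicate one byte from memory across a vector, e.g. a fill
// value for a vectorized memset:
//   __ vpbroadcastb(xmm0, Address(rsi, 0), Assembler::AVX_512bit);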
10039 
10040 void Assembler::vpsadbw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10041   assert(UseAVX > 0, "requires some form of AVX");
10042   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
10043   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10044   emit_int16((unsigned char)0xF6, (0xC0 | encode));
10045 }
10046 
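// The vpunpck{h,l}* forms interleave elements from the high ("h") or low ("l")
// half of each 128-bit lane of nds and src, alternating nds and src elements.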
10047 void Assembler::vpunpckhwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10048   assert(UseAVX > 0, "requires some form of AVX");
10049   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10050   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10051   emit_int16(0x69, (0xC0 | encode));
10052 }
10053 
10054 void Assembler::vpunpcklwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10055   assert(UseAVX > 0, "requires some form of AVX");
10056   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10057   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10058   emit_int16(0x61, (0xC0 | encode));
10059 }
10060 
10061 void Assembler::vpunpckhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10062   assert(UseAVX > 0, "requires some form of AVX");
10063   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10064   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10065   emit_int16(0x6A, (0xC0 | encode));
10066 }
10067 
10068 void Assembler::vpunpckhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10069   assert(UseAVX > 0, "requires some form of AVX");
10070   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10071   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10072   emit_int16(0x6D, (0xC0 | encode));
10073 }
10074 
10075 void Assembler::vpunpckldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10076   assert(UseAVX > 0, "requires some form of AVX");
10077   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10078   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10079   emit_int16(0x62, (0xC0 | encode));
10080 }
10081 
10082 void Assembler::vpunpcklqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
10083   assert(UseAVX > 0, "requires some form of AVX");
10084   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
10085   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10086   emit_int16(0x6C, (0xC0 | encode));
10087 }
10088 
// Masked (EVEX opmask) arithmetic with xmm/mem sourced byte/word/dword/qword operands
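// All ev* forms below take an opmask register and a 'merge' flag. Lanes whose
// mask bit is clear either keep their old destination value (merge-masking,
// selected here via reset_is_clear_context()) or are zeroed (zero-masking,
// EVEX.z set). The EVEX_FV tuple on the memory forms drives disp8*N
// compressed-displacement scaling. Usage sketch, assuming "#define __ masm->"
// and a populated opmask k1:
//   __ evpaddd(xmm0, k1, xmm1, xmm2, /* merge */ true, Assembler::AVX_512bit);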
10090 void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10091   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10092   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10093   attributes.set_is_evex_instruction();
10094   attributes.set_embedded_opmask_register_specifier(mask);
10095   if (merge) {
10096     attributes.reset_is_clear_context();
10097   }
10098   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10099   emit_int16((unsigned char)0xFC, (0xC0 | encode));
10100 }
10101 
10102 void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10103   InstructionMark im(this);
10104   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10105   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10106   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10107   attributes.set_is_evex_instruction();
10108   attributes.set_embedded_opmask_register_specifier(mask);
10109   if (merge) {
10110     attributes.reset_is_clear_context();
10111   }
10112   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10113   emit_int8((unsigned char)0xFC);
10114   emit_operand(dst, src, 0);
10115 }
10116 
10117 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10118   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10119   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10120   attributes.set_is_evex_instruction();
10121   attributes.set_embedded_opmask_register_specifier(mask);
10122   if (merge) {
10123     attributes.reset_is_clear_context();
10124   }
10125   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10126   emit_int16((unsigned char)0xFD, (0xC0 | encode));
10127 }
10128 
10129 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10130   InstructionMark im(this);
10131   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10132   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10133   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10134   attributes.set_is_evex_instruction();
10135   attributes.set_embedded_opmask_register_specifier(mask);
10136   if (merge) {
10137     attributes.reset_is_clear_context();
10138   }
10139   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10140   emit_int8((unsigned char)0xFD);
10141   emit_operand(dst, src, 0);
10142 }
10143 
10144 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10145   assert(VM_Version::supports_evex(), "");
10146   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10147   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10148   attributes.set_is_evex_instruction();
10149   attributes.set_embedded_opmask_register_specifier(mask);
10150   if (merge) {
10151     attributes.reset_is_clear_context();
10152   }
10153   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10154   emit_int16((unsigned char)0xFE, (0xC0 | encode));
10155 }
10156 
10157 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10158   InstructionMark im(this);
10159   assert(VM_Version::supports_evex(), "");
10160   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10161   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10162   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10163   attributes.set_is_evex_instruction();
10164   attributes.set_embedded_opmask_register_specifier(mask);
10165   if (merge) {
10166     attributes.reset_is_clear_context();
10167   }
10168   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10169   emit_int8((unsigned char)0xFE);
10170   emit_operand(dst, src, 0);
10171 }
10172 
10173 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10174   assert(VM_Version::supports_evex(), "");
10175   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10176   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10177   attributes.set_is_evex_instruction();
10178   attributes.set_embedded_opmask_register_specifier(mask);
10179   if (merge) {
10180     attributes.reset_is_clear_context();
10181   }
10182   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10183   emit_int16((unsigned char)0xD4, (0xC0 | encode));
10184 }
10185 
10186 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10187   InstructionMark im(this);
10188   assert(VM_Version::supports_evex(), "");
10189   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10190   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10191   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10192   attributes.set_is_evex_instruction();
10193   attributes.set_embedded_opmask_register_specifier(mask);
10194   if (merge) {
10195     attributes.reset_is_clear_context();
10196   }
10197   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10198   emit_int8((unsigned char)0xD4);
10199   emit_operand(dst, src, 0);
10200 }
10201 
10202 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10203   assert(VM_Version::supports_evex(), "");
10204   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10205   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10206   attributes.set_is_evex_instruction();
10207   attributes.set_embedded_opmask_register_specifier(mask);
10208   if (merge) {
10209     attributes.reset_is_clear_context();
10210   }
10211   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10212   emit_int16(0x58, (0xC0 | encode));
10213 }
10214 
10215 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10216   InstructionMark im(this);
10217   assert(VM_Version::supports_evex(), "");
10218   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10219   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10220   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10221   attributes.set_is_evex_instruction();
10222   attributes.set_embedded_opmask_register_specifier(mask);
10223   if (merge) {
10224     attributes.reset_is_clear_context();
10225   }
10226   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10227   emit_int8(0x58);
10228   emit_operand(dst, src, 0);
10229 }
10230 
10231 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10232   assert(VM_Version::supports_evex(), "");
10233   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10234   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10235   attributes.set_is_evex_instruction();
10236   attributes.set_embedded_opmask_register_specifier(mask);
10237   if (merge) {
10238     attributes.reset_is_clear_context();
10239   }
10240   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10241   emit_int16(0x58, (0xC0 | encode));
10242 }
10243 
10244 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10245   InstructionMark im(this);
10246   assert(VM_Version::supports_evex(), "");
10247   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10248   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10249   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10250   attributes.set_is_evex_instruction();
10251   attributes.set_embedded_opmask_register_specifier(mask);
10252   if (merge) {
10253     attributes.reset_is_clear_context();
10254   }
10255   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10256   emit_int8(0x58);
10257   emit_operand(dst, src, 0);
10258 }
10259 
10260 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10261   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10262   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10263   attributes.set_is_evex_instruction();
10264   attributes.set_embedded_opmask_register_specifier(mask);
10265   if (merge) {
10266     attributes.reset_is_clear_context();
10267   }
10268   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10269   emit_int16((unsigned char)0xF8, (0xC0 | encode));
10270 }
10271 
10272 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10273   InstructionMark im(this);
10274   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10275   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10276   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10277   attributes.set_is_evex_instruction();
10278   attributes.set_embedded_opmask_register_specifier(mask);
10279   if (merge) {
10280     attributes.reset_is_clear_context();
10281   }
10282   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10283   emit_int8((unsigned char)0xF8);
10284   emit_operand(dst, src, 0);
10285 }
10286 
10287 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10288   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10289   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10290   attributes.set_is_evex_instruction();
10291   attributes.set_embedded_opmask_register_specifier(mask);
10292   if (merge) {
10293     attributes.reset_is_clear_context();
10294   }
10295   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10296   emit_int16((unsigned char)0xF9, (0xC0 | encode));
10297 }
10298 
10299 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10300   InstructionMark im(this);
10301   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10302   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10303   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10304   attributes.set_is_evex_instruction();
10305   attributes.set_embedded_opmask_register_specifier(mask);
10306   if (merge) {
10307     attributes.reset_is_clear_context();
10308   }
10309   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10310   emit_int8((unsigned char)0xF9);
10311   emit_operand(dst, src, 0);
10312 }
10313 
10314 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10315   assert(VM_Version::supports_evex(), "");
10316   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10317   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10318   attributes.set_is_evex_instruction();
10319   attributes.set_embedded_opmask_register_specifier(mask);
10320   if (merge) {
10321     attributes.reset_is_clear_context();
10322   }
10323   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10324   emit_int16((unsigned char)0xFA, (0xC0 | encode));
10325 }
10326 
10327 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10328   InstructionMark im(this);
10329   assert(VM_Version::supports_evex(), "");
10330   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10331   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10332   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10333   attributes.set_is_evex_instruction();
10334   attributes.set_embedded_opmask_register_specifier(mask);
10335   if (merge) {
10336     attributes.reset_is_clear_context();
10337   }
10338   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10339   emit_int8((unsigned char)0xFA);
10340   emit_operand(dst, src, 0);
10341 }
10342 
10343 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10344   assert(VM_Version::supports_evex(), "");
10345   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10346   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10347   attributes.set_is_evex_instruction();
10348   attributes.set_embedded_opmask_register_specifier(mask);
10349   if (merge) {
10350     attributes.reset_is_clear_context();
10351   }
10352   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10353   emit_int16((unsigned char)0xFB, (0xC0 | encode));
10354 }
10355 
10356 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10357   InstructionMark im(this);
10358   assert(VM_Version::supports_evex(), "");
10359   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10360   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10361   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10362   attributes.set_is_evex_instruction();
10363   attributes.set_embedded_opmask_register_specifier(mask);
10364   if (merge) {
10365     attributes.reset_is_clear_context();
10366   }
10367   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10368   emit_int8((unsigned char)0xFB);
10369   emit_operand(dst, src, 0);
10370 }
10371 
10372 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10373   assert(VM_Version::supports_evex(), "");
10374   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10375   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10376   attributes.set_is_evex_instruction();
10377   attributes.set_embedded_opmask_register_specifier(mask);
10378   if (merge) {
10379     attributes.reset_is_clear_context();
10380   }
10381   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10382   emit_int16(0x5C, (0xC0 | encode));
10383 }
10384 
10385 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10386   InstructionMark im(this);
10387   assert(VM_Version::supports_evex(), "");
10388   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10389   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10390   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10391   attributes.set_is_evex_instruction();
10392   attributes.set_embedded_opmask_register_specifier(mask);
10393   if (merge) {
10394     attributes.reset_is_clear_context();
10395   }
10396   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10397   emit_int8(0x5C);
10398   emit_operand(dst, src, 0);
10399 }
10400 
10401 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10402   assert(VM_Version::supports_evex(), "");
10403   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10404   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10405   attributes.set_is_evex_instruction();
10406   attributes.set_embedded_opmask_register_specifier(mask);
10407   if (merge) {
10408     attributes.reset_is_clear_context();
10409   }
10410   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10411   emit_int16(0x5C, (0xC0 | encode));
10412 }
10413 
10414 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10415   InstructionMark im(this);
10416   assert(VM_Version::supports_evex(), "");
10417   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10418   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10419   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10420   attributes.set_is_evex_instruction();
10421   attributes.set_embedded_opmask_register_specifier(mask);
10422   if (merge) {
10423     attributes.reset_is_clear_context();
10424   }
10425   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10426   emit_int8(0x5C);
10427   emit_operand(dst, src, 0);
10428 }
10429 
10430 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10431   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10432   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10433   attributes.set_is_evex_instruction();
10434   attributes.set_embedded_opmask_register_specifier(mask);
10435   if (merge) {
10436     attributes.reset_is_clear_context();
10437   }
10438   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10439   emit_int16((unsigned char)0xD5, (0xC0 | encode));
10440 }
10441 
10442 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10443   InstructionMark im(this);
10444   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10445   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10446   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10447   attributes.set_is_evex_instruction();
10448   attributes.set_embedded_opmask_register_specifier(mask);
10449   if (merge) {
10450     attributes.reset_is_clear_context();
10451   }
10452   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10453   emit_int8((unsigned char)0xD5);
10454   emit_operand(dst, src, 0);
10455 }
10456 
10457 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10458   assert(VM_Version::supports_evex(), "");
10459   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10460   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10461   attributes.set_is_evex_instruction();
10462   attributes.set_embedded_opmask_register_specifier(mask);
10463   if (merge) {
10464     attributes.reset_is_clear_context();
10465   }
10466   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10467   emit_int16(0x40, (0xC0 | encode));
10468 }
10469 
10470 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10471   InstructionMark im(this);
10472   assert(VM_Version::supports_evex(), "");
10473   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10474   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10475   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10476   attributes.set_is_evex_instruction();
10477   attributes.set_embedded_opmask_register_specifier(mask);
10478   if (merge) {
10479     attributes.reset_is_clear_context();
10480   }
10481   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10482   emit_int8(0x40);
10483   emit_operand(dst, src, 0);
10484 }
10485 
10486 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10487   assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10488   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10489   attributes.set_is_evex_instruction();
10490   attributes.set_embedded_opmask_register_specifier(mask);
10491   if (merge) {
10492     attributes.reset_is_clear_context();
10493   }
10494   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10495   emit_int16(0x40, (0xC0 | encode));
10496 }
10497 
10498 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10499   InstructionMark im(this);
10500   assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
10501   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10502   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10503   attributes.set_is_evex_instruction();
10504   attributes.set_embedded_opmask_register_specifier(mask);
10505   if (merge) {
10506     attributes.reset_is_clear_context();
10507   }
10508   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
10509   emit_int8(0x40);
10510   emit_operand(dst, src, 0);
10511 }
10512 
10513 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10514   assert(VM_Version::supports_evex(), "");
10515   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10516   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10517   attributes.set_is_evex_instruction();
10518   attributes.set_embedded_opmask_register_specifier(mask);
10519   if (merge) {
10520     attributes.reset_is_clear_context();
10521   }
10522   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10523   emit_int16(0x59, (0xC0 | encode));
10524 }
10525 
10526 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10527   InstructionMark im(this);
10528   assert(VM_Version::supports_evex(), "");
10529   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10530   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10531   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10532   attributes.set_is_evex_instruction();
10533   attributes.set_embedded_opmask_register_specifier(mask);
10534   if (merge) {
10535     attributes.reset_is_clear_context();
10536   }
10537   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10538   emit_int8(0x59);
10539   emit_operand(dst, src, 0);
10540 }
10541 
10542 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10543   assert(VM_Version::supports_evex(), "");
10544   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10545   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10546   attributes.set_is_evex_instruction();
10547   attributes.set_embedded_opmask_register_specifier(mask);
10548   if (merge) {
10549     attributes.reset_is_clear_context();
10550   }
10551   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10552   emit_int16(0x59, (0xC0 | encode));
10553 }
10554 
10555 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10556   InstructionMark im(this);
10557   assert(VM_Version::supports_evex(), "");
10558   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10559   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10560   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10561   attributes.set_is_evex_instruction();
10562   attributes.set_embedded_opmask_register_specifier(mask);
10563   if (merge) {
10564     attributes.reset_is_clear_context();
10565   }
10566   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10567   emit_int8(0x59);
10568   emit_operand(dst, src, 0);
10569 }
10570 
10571 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10572   assert(VM_Version::supports_evex(), "");
10573   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10574   InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10575   attributes.set_is_evex_instruction();
10576   attributes.set_embedded_opmask_register_specifier(mask);
10577   if (merge) {
10578     attributes.reset_is_clear_context();
10579   }
10580   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10581   emit_int16(0x51, (0xC0 | encode));
10582 }
10583 
10584 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10585   InstructionMark im(this);
10586   assert(VM_Version::supports_evex(), "");
10587   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10588   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10589   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10590   attributes.set_is_evex_instruction();
10591   attributes.set_embedded_opmask_register_specifier(mask);
10592   if (merge) {
10593     attributes.reset_is_clear_context();
10594   }
10595   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10596   emit_int8(0x51);
10597   emit_operand(dst, src, 0);
10598 }
10599 
10600 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10601   assert(VM_Version::supports_evex(), "");
10602   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10603   InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10604   attributes.set_is_evex_instruction();
10605   attributes.set_embedded_opmask_register_specifier(mask);
10606   if (merge) {
10607     attributes.reset_is_clear_context();
10608   }
10609   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10610   emit_int16(0x51, (0xC0 | encode));
10611 }
10612 
10613 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10614   InstructionMark im(this);
10615   assert(VM_Version::supports_evex(), "");
10616   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10617   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10618   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10619   attributes.set_is_evex_instruction();
10620   attributes.set_embedded_opmask_register_specifier(mask);
10621   if (merge) {
10622     attributes.reset_is_clear_context();
10623   }
10624   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10625   emit_int8(0x51);
10626   emit_operand(dst, src, 0);
10627 }
10628 
10630 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10631   assert(VM_Version::supports_evex(), "");
10632   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10633   InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10634   attributes.set_is_evex_instruction();
10635   attributes.set_embedded_opmask_register_specifier(mask);
10636   if (merge) {
10637     attributes.reset_is_clear_context();
10638   }
10639   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10640   emit_int16(0x5E, (0xC0 | encode));
10641 }
10642 
10643 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10644   InstructionMark im(this);
10645   assert(VM_Version::supports_evex(), "");
10646   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10647   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10648   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10649   attributes.set_is_evex_instruction();
10650   attributes.set_embedded_opmask_register_specifier(mask);
10651   if (merge) {
10652     attributes.reset_is_clear_context();
10653   }
10654   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
10655   emit_int8(0x5E);
10656   emit_operand(dst, src, 0);
10657 }
10658 
10659 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
10660   assert(VM_Version::supports_evex(), "");
10661   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10662   InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10663   attributes.set_is_evex_instruction();
10664   attributes.set_embedded_opmask_register_specifier(mask);
10665   if (merge) {
10666     attributes.reset_is_clear_context();
10667   }
10668   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10669   emit_int16(0x5E, (0xC0 | encode));
10670 }
10671 
10672 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
10673   InstructionMark im(this);
10674   assert(VM_Version::supports_evex(), "");
10675   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
10676   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
10677   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
10678   attributes.set_is_evex_instruction();
10679   attributes.set_embedded_opmask_register_specifier(mask);
10680   if (merge) {
10681     attributes.reset_is_clear_context();
10682   }
10683   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
10684   emit_int8(0x5E);
10685   emit_operand(dst, src, 0);
10686 }
10687 
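// evdivsd uses EVEX embedded rounding: when the extended (EVEX.b) context is
// set on a register-register operation, the L'L bits carry a static rounding
// mode rather than a vector length, which is why the EvexRoundPrefix value is
// passed through the vector_len slot of InstructionAttr.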
void Assembler::evdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src, EvexRoundPrefix rmode) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(rmode, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_extended_context();
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

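// Packed absolute value, vpabs{b,w,d,q} = 0F 38 1C/1D/1E/1F. The byte and word
// forms require AVX512BW; the dword and qword forms need only AVX512F (plus
// AVX512VL below 512 bits), with the qword form selected by EVEX.W1.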
void Assembler::evpabsb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1C, (0xC0 | encode));
}


void Assembler::evpabsb(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1C);
  emit_operand(dst, src, 0);
}

void Assembler::evpabsw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1D, (0xC0 | encode));
}


void Assembler::evpabsw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1D);
  emit_operand(dst, src, 0);
}

void Assembler::evpabsd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1E, (0xC0 | encode));
}


void Assembler::evpabsd(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1E);
  emit_operand(dst, src, 0);
}

void Assembler::evpabsq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1F, (0xC0 | encode));
}


void Assembler::evpabsq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1F);
  emit_operand(dst, src, 0);
}

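// Fused multiply-add with "213" operand ordering: dst = nds * dst + src.
// Opcode 0xA8 is vfmadd213p{s,d}; EVEX.W distinguishes single (W0) from
// double (W1) precision.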
void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xA8, (0xC0 | encode));
}

void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA8);
  emit_operand(dst, src, 0);
}

void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xA8, (0xC0 | encode));
}

void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA8);
  emit_operand(dst, src, 0);
}

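// Full cross-lane permutes. vpermb requires AVX512_VBMI and vpermw AVX512BW
// (both opcode 0x8D, W0 vs. W1); vpermd and vpermq share opcode 0x36 with
// EVEX.W selecting the element size. Since the shuffle crosses 128-bit lanes,
// the d/q forms have no 128-bit encoding, hence the vector_len > AVX_128bit asserts.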
void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}

void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}

void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src, 0);
}

void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}

void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src, 0);
}

void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}

void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src, 0);
}

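// Shift-by-immediate forms (opcodes 0x71/0x72/0x73) put the operation in the
// ModRM reg field as an opcode extension: /2 = shift right logical, /4 = shift
// right arithmetic, /6 = shift left. The extension is materialized by passing
// xmm2/xmm4/xmm6 as the "dst" encoding, while the real destination is carried
// in the EVEX vvvv (nds) slot.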
void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

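// Shift-by-register forms: the count comes from the low 64 bits of src and is
// applied to every element. Opcodes 0xD1-0xD3 are shift right logical,
// 0xE1/0xE2 shift right arithmetic, and 0xF1-0xF3 shift left, for
// word/dword/qword element sizes respectively.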
void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}

void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}

void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}

void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

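// Per-element (variable-count) shifts: each element is shifted by the count in
// the corresponding element of src. The word forms (opcodes 0x10/0x11/0x12)
// require AVX512BW; the dword/qword forms use 0x45/0x46/0x47 with EVEX.W
// selecting the element size.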
void Assembler::evpsllvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x12, (0xC0 | encode));
}

void Assembler::evpsllvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::evpsllvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}

void Assembler::evpsrlvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}

void Assembler::evpsrlvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::evpsrlvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}

void Assembler::evpsravw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x11, (0xC0 | encode));
}

void Assembler::evpsravd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

void Assembler::evpsravq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}

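// Packed signed min/max. The byte and dword forms live in the 0F 38 map
// (pminsb 0x38, pminsd 0x39, pmaxsb 0x3C, pmaxsd 0x3D); the word forms keep
// their legacy 0F map encodings (pminsw 0xEA, pmaxsw 0xEE). The qword
// variants are the EVEX.W1 versions of the dword opcodes.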
void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x38, (0xC0 | encode));
}

void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src, 0);
}

void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEA, (0xC0 | encode));
}

void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEA);
  emit_operand(dst, src, 0);
}

void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x39);
  emit_operand(dst, src, 0);
}

void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x39, (0xC0 | encode));
}

void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x39);
  emit_operand(dst, src, 0);
}


void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3C, (0xC0 | encode));
}

void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3C);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEE, (0xC0 | encode));
}

void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEE);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3D);
  emit_operand(dst, src, 0);
}

void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x3D, (0xC0 | encode));
}

void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x3D);
  emit_operand(dst, src, 0);
}

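// vpternlog interprets imm8 as a 3-input truth table: at each bit position the
// bits from dst, src2 and src3 form a 3-bit index into imm8, and the addressed
// table bit is the result. For example, imm8 == 0x96 yields the bitwise XOR of
// all three operands.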
void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3, 1);
  emit_int8(imm8);
}

void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3, 1);
  emit_int8(imm8);
}

11600 void Assembler::gf2p8affineqb(XMMRegister dst, XMMRegister src, int imm8) {
11601   assert(VM_Version::supports_gfni(), "");
11602   assert(VM_Version::supports_sse(), "");
11603   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
11604   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
11605   emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
11606 }
11607 
11608 void Assembler::vgf2p8affineqb(XMMRegister dst, XMMRegister src2, XMMRegister src3, int imm8, int vector_len) {
11609   assert(VM_Version::supports_gfni(), "requires GFNI support");
11610   assert(VM_Version::supports_sse(), "");
11611   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11612   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
11613   emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
11614 }
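
// GF2P8AFFINEQB treats each quadword of the first source as an 8x8 bit-matrix A
// and each byte x of the second source as a GF(2) column vector; every result
// byte is A*x XOR imm8, an affine transform over GF(2).  With a suitable matrix
// this gives, e.g., per-byte bit reversal in a single instruction.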
11615 
// duplicate 4-byte integer data from src into all lanes of dst; EVEX forms shorter than 512 bits require AVX512VL
11617 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
11618   assert(UseAVX >= 2, "");
11619   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11620   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11621   emit_int16(0x58, (0xC0 | encode));
11622 }
11623 
11624 void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
11625   assert(VM_Version::supports_avx2(), "");
11626   assert(dst != xnoreg, "sanity");
11627   InstructionMark im(this);
11628   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11629   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11630   // swap src<->dst for encoding
11631   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11632   emit_int8(0x58);
11633   emit_operand(dst, src, 0);
11634 }
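
// E.g. (illustrative operands) vpbroadcastd(xmm0, Address(rsi, 0), Assembler::AVX_256bit)
// replicates the dword at [rsi] into all eight lanes of ymm0.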
11635 
// duplicate 8-byte integer data from src into all lanes of dst; EVEX forms shorter than 512 bits require AVX512VL
11637 void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
11638   assert(VM_Version::supports_avx2(), "");
11639   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11640   attributes.set_rex_vex_w_reverted();
11641   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11642   emit_int16(0x59, (0xC0 | encode));
11643 }
11644 
11645 void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
11646   assert(VM_Version::supports_avx2(), "");
11647   assert(dst != xnoreg, "sanity");
11648   InstructionMark im(this);
11649   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11650   attributes.set_rex_vex_w_reverted();
11651   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
11652   // swap src<->dst for encoding
11653   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11654   emit_int8(0x59);
11655   emit_operand(dst, src, 0);
11656 }
11657 
11658 void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) {
11659   assert(vector_len != Assembler::AVX_128bit, "");
11660   assert(VM_Version::supports_evex(), "");
11661   assert(dst != xnoreg, "sanity");
11662   InstructionMark im(this);
11663   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11664   attributes.set_rex_vex_w_reverted();
11665   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
11666   // swap src<->dst for encoding
11667   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11668   emit_int8(0x5A);
11669   emit_operand(dst, src, 0);
11670 }
11671 
11672 void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
11673   assert(vector_len != Assembler::AVX_128bit, "");
11674   assert(VM_Version::supports_avx512dq(), "");
11675   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11676   attributes.set_rex_vex_w_reverted();
11677   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11678   emit_int16(0x5A, (0xC0 | encode));
11679 }
11680 
11681 void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
11682   assert(vector_len != Assembler::AVX_128bit, "");
11683   assert(VM_Version::supports_avx512dq(), "");
11684   assert(dst != xnoreg, "sanity");
11685   InstructionMark im(this);
11686   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11687   attributes.set_rex_vex_w_reverted();
11688   attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
11689   // swap src<->dst for encoding
11690   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11691   emit_int8(0x5A);
11692   emit_operand(dst, src, 0);
11693 }
11694 
11695 // scalar single/double precision replicate
11696 
// duplicate single-precision data from src into all lanes of dst; EVEX forms shorter than 512 bits require AVX512VL
11698 void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
11699   assert(VM_Version::supports_avx2(), "");
11700   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11701   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11702   emit_int16(0x18, (0xC0 | encode));
11703 }
11704 
11705 void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) {
11706   assert(VM_Version::supports_avx(), "");
11707   assert(dst != xnoreg, "sanity");
11708   InstructionMark im(this);
11709   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11710   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11711   // swap src<->dst for encoding
11712   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11713   emit_int8(0x18);
11714   emit_operand(dst, src, 0);
11715 }
11716 
// duplicate double-precision data from src into all lanes of dst; EVEX forms shorter than 512 bits require AVX512VL
11718 void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
11719   assert(VM_Version::supports_avx2(), "");
11720   assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
11721   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11722   attributes.set_rex_vex_w_reverted();
11723   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11724   emit_int16(0x19, (0xC0 | encode));
11725 }
11726 
11727 void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) {
11728   assert(VM_Version::supports_avx(), "");
11729   assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
11730   assert(dst != xnoreg, "sanity");
11731   InstructionMark im(this);
11732   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11733   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
11734   attributes.set_rex_vex_w_reverted();
11735   // swap src<->dst for encoding
11736   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11737   emit_int8(0x19);
11738   emit_operand(dst, src, 0);
11739 }
11740 
11741 void Assembler::vbroadcastf128(XMMRegister dst, Address src, int vector_len) {
11742   assert(VM_Version::supports_avx(), "");
11743   assert(vector_len == AVX_256bit, "");
11744   assert(dst != xnoreg, "sanity");
11745   InstructionMark im(this);
11746   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11747   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
11748   // swap src<->dst for encoding
11749   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11750   emit_int8(0x1A);
11751   emit_operand(dst, src, 0);
11752 }
11753 
11754 void Assembler::evbroadcastf64x2(XMMRegister dst, Address src, int vector_len) {
11755   assert(VM_Version::supports_avx512dq(), "");
11756   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11757   assert(dst != xnoreg, "sanity");
11758   InstructionMark im(this);
11759   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11760   attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
11761   attributes.set_is_evex_instruction();
11762   // swap src<->dst for encoding
11763   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11764   emit_int8(0x1A);
11765   emit_operand(dst, src, 0);
11766 }
11767 
11768 
11769 // gpr source broadcast forms
11770 
// duplicate 1-byte integer data from src into all lanes of dst; requires AVX512BW (and AVX512VL below 512 bits)
11772 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
11773   assert(VM_Version::supports_avx512bw(), "");
11774   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
11775   attributes.set_is_evex_instruction();
11776   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
11777   emit_int16(0x7A, (0xC0 | encode));
11778 }
11779 
// duplicate 2-byte integer data from src into all lanes of dst; requires AVX512BW (and AVX512VL below 512 bits)
11781 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
11782   assert(VM_Version::supports_avx512bw(), "");
11783   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
11784   attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
11786   emit_int16(0x7B, (0xC0 | encode));
11787 }
11788 
// duplicate 4-byte integer data from src into all lanes of dst; sub-512-bit forms require AVX512VL
11790 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
11791   assert(VM_Version::supports_evex(), "");
11792   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11793   attributes.set_is_evex_instruction();
11794   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
11795   emit_int16(0x7C, (0xC0 | encode));
11796 }
11797 
// duplicate 8-byte integer data from src into all lanes of dst; sub-512-bit forms require AVX512VL
11799 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
11800   assert(VM_Version::supports_evex(), "");
11801   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11802   attributes.set_is_evex_instruction();
11803   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
11804   emit_int16(0x7C, (0xC0 | encode));
11805 }
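
// E.g. (illustrative operands) evpbroadcastd(xmm0, rax, Assembler::AVX_512bit)
// replicates eax into all sixteen dword lanes of zmm0; the quadword form
// replicates rax into eight lanes.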
11806 
11807 void Assembler::vpgatherdd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
11808   assert(VM_Version::supports_avx2(), "");
11809   assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
11810   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
11811   assert(dst != xnoreg, "sanity");
11812   assert(src.isxmmindex(),"expected to be xmm index");
11813   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11814   InstructionMark im(this);
11815   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
11816   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11817   emit_int8((unsigned char)0x90);
11818   emit_operand(dst, src, 0);
11819 }
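
// The AVX2 gathers load dst[i] from [base + disp + scale * index[i]] for every
// lane whose mask lane has its sign bit set, clearing that mask lane as the
// element arrives; the XMM mask register is therefore destroyed.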
11820 
11821 void Assembler::vpgatherdq(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
11822   assert(VM_Version::supports_avx2(), "");
11823   assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
11824   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
11825   assert(dst != xnoreg, "sanity");
11826   assert(src.isxmmindex(),"expected to be xmm index");
11827   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11828   InstructionMark im(this);
11829   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
11830   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11831   emit_int8((unsigned char)0x90);
11832   emit_operand(dst, src, 0);
11833 }
11834 
11835 void Assembler::vgatherdpd(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
11836   assert(VM_Version::supports_avx2(), "");
11837   assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
11838   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
11839   assert(dst != xnoreg, "sanity");
11840   assert(src.isxmmindex(),"expected to be xmm index");
11841   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11842   InstructionMark im(this);
11843   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
11844   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11845   emit_int8((unsigned char)0x92);
11846   emit_operand(dst, src, 0);
11847 }
11848 
11849 void Assembler::vgatherdps(XMMRegister dst, Address src, XMMRegister mask, int vector_len) {
11850   assert(VM_Version::supports_avx2(), "");
11851   assert(!needs_eevex(src.base()), "does not support extended gprs as BASE of address operand");
11852   assert(vector_len == Assembler::AVX_128bit || vector_len == Assembler::AVX_256bit, "");
11853   assert(dst != xnoreg, "sanity");
11854   assert(src.isxmmindex(),"expected to be xmm index");
11855   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11856   InstructionMark im(this);
11857   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
11858   vex_prefix(src, mask->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11859   emit_int8((unsigned char)0x92);
11860   emit_operand(dst, src, 0);
11861 }

void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
11863   assert(VM_Version::supports_evex(), "");
11864   assert(dst != xnoreg, "sanity");
11865   assert(src.isxmmindex(),"expected to be xmm index");
11866   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11867   assert(mask != k0, "instruction will #UD if mask is in k0");
11868   InstructionMark im(this);
11869   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11870   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11871   attributes.reset_is_clear_context();
11872   attributes.set_embedded_opmask_register_specifier(mask);
11873   attributes.set_is_evex_instruction();
11874   // swap src<->dst for encoding
11875   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11876   emit_int8((unsigned char)0x90);
11877   emit_operand(dst, src, 0);
11878 }
11879 
11880 void Assembler::evpgatherdq(XMMRegister dst, KRegister mask, Address src, int vector_len) {
11881   assert(VM_Version::supports_evex(), "");
11882   assert(dst != xnoreg, "sanity");
11883   assert(src.isxmmindex(),"expected to be xmm index");
11884   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11885   assert(mask != k0, "instruction will #UD if mask is in k0");
11886   InstructionMark im(this);
11887   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11888   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11889   attributes.reset_is_clear_context();
11890   attributes.set_embedded_opmask_register_specifier(mask);
11891   attributes.set_is_evex_instruction();
11892   // swap src<->dst for encoding
11893   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11894   emit_int8((unsigned char)0x90);
11895   emit_operand(dst, src, 0);
11896 }
11897 
11898 void Assembler::evgatherdpd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
11899   assert(VM_Version::supports_evex(), "");
11900   assert(dst != xnoreg, "sanity");
11901   assert(src.isxmmindex(),"expected to be xmm index");
11902   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11903   assert(mask != k0, "instruction will #UD if mask is in k0");
11904   InstructionMark im(this);
11905   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11906   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11907   attributes.reset_is_clear_context();
11908   attributes.set_embedded_opmask_register_specifier(mask);
11909   attributes.set_is_evex_instruction();
11910   // swap src<->dst for encoding
11911   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11912   emit_int8((unsigned char)0x92);
11913   emit_operand(dst, src, 0);
11914 }
11915 
11916 void Assembler::evgatherdps(XMMRegister dst, KRegister mask, Address src, int vector_len) {
11917   assert(VM_Version::supports_evex(), "");
11918   assert(dst != xnoreg, "sanity");
11919   assert(src.isxmmindex(),"expected to be xmm index");
11920   assert(dst != src.xmmindex(), "instruction will #UD if dst and index are the same");
11921   assert(mask != k0, "instruction will #UD if mask is in k0");
11922   InstructionMark im(this);
11923   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11924   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11925   attributes.reset_is_clear_context();
11926   attributes.set_embedded_opmask_register_specifier(mask);
11927   attributes.set_is_evex_instruction();
11928   // swap src<->dst for encoding
11929   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11930   emit_int8((unsigned char)0x92);
11931   emit_operand(dst, src, 0);
11932 }
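
// The EVEX gathers take the completion mask in a k-register instead of an XMM
// register: only lanes whose k-bit is set are loaded, and each bit is cleared
// as its lane completes, so the mask is destroyed (and must not be k0).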
11933 
11934 void Assembler::evpscatterdd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
11935   assert(VM_Version::supports_evex(), "");
11936   assert(mask != k0, "instruction will #UD if mask is in k0");
11937   InstructionMark im(this);
11938   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11939   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11940   attributes.reset_is_clear_context();
11941   attributes.set_embedded_opmask_register_specifier(mask);
11942   attributes.set_is_evex_instruction();
11943   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11944   emit_int8((unsigned char)0xA0);
11945   emit_operand(src, dst, 0);
11946 }
11947 
11948 void Assembler::evpscatterdq(Address dst, KRegister mask, XMMRegister src, int vector_len) {
11949   assert(VM_Version::supports_evex(), "");
11950   assert(mask != k0, "instruction will #UD if mask is in k0");
11951   InstructionMark im(this);
11952   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11953   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11954   attributes.reset_is_clear_context();
11955   attributes.set_embedded_opmask_register_specifier(mask);
11956   attributes.set_is_evex_instruction();
11957   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11958   emit_int8((unsigned char)0xA0);
11959   emit_operand(src, dst, 0);
11960 }
11961 
11962 void Assembler::evscatterdps(Address dst, KRegister mask, XMMRegister src, int vector_len) {
11963   assert(VM_Version::supports_evex(), "");
11964   assert(mask != k0, "instruction will #UD if mask is in k0");
11965   InstructionMark im(this);
11966   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11967   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11968   attributes.reset_is_clear_context();
11969   attributes.set_embedded_opmask_register_specifier(mask);
11970   attributes.set_is_evex_instruction();
11971   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11972   emit_int8((unsigned char)0xA2);
11973   emit_operand(src, dst, 0);
11974 }
11975 
11976 void Assembler::evscatterdpd(Address dst, KRegister mask, XMMRegister src, int vector_len) {
11977   assert(VM_Version::supports_evex(), "");
11978   assert(mask != k0, "instruction will #UD if mask is in k0");
11979   InstructionMark im(this);
11980   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11981   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
11982   attributes.reset_is_clear_context();
11983   attributes.set_embedded_opmask_register_specifier(mask);
11984   attributes.set_is_evex_instruction();
11985   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11986   emit_int8((unsigned char)0xA2);
11987   emit_operand(src, dst, 0);
11988 }
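
// The scatters are the store-side mirror of the gathers above: lanes whose
// k-bit is set are written to [base + disp + scale * index[i]] in ascending
// lane order (so on index collisions the highest lane wins), and the k-bits
// clear as the stores complete.
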
11989 // Carry-Less Multiplication Quadword
11990 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
11991   assert(VM_Version::supports_clmul(), "");
11992   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
11993   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
11994   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
11995 }
11996 
11997 // Carry-Less Multiplication Quadword
11998 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
11999   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
12000   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12001   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12002   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
12003 }
12004 
12005 void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
12006   assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
12007   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
12008   attributes.set_is_evex_instruction();
12009   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12010   emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
12011 }
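
// For all three forms, imm8 bit 0 selects the low or high quadword of the first
// source and bit 4 that of the second (0x00 = low x low, 0x11 = high x high);
// the result is the 128-bit carry-less (GF(2) polynomial) product, the building
// block of the CRC32 and GHASH stubs.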
12012 
12013 void Assembler::vzeroupper_uncached() {
12014   if (VM_Version::supports_vzeroupper()) {
12015     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12016     (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
12017     emit_int8(0x77);
12018   }
12019 }
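
// Clearing the upper YMM/ZMM state before running SSE-only code sidesteps the
// costly AVX-to-SSE transition penalty; on CPUs where VZEROUPPER itself is
// unprofitable, supports_vzeroupper() is false and nothing is emitted.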
12020 
12021 void Assembler::vfpclassss(KRegister kdst, XMMRegister src, uint8_t imm8) {
12022   // Encoding: EVEX.LIG.66.0F3A.W0 67 /r ib
12023   assert(VM_Version::supports_evex(), "");
12024   assert(VM_Version::supports_avx512dq(), "");
12025   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
12026   attributes.set_is_evex_instruction();
12027   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12028   emit_int24((unsigned char)0x67, (unsigned char)(0xC0 | encode), imm8);
12029 }
12030 
12031 void Assembler::vfpclasssd(KRegister kdst, XMMRegister src, uint8_t imm8) {
12032   // Encoding: EVEX.LIG.66.0F3A.W1 67 /r ib
12033   assert(VM_Version::supports_evex(), "");
12034   assert(VM_Version::supports_avx512dq(), "");
12035   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
12036   attributes.set_is_evex_instruction();
12037   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12038   emit_int24((unsigned char)0x67, (unsigned char)(0xC0 | encode), imm8);
12039 }
12040 
12041 void Assembler::fld_x(Address adr) {
12042   InstructionMark im(this);
12043   emit_int8((unsigned char)0xDB);
12044   emit_operand32(rbp, adr, 0);
12045 }
12046 
12047 void Assembler::fstp_x(Address adr) {
12048   InstructionMark im(this);
12049   emit_int8((unsigned char)0xDB);
12050   emit_operand32(rdi, adr, 0);
12051 }
12052 
12053 void Assembler::emit_operand32(Register reg, Address adr, int post_addr_length) {
12054   assert(reg->encoding() < 8, "no extended registers");
12055   assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
12056   emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec, post_addr_length);
12057 }
12058 
12059 void Assembler::fld_d(Address adr) {
12060   InstructionMark im(this);
12061   emit_int8((unsigned char)0xDD);
12062   emit_operand32(rax, adr, 0);
12063 }
12064 
12065 void Assembler::fprem() {
12066   emit_int16((unsigned char)0xD9, (unsigned char)0xF8);
12067 }
12068 
12069 void Assembler::fnstsw_ax() {
12070   emit_int16((unsigned char)0xDF, (unsigned char)0xE0);
12071 }
12072 
12073 void Assembler::fstp_d(Address adr) {
12074   InstructionMark im(this);
12075   emit_int8((unsigned char)0xDD);
12076   emit_operand32(rbx, adr, 0);
12077 }
12078 
12079 void Assembler::fstp_d(int index) {
12080   emit_farith(0xDD, 0xD8, index);
12081 }
12082 
12083 void Assembler::emit_farith(int b1, int b2, int i) {
12084   assert(isByte(b1) && isByte(b2), "wrong opcode");
12085   assert(0 <= i &&  i < 8, "illegal stack offset");
12086   emit_int16(b1, b2 + i);
12087 }
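
// The register forms of the x87 instructions encode ST(i) by adding i to the
// second opcode byte: e.g. emit_farith(0xDE, 0xC0, 1) emits DE C1, i.e.
// FADDP ST(1), ST(0).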
12088 
12089 #ifndef _LP64
12090 // 32bit only pieces of the assembler
12091 
12092 void Assembler::emms() {
12093   NOT_LP64(assert(VM_Version::supports_mmx(), ""));
12094   emit_int16(0x0F, 0x77);
12095 }
12096 
12097 void Assembler::vzeroupper() {
12098   vzeroupper_uncached();
12099 }
12100 
12101 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
12102   // NO PREFIX AS NEVER 64BIT
12103   InstructionMark im(this);
12104   emit_int16((unsigned char)0x81, (0xF8 | src1->encoding()));
12105   emit_data(imm32, rspec, 0);
12106 }
12107 
12108 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32-bit versions of 64-bit regs)
12110   InstructionMark im(this);
12111   emit_int8((unsigned char)0x81);
12112   emit_operand(rdi, src1, 4);
12113   emit_data(imm32, rspec, 0);
12114 }
12115 
// The 64-bit cmpxchg on a 32-bit platform (CMPXCHG8B) compares the value at adr with the
// contents of rdx:rax and stores rcx:rbx into adr if they are equal; otherwise, the value
// at adr is loaded into rdx:rax.  ZF is set if the compared values were equal, else cleared.
12119 void Assembler::cmpxchg8(Address adr) {
12120   InstructionMark im(this);
12121   emit_int16(0x0F, (unsigned char)0xC7);
12122   emit_operand(rcx, adr, 0);
12123 }
12124 
12125 void Assembler::decl(Register dst) {
12126   // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
12128 }
12129 
12130 void Assembler::edecl(Register dst, Register src, bool no_flags) {
12131   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
12132   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
12133   emit_int8(0x48 | src->encoding());
12134 }
12135 
// 64-bit mode doesn't use the x87, so the x87 instructions below are 32-bit only
12137 
12138 void Assembler::fabs() {
12139   emit_int16((unsigned char)0xD9, (unsigned char)0xE1);
12140 }
12141 
12142 void Assembler::fadd(int i) {
12143   emit_farith(0xD8, 0xC0, i);
12144 }
12145 
12146 void Assembler::fadd_d(Address src) {
12147   InstructionMark im(this);
12148   emit_int8((unsigned char)0xDC);
12149   emit_operand32(rax, src, 0);
12150 }
12151 
12152 void Assembler::fadd_s(Address src) {
12153   InstructionMark im(this);
12154   emit_int8((unsigned char)0xD8);
12155   emit_operand32(rax, src, 0);
12156 }
12157 
12158 void Assembler::fadda(int i) {
12159   emit_farith(0xDC, 0xC0, i);
12160 }
12161 
12162 void Assembler::faddp(int i) {
12163   emit_farith(0xDE, 0xC0, i);
12164 }
12165 
12166 void Assembler::fchs() {
12167   emit_int16((unsigned char)0xD9, (unsigned char)0xE0);
12168 }
12169 
12170 void Assembler::fcom(int i) {
12171   emit_farith(0xD8, 0xD0, i);
12172 }
12173 
12174 void Assembler::fcomp(int i) {
12175   emit_farith(0xD8, 0xD8, i);
12176 }
12177 
12178 void Assembler::fcomp_d(Address src) {
12179   InstructionMark im(this);
12180   emit_int8((unsigned char)0xDC);
12181   emit_operand32(rbx, src, 0);
12182 }
12183 
12184 void Assembler::fcomp_s(Address src) {
12185   InstructionMark im(this);
12186   emit_int8((unsigned char)0xD8);
12187   emit_operand32(rbx, src, 0);
12188 }
12189 
12190 void Assembler::fcompp() {
12191   emit_int16((unsigned char)0xDE, (unsigned char)0xD9);
12192 }
12193 
12194 void Assembler::fcos() {
12195   emit_int16((unsigned char)0xD9, (unsigned char)0xFF);
12196 }
12197 
12198 void Assembler::fdecstp() {
12199   emit_int16((unsigned char)0xD9, (unsigned char)0xF6);
12200 }
12201 
12202 void Assembler::fdiv(int i) {
12203   emit_farith(0xD8, 0xF0, i);
12204 }
12205 
12206 void Assembler::fdiv_d(Address src) {
12207   InstructionMark im(this);
12208   emit_int8((unsigned char)0xDC);
12209   emit_operand32(rsi, src, 0);
12210 }
12211 
12212 void Assembler::fdiv_s(Address src) {
12213   InstructionMark im(this);
12214   emit_int8((unsigned char)0xD8);
12215   emit_operand32(rsi, src, 0);
12216 }
12217 
12218 void Assembler::fdiva(int i) {
12219   emit_farith(0xDC, 0xF8, i);
12220 }
12221 
12222 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
12223 //       is erroneous for some of the floating-point instructions below.
12224 
12225 void Assembler::fdivp(int i) {
12226   emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
12227 }
12228 
12229 void Assembler::fdivr(int i) {
12230   emit_farith(0xD8, 0xF8, i);
12231 }
12232 
12233 void Assembler::fdivr_d(Address src) {
12234   InstructionMark im(this);
12235   emit_int8((unsigned char)0xDC);
12236   emit_operand32(rdi, src, 0);
12237 }
12238 
12239 void Assembler::fdivr_s(Address src) {
12240   InstructionMark im(this);
12241   emit_int8((unsigned char)0xD8);
12242   emit_operand32(rdi, src, 0);
12243 }
12244 
12245 void Assembler::fdivra(int i) {
12246   emit_farith(0xDC, 0xF0, i);
12247 }
12248 
12249 void Assembler::fdivrp(int i) {
12250   emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
12251 }
12252 
12253 void Assembler::ffree(int i) {
12254   emit_farith(0xDD, 0xC0, i);
12255 }
12256 
12257 void Assembler::fild_d(Address adr) {
12258   InstructionMark im(this);
12259   emit_int8((unsigned char)0xDF);
12260   emit_operand32(rbp, adr, 0);
12261 }
12262 
12263 void Assembler::fild_s(Address adr) {
12264   InstructionMark im(this);
12265   emit_int8((unsigned char)0xDB);
12266   emit_operand32(rax, adr, 0);
12267 }
12268 
12269 void Assembler::fincstp() {
12270   emit_int16((unsigned char)0xD9, (unsigned char)0xF7);
12271 }
12272 
12273 void Assembler::finit() {
12274   emit_int24((unsigned char)0x9B, (unsigned char)0xDB, (unsigned char)0xE3);
12275 }
12276 
12277 void Assembler::fist_s(Address adr) {
12278   InstructionMark im(this);
12279   emit_int8((unsigned char)0xDB);
12280   emit_operand32(rdx, adr, 0);
12281 }
12282 
12283 void Assembler::fistp_d(Address adr) {
12284   InstructionMark im(this);
12285   emit_int8((unsigned char)0xDF);
12286   emit_operand32(rdi, adr, 0);
12287 }
12288 
12289 void Assembler::fistp_s(Address adr) {
12290   InstructionMark im(this);
12291   emit_int8((unsigned char)0xDB);
12292   emit_operand32(rbx, adr, 0);
12293 }
12294 
12295 void Assembler::fld1() {
12296   emit_int16((unsigned char)0xD9, (unsigned char)0xE8);
12297 }
12298 
12299 void Assembler::fld_s(Address adr) {
12300   InstructionMark im(this);
12301   emit_int8((unsigned char)0xD9);
12302   emit_operand32(rax, adr, 0);
}

void Assembler::fld_s(int index) {
12307   emit_farith(0xD9, 0xC0, index);
12308 }
12309 
12310 void Assembler::fldcw(Address src) {
12311   InstructionMark im(this);
12312   emit_int8((unsigned char)0xD9);
12313   emit_operand32(rbp, src, 0);
12314 }
12315 
12316 void Assembler::fldenv(Address src) {
12317   InstructionMark im(this);
12318   emit_int8((unsigned char)0xD9);
12319   emit_operand32(rsp, src, 0);
12320 }
12321 
12322 void Assembler::fldlg2() {
12323   emit_int16((unsigned char)0xD9, (unsigned char)0xEC);
12324 }
12325 
12326 void Assembler::fldln2() {
12327   emit_int16((unsigned char)0xD9, (unsigned char)0xED);
12328 }
12329 
12330 void Assembler::fldz() {
12331   emit_int16((unsigned char)0xD9, (unsigned char)0xEE);
12332 }
12333 
12334 void Assembler::flog() {
12335   fldln2();
12336   fxch();
12337   fyl2x();
12338 }
12339 
12340 void Assembler::flog10() {
12341   fldlg2();
12342   fxch();
12343   fyl2x();
12344 }
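
// Both compositions lean on fyl2x, which computes ST(1) * log2(ST(0)) and pops:
// pushing ln(2) (resp. log10(2)) and swapping yields ln(x) = ln(2) * log2(x)
// and log10(x) = log10(2) * log2(x), consuming x from the top of the stack.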
12345 
12346 void Assembler::fmul(int i) {
12347   emit_farith(0xD8, 0xC8, i);
12348 }
12349 
12350 void Assembler::fmul_d(Address src) {
12351   InstructionMark im(this);
12352   emit_int8((unsigned char)0xDC);
12353   emit_operand32(rcx, src, 0);
12354 }
12355 
12356 void Assembler::fmul_s(Address src) {
12357   InstructionMark im(this);
12358   emit_int8((unsigned char)0xD8);
12359   emit_operand32(rcx, src, 0);
12360 }
12361 
12362 void Assembler::fmula(int i) {
12363   emit_farith(0xDC, 0xC8, i);
12364 }
12365 
12366 void Assembler::fmulp(int i) {
12367   emit_farith(0xDE, 0xC8, i);
12368 }
12369 
12370 void Assembler::fnsave(Address dst) {
12371   InstructionMark im(this);
12372   emit_int8((unsigned char)0xDD);
12373   emit_operand32(rsi, dst, 0);
12374 }
12375 
12376 void Assembler::fnstcw(Address src) {
12377   InstructionMark im(this);
12378   emit_int16((unsigned char)0x9B, (unsigned char)0xD9);
12379   emit_operand32(rdi, src, 0);
12380 }
12381 
12382 void Assembler::fprem1() {
12383   emit_int16((unsigned char)0xD9, (unsigned char)0xF5);
12384 }
12385 
12386 void Assembler::frstor(Address src) {
12387   InstructionMark im(this);
12388   emit_int8((unsigned char)0xDD);
12389   emit_operand32(rsp, src, 0);
12390 }
12391 
12392 void Assembler::fsin() {
12393   emit_int16((unsigned char)0xD9, (unsigned char)0xFE);
12394 }
12395 
12396 void Assembler::fsqrt() {
12397   emit_int16((unsigned char)0xD9, (unsigned char)0xFA);
12398 }
12399 
12400 void Assembler::fst_d(Address adr) {
12401   InstructionMark im(this);
12402   emit_int8((unsigned char)0xDD);
12403   emit_operand32(rdx, adr, 0);
12404 }
12405 
12406 void Assembler::fst_s(Address adr) {
12407   InstructionMark im(this);
12408   emit_int8((unsigned char)0xD9);
12409   emit_operand32(rdx, adr, 0);
12410 }
12411 
12412 void Assembler::fstp_s(Address adr) {
12413   InstructionMark im(this);
12414   emit_int8((unsigned char)0xD9);
12415   emit_operand32(rbx, adr, 0);
12416 }
12417 
12418 void Assembler::fsub(int i) {
12419   emit_farith(0xD8, 0xE0, i);
12420 }
12421 
12422 void Assembler::fsub_d(Address src) {
12423   InstructionMark im(this);
12424   emit_int8((unsigned char)0xDC);
12425   emit_operand32(rsp, src, 0);
12426 }
12427 
12428 void Assembler::fsub_s(Address src) {
12429   InstructionMark im(this);
12430   emit_int8((unsigned char)0xD8);
12431   emit_operand32(rsp, src, 0);
12432 }
12433 
12434 void Assembler::fsuba(int i) {
12435   emit_farith(0xDC, 0xE8, i);
12436 }
12437 
12438 void Assembler::fsubp(int i) {
12439   emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
12440 }
12441 
12442 void Assembler::fsubr(int i) {
12443   emit_farith(0xD8, 0xE8, i);
12444 }
12445 
12446 void Assembler::fsubr_d(Address src) {
12447   InstructionMark im(this);
12448   emit_int8((unsigned char)0xDC);
12449   emit_operand32(rbp, src, 0);
12450 }
12451 
12452 void Assembler::fsubr_s(Address src) {
12453   InstructionMark im(this);
12454   emit_int8((unsigned char)0xD8);
12455   emit_operand32(rbp, src, 0);
12456 }
12457 
12458 void Assembler::fsubra(int i) {
12459   emit_farith(0xDC, 0xE0, i);
12460 }
12461 
12462 void Assembler::fsubrp(int i) {
12463   emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
12464 }
12465 
12466 void Assembler::ftan() {
12467   emit_int32((unsigned char)0xD9, (unsigned char)0xF2, (unsigned char)0xDD, (unsigned char)0xD8);
12468 }
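
// FPTAN (D9 F2) replaces ST(0) with its tangent and then pushes 1.0; the
// trailing FSTP ST(0) (DD D8) pops that 1.0 so only the tangent remains.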
12469 
12470 void Assembler::ftst() {
12471   emit_int16((unsigned char)0xD9, (unsigned char)0xE4);
12472 }
12473 
12474 void Assembler::fucomi(int i) {
12475   // make sure the instruction is supported (introduced for P6, together with cmov)
12476   guarantee(VM_Version::supports_cmov(), "illegal instruction");
12477   emit_farith(0xDB, 0xE8, i);
12478 }
12479 
12480 void Assembler::fucomip(int i) {
12481   // make sure the instruction is supported (introduced for P6, together with cmov)
12482   guarantee(VM_Version::supports_cmov(), "illegal instruction");
12483   emit_farith(0xDF, 0xE8, i);
12484 }
12485 
12486 void Assembler::fwait() {
12487   emit_int8((unsigned char)0x9B);
12488 }
12489 
12490 void Assembler::fxch(int i) {
12491   emit_farith(0xD9, 0xC8, i);
12492 }
12493 
12494 void Assembler::fyl2x() {
12495   emit_int16((unsigned char)0xD9, (unsigned char)0xF1);
12496 }
12497 
12498 void Assembler::frndint() {
12499   emit_int16((unsigned char)0xD9, (unsigned char)0xFC);
12500 }
12501 
12502 void Assembler::f2xm1() {
12503   emit_int16((unsigned char)0xD9, (unsigned char)0xF0);
12504 }
12505 
12506 void Assembler::fldl2e() {
12507   emit_int16((unsigned char)0xD9, (unsigned char)0xEA);
12508 }
12509 #endif // !_LP64
12510 
12511 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
12512 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
12513 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
12514 static int simd_opc[4] = { 0,    0, 0x38, 0x3A };
12515 
12516 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
12517 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
12518   if (pre > 0) {
12519     emit_int8(simd_pre[pre]);
12520   }
12521   if (rex_w) {
12522     prefixq(adr, xreg);
12523   } else {
12524     prefix(adr, xreg);
12525   }
12526   if (opc > 0) {
12527     emit_int8(0x0F);
12528     int opc2 = simd_opc[opc];
12529     if (opc2 > 0) {
12530       emit_int8(opc2);
12531     }
12532   }
12533 }
12534 
12535 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
12536   if (pre > 0) {
12537     emit_int8(simd_pre[pre]);
12538   }
12539   int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
12540   if (opc > 0) {
12541     emit_int8(0x0F);
12542     int opc2 = simd_opc[opc];
12543     if (opc2 > 0) {
12544       emit_int8(opc2);
12545     }
12546   }
12547   return encode;
12548 }
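
// E.g. pre = VEX_SIMD_66 with opc = VEX_OPCODE_0F_3A produces the legacy byte
// sequence 66 [REX] 0F 3A -- the same information the VEX path folds into its
// prefix bytes.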
12549 
12550 
12551 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
12552   int vector_len = _attributes->get_vector_len();
12553   bool vex_w = _attributes->is_rex_vex_w();
12554   if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
12555     int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
12556     byte1 = (~byte1) & 0xE0;
12557     byte1 |= opc;
12558 
12559     int byte2 = ((~nds_enc) & 0xf) << 3;
12560     byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;
12561 
12562     emit_int24((unsigned char)VEX_3bytes, byte1, byte2);
12563   } else {
12564     int byte1 = vex_r ? VEX_R : 0;
12565     byte1 = (~byte1) & 0x80;
12566     byte1 |= ((~nds_enc) & 0xf) << 3;
12567     byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre;
12568     emit_int16((unsigned char)VEX_2bytes, byte1);
12569   }
12570 }
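
// Layout reminder: the 2-byte form is C5 [R vvvv L pp] and the 3-byte form is
// C4 [R X B m-mmmm] [W vvvv L pp]; the branch above must use 3 bytes whenever
// X, B, W or an opcode map beyond 0F needs to be encoded.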
12571 
12572 // This is a 4 byte encoding
12573 void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool eevex_b, bool evex_v,
12574                        bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool no_flags) {
12575   // EVEX 0x62 prefix
12576   // byte1 = EVEX_4bytes;
12577 
12578   bool vex_w = _attributes->is_rex_vex_w();
12579   int evex_encoding = (vex_w ? VEX_W : 0);
12580   // EVEX.b is not currently used for broadcast of single element or data rounding modes
12581   _attributes->set_evex_encoding(evex_encoding);
12582 
  // P0: byte 2, built as RXBR'0mmm (the bits are gathered un-inverted here and complemented below)
12585   int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
12586   byte2 = (~byte2) & 0xF0;
12587   byte2 |= eevex_b ? EEVEX_B : 0;
12588   // confine opc opcode extensions in mm bits to lower two bits
12589   // of form {0F, 0F_38, 0F_3A, 0F_3C}
12590   byte2 |= opc;
12591 
12592   // P1: byte 3 as Wvvvv1pp
12593   int byte3 = ((~nds_enc) & 0xf) << 3;
12594   byte3 |= (eevex_x ? 0 : EEVEX_X);
12595   byte3 |= (vex_w & 1) << 7;
12596   // confine pre opcode extensions in pp bits to lower two bits
12597   // of form {66, F3, F2}
12598   byte3 |= pre;
12599 
  // P2: byte 4 as zL'Lbv'aaa or 00LXVF00, where V = V4, X (extended context) = ND and F = NF (no flags)
12601   int byte4 = 0;
12602   if (no_flags) {
12603     assert(_attributes->is_no_reg_mask(), "mask register not supported with no_flags");
12604     byte4 |= 0x4;
12605   } else {
12606     // kregs are implemented in the low 3 bits as aaa
12607     byte4 = (_attributes->is_no_reg_mask()) ?
12608                 0 :
12609                 _attributes->get_embedded_opmask_register_specifier();
12610   }
  // EVEX.v' for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0 : EVEX_V);
  // third EVEX.b for broadcast actions
12614   byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length: 0 is 128, 1 is 256, 2 is 512; 1024 is not currently supported
  byte4 |= ((_attributes->get_vector_len()) & 0x3) << 5;
12617   // last is EVEX.z for zero/merge actions
12618   if (_attributes->is_no_reg_mask() == false &&
12619       _attributes->get_embedded_opmask_register_specifier() != 0) {
12620     byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
12621   }
12622   emit_int32(EVEX_4bytes, byte2, byte3, byte4);
12623 }
12624 
12625 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool nds_is_ndd, bool no_flags) {
12626   if (adr.base_needs_rex2() || adr.index_needs_rex2() || nds_is_ndd || no_flags) {
12627     assert(UseAPX, "APX features not enabled");
12628   }
12629   if (nds_is_ndd) attributes->set_extended_context();
12630   bool is_extended = adr.base_needs_rex2() || adr.index_needs_rex2() || nds_enc >= 16 || xreg_enc >= 16 || nds_is_ndd;
12631   bool vex_r = (xreg_enc & 8) == 8;
12632   bool vex_b = adr.base_needs_rex();
12633   bool vex_x;
12634   if (adr.isxmmindex()) {
12635     vex_x = adr.xmmindex_needs_rex();
12636   } else {
12637     vex_x = adr.index_needs_rex();
12638   }
12639   set_attributes(attributes);
  // For an EVEX-capable instruction that is not marked as pure EVEX, check whether it can be
  // demoted to legacy (VEX) mode, i.e. whether its operands and vector length fit there.
  // Pure EVEX instructions have is_evex_instruction set in their definition.
12643   if (!attributes->is_legacy_mode()) {
12644     if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
12645       if ((attributes->get_vector_len() != AVX_512bit) && !is_extended) {
12646           attributes->set_is_legacy_mode();
12647       }
12648     }
12649   }
12650 
12651   if (UseAVX > 2) {
12652     assert(((!attributes->uses_vl()) ||
12653             (attributes->get_vector_len() == AVX_512bit) ||
12654             (!_legacy_mode_vl) ||
12655             (attributes->is_legacy_mode())),"XMM register should be 0-15");
12656     assert((!is_extended || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
12657   }
12658 
12659   clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode()) {
12662     bool evex_r = (xreg_enc >= 16);
12663     bool evex_v;
12664     // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
12665     if (adr.isxmmindex())  {
      evex_v = (adr._xmmindex->encoding() > 15);
12667     } else {
12668       evex_v = (nds_enc >= 16);
12669     }
12670     bool eevex_x = adr.index_needs_rex2();
12671     bool eevex_b = adr.base_needs_rex2();
12672     attributes->set_is_evex_instruction();
12673     evex_prefix(vex_r, vex_b, vex_x, evex_r, eevex_b, evex_v, eevex_x, nds_enc, pre, opc, no_flags);
12674   } else {
12675     if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
12676       attributes->set_rex_vex_w(false);
12677     }
12678     vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
12679   }
12680 }
12681 
12682 void Assembler::evex_prefix_ndd(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) {
12683   attributes->set_is_evex_instruction();
12684   vex_prefix(adr, ndd_enc, xreg_enc, pre, opc, attributes, /* nds_is_ndd */ true, no_flags);
12685 }
12686 
12687 void Assembler::evex_prefix_nf(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) {
12688   attributes->set_is_evex_instruction();
12689   vex_prefix(adr, ndd_enc, xreg_enc, pre, opc, attributes, /* nds_is_ndd */ false, no_flags);
12690 }
12691 
12692 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr, bool nds_is_ndd, bool no_flags) {
12693   if (nds_is_ndd || no_flags || (src_is_gpr && src_enc >= 16)) {
12694     assert(UseAPX, "APX features not enabled");
12695   }
12696   if (nds_is_ndd) attributes->set_extended_context();
  bool is_extended = dst_enc >= 16 || nds_enc >= 16 || src_enc >= 16;
12698   bool vex_r = (dst_enc & 8) == 8;
12699   bool vex_b = (src_enc & 8) == 8;
12700   bool vex_x = false;
12701   set_attributes(attributes);
12702 
  // For an EVEX-capable instruction that is not marked as pure EVEX, check whether it can be
  // demoted to legacy (VEX) mode, i.e. whether its operands and vector length fit there.
  // Pure EVEX instructions have is_evex_instruction set in their definition.
12706   if (!attributes->is_legacy_mode()) {
12707     if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
12708       if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
12709            !is_extended) {
12710           attributes->set_is_legacy_mode();
12711       }
12712     }
12713   }
12714 
12715   if (UseAVX > 2) {
    // All the scalar fp instructions (with uses_vl as false) can have legacy_mode as false
    // Instructions with uses_vl true are vector instructions
    // All vector instructions with AVX_512bit length can have legacy_mode as false
    // All vector instructions below AVX_512bit length can have legacy_mode as false if AVX512VL is supported
    // Everything else must have legacy_mode set to true
12721     assert(((!attributes->uses_vl()) ||
12722             (attributes->get_vector_len() == AVX_512bit) ||
12723             (!_legacy_mode_vl) ||
12724             (attributes->is_legacy_mode())),"XMM register should be 0-15");
12725     // Instruction with legacy_mode true should have dst, nds and src < 15
12726     assert(((!is_extended) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
12727   }
12728 
12729   clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode()) {
12732     bool evex_r = (dst_enc >= 16);
12733     bool evex_v = (nds_enc >= 16);
12734     bool evex_b = (src_enc >= 16) && src_is_gpr;
12735     // can use vex_x as bank extender on rm encoding
12736     vex_x = (src_enc >= 16) && !src_is_gpr;
12737     attributes->set_is_evex_instruction();
12738     evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_b, evex_v, false /*eevex_x*/, nds_enc, pre, opc, no_flags);
12739   } else {
12740     if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
12741       attributes->set_rex_vex_w(false);
12742     }
12743     vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
12744   }
12745 
12746   // return modrm byte components for operands
12747   return (((dst_enc & 7) << 3) | (src_enc & 7));
12748 }
12749 
12750 int Assembler::evex_prefix_and_encode_ndd(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
12751                            InstructionAttr *attributes, bool no_flags) {
12752   attributes->set_is_evex_instruction();
12753   return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
12754 }
12755 
12756 int Assembler::evex_prefix_and_encode_nf(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
12757                            InstructionAttr *attributes, bool no_flags) {
12758   attributes->set_is_evex_instruction();
12759   return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ false, no_flags);
12760 }
12761 
12762 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
12763                             VexOpcode opc, InstructionAttr *attributes) {
12764   if (UseAVX > 0) {
12765     int xreg_enc = xreg->encoding();
12766     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
12767     vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
12768   } else {
12769     assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
12770     rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
12771   }
12772 }
12773 
12774 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
12775                                       VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr) {
12776   int dst_enc = dst->encoding();
12777   int src_enc = src->encoding();
12778   if (UseAVX > 0) {
12779     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
12780     return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, src_is_gpr);
12781   } else {
12782     assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
12783     return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
12784   }
12785 }
12786 
12787 void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
12788   assert(VM_Version::supports_avx(), "");
12789   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
12790   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
12791   emit_int16(0x5F, (0xC0 | encode));
12792 }
12793 
12794 void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
12795   assert(VM_Version::supports_avx(), "");
12796   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
12797   attributes.set_rex_vex_w_reverted();
12798   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
12799   emit_int16(0x5F, (0xC0 | encode));
12800 }
12801 
12802 void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
12803   assert(VM_Version::supports_avx(), "");
12804   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
12805   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
12806   emit_int16(0x5D, (0xC0 | encode));
12807 }
12808 
12809 void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
12810   assert(VM_Version::supports_avx(), "");
12811   InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
12812   attributes.set_rex_vex_w_reverted();
12813   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
12814   emit_int16(0x5D, (0xC0 | encode));
12815 }
12816 
12817 void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
12818   assert(VM_Version::supports_avx(), "");
12819   assert(vector_len <= AVX_256bit, "");
12820   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12821   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12822   emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
12823 }
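
// The imm8 comparison predicates follow the CMPPS/CMPPD encoding: 0 = EQ,
// 1 = LT, 2 = LE, 3 = UNORD, 4 = NEQ, 5 = NLT, 6 = NLE, 7 = ORD (AVX adds
// extended predicates above 7).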
12824 
12825 void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
12826   assert(VM_Version::supports_avx(), "");
12827   assert(vector_len <= AVX_256bit, "");
12828   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12829   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12830   int src2_enc = src2->encoding();
12831   emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4));
12832 }
12833 
12834 void Assembler::vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
12835   assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
12836   assert(vector_len <= AVX_256bit, "");
12837   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12838   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12839   int src2_enc = src2->encoding();
12840   emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4));
12841 }
12842 
12843 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
12844   assert(VM_Version::supports_avx2(), "");
12845   assert(vector_len <= AVX_256bit, "");
12846   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12847   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12848   emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8);
12849 }
12850 
12851 void Assembler::vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len) {
12852   assert(VM_Version::supports_avx(), "");
12853   assert(vector_len <= AVX_256bit, "");
12854   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
12855   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
12856   emit_int24((unsigned char)0xC2, (0xC0 | encode), (unsigned char)comparison);
12857 }
12858 
12859 void Assembler::evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
12860                         ComparisonPredicateFP comparison, int vector_len) {
12861   assert(VM_Version::supports_evex(), "");
12862   // Encoding: EVEX.NDS.XXX.0F.W0 C2 /r ib
12863   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12864   attributes.set_is_evex_instruction();
12865   attributes.set_embedded_opmask_register_specifier(mask);
12866   attributes.reset_is_clear_context();
12867   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
12868   emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
12869 }
12870 
12871 void Assembler::evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
12872                         ComparisonPredicateFP comparison, int vector_len) {
12873   assert(VM_Version::supports_evex(), "");
12874   // Encoding: EVEX.NDS.XXX.66.0F.W1 C2 /r ib
12875   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12876   attributes.set_is_evex_instruction();
12877   attributes.set_embedded_opmask_register_specifier(mask);
12878   attributes.reset_is_clear_context();
12879   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12880   emit_int24((unsigned char)0xC2, (0xC0 | encode), comparison);
12881 }
12882 
12883 void Assembler::blendvps(XMMRegister dst, XMMRegister src) {
12884   assert(VM_Version::supports_sse4_1(), "");
12885   assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
12886   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12887   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12888   emit_int16(0x14, (0xC0 | encode));
12889 }
12890 
12891 void Assembler::blendvpd(XMMRegister dst, XMMRegister src) {
12892   assert(VM_Version::supports_sse4_1(), "");
12893   assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
12894   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12895   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12896   emit_int16(0x15, (0xC0 | encode));
12897 }
12898 
12899 void Assembler::pblendvb(XMMRegister dst, XMMRegister src) {
12900   assert(VM_Version::supports_sse4_1(), "");
12901   assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
12902   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12903   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12904   emit_int16(0x10, (0xC0 | encode));
12905 }
12906 
12907 void Assembler::vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
12908   assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
12909   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12910   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12911   int src2_enc = src2->encoding();
12912   emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4));
12913 }
12914 
12915 void Assembler::vblendps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
12916   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12917   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12918   emit_int24(0x0C, (0xC0 | encode), imm8);
12919 }
12920 
12921 void Assembler::vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
12922   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
12923   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
12924   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12925   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12926   emit_int16(0x64, (0xC0 | encode));
12927 }
12928 
12929 void Assembler::vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
12930   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
12931   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
12932   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12933   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12934   emit_int16(0x65, (0xC0 | encode));
12935 }
12936 
12937 void Assembler::vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
12938   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
12939   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
12940   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12941   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
12942   emit_int16(0x66, (0xC0 | encode));
12943 }
12944 
12945 void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
12946   assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
12947   assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
12948   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
12949   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
12950   emit_int16(0x37, (0xC0 | encode));
12951 }
12952 
12953 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
12954                         int comparison, bool is_signed, int vector_len) {
12955   assert(VM_Version::supports_evex(), "");
12956   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
12957   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
12958   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12959   attributes.set_is_evex_instruction();
12960   attributes.set_embedded_opmask_register_specifier(mask);
12961   attributes.reset_is_clear_context();
12962   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12963   int opcode = is_signed ? 0x1F : 0x1E;
12964   emit_int24(opcode, (0xC0 | encode), comparison);
12965 }
12966 
12967 void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
12968                         int comparison, bool is_signed, int vector_len) {
12969   assert(VM_Version::supports_evex(), "");
12970   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
12971   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
12972   InstructionMark im(this);
12973   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12974   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
12975   attributes.set_is_evex_instruction();
12976   attributes.set_embedded_opmask_register_specifier(mask);
12977   attributes.reset_is_clear_context();
12978   int dst_enc = kdst->encoding();
12979   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12980   int opcode = is_signed ? 0x1F : 0x1E;
12981   emit_int8((unsigned char)opcode);
12982   emit_operand(as_Register(dst_enc), src, 1);
12983   emit_int8((unsigned char)comparison);
12984 }
12985 
12986 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
12987                         int comparison, bool is_signed, int vector_len) {
12988   assert(VM_Version::supports_evex(), "");
12989   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
12990   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
12991   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
12992   attributes.set_is_evex_instruction();
12993   attributes.set_embedded_opmask_register_specifier(mask);
12994   attributes.reset_is_clear_context();
12995   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
12996   int opcode = is_signed ? 0x1F : 0x1E;
12997   emit_int24(opcode, (0xC0 | encode), comparison);
12998 }
12999 
13000 void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13001                         int comparison, bool is_signed, int vector_len) {
13002   assert(VM_Version::supports_evex(), "");
13003   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13004   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
13005   InstructionMark im(this);
13006   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13007   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
13008   attributes.set_is_evex_instruction();
13009   attributes.set_embedded_opmask_register_specifier(mask);
13010   attributes.reset_is_clear_context();
13011   int dst_enc = kdst->encoding();
13012   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13013   int opcode = is_signed ? 0x1F : 0x1E;
13014   emit_int8((unsigned char)opcode);
13015   emit_operand(as_Register(dst_enc), src, 1);
13016   emit_int8((unsigned char)comparison);
13017 }
13018 
13019 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13020                         int comparison, bool is_signed, int vector_len) {
13021   assert(VM_Version::supports_evex(), "");
13022   assert(VM_Version::supports_avx512bw(), "");
13023   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13024   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
13025   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13026   attributes.set_is_evex_instruction();
13027   attributes.set_embedded_opmask_register_specifier(mask);
13028   attributes.reset_is_clear_context();
13029   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13030   int opcode = is_signed ? 0x3F : 0x3E;
13031   emit_int24(opcode, (0xC0 | encode), comparison);
13032 }
13033 
13034 void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13035                         int comparison, bool is_signed, int vector_len) {
13036   assert(VM_Version::supports_evex(), "");
13037   assert(VM_Version::supports_avx512bw(), "");
13038   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13039   // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
13040   InstructionMark im(this);
13041   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13042   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
13043   attributes.set_is_evex_instruction();
13044   attributes.set_embedded_opmask_register_specifier(mask);
13045   attributes.reset_is_clear_context();
13046   int dst_enc = kdst->encoding();
13047   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13048   int opcode = is_signed ? 0x3F : 0x3E;
13049   emit_int8((unsigned char)opcode);
13050   emit_operand(as_Register(dst_enc), src, 1);
13051   emit_int8((unsigned char)comparison);
13052 }
13053 
13054 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
13055                         int comparison, bool is_signed, int vector_len) {
13056   assert(VM_Version::supports_evex(), "");
13057   assert(VM_Version::supports_avx512bw(), "");
13058   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13059   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
13060   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13061   attributes.set_is_evex_instruction();
13062   attributes.set_embedded_opmask_register_specifier(mask);
13063   attributes.reset_is_clear_context();
13064   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13065   int opcode = is_signed ? 0x3F : 0x3E;
13066   emit_int24(opcode, (0xC0 | encode), comparison);
13067 }
13068 
13069 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
13070                         int comparison, bool is_signed, int vector_len) {
13071   assert(VM_Version::supports_evex(), "");
13072   assert(VM_Version::supports_avx512bw(), "");
13073   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
13074   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
13075   InstructionMark im(this);
13076   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13077   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
13078   attributes.set_is_evex_instruction();
13079   attributes.set_embedded_opmask_register_specifier(mask);
13080   attributes.reset_is_clear_context();
13081   int dst_enc = kdst->encoding();
13082   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13083   int opcode = is_signed ? 0x3F : 0x3E;
13084   emit_int8((unsigned char)opcode);
13085   emit_operand(as_Register(dst_enc), src, 1);
13086   emit_int8((unsigned char)comparison);
13087 }
13088 
13089 void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13090   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13091   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13092   attributes.set_is_evex_instruction();
13093   attributes.set_embedded_opmask_register_specifier(mask);
13094   if (merge) {
13095     attributes.reset_is_clear_context();
13096   }
13097   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13098   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13099 }
13100 
13101 void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13102   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13103   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13104   attributes.set_is_evex_instruction();
13105   attributes.set_embedded_opmask_register_specifier(mask);
13106   if (merge) {
13107     attributes.reset_is_clear_context();
13108   }
13109   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13110   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13111 }
13112 
13113 void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13114   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13115   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13116   attributes.set_is_evex_instruction();
13117   attributes.set_embedded_opmask_register_specifier(mask);
13118   if (merge) {
13119     attributes.reset_is_clear_context();
13120   }
13121   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13122   emit_int16(0x14, (0xC0 | encode));
13123 }
13124 
13125 void Assembler::evprorvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13126   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13127   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13128   attributes.set_is_evex_instruction();
13129   attributes.set_embedded_opmask_register_specifier(mask);
13130   if (merge) {
13131     attributes.reset_is_clear_context();
13132   }
13133   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13134   emit_int16(0x14, (0xC0 | encode));
13135 }
13136 
13137 void Assembler::evprold(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13138   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13139   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13140   attributes.set_is_evex_instruction();
13141   attributes.set_embedded_opmask_register_specifier(mask);
13142   if (merge) {
13143     attributes.reset_is_clear_context();
13144   }
13145   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13146   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13147 }
13148 
13149 void Assembler::evprolq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
13150   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13151   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13152   attributes.set_is_evex_instruction();
13153   attributes.set_embedded_opmask_register_specifier(mask);
13154   if (merge) {
13155     attributes.reset_is_clear_context();
13156   }
13157   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
13158   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
13159 }
13160 
13161 void Assembler::evprolvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13162   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13163   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13164   attributes.set_is_evex_instruction();
13165   attributes.set_embedded_opmask_register_specifier(mask);
13166   if (merge) {
13167     attributes.reset_is_clear_context();
13168   }
13169   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13170   emit_int16(0x15, (0xC0 | encode));
13171 }
13172 
13173 void Assembler::evprolvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13174   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13175   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13176   attributes.set_is_evex_instruction();
13177   attributes.set_embedded_opmask_register_specifier(mask);
13178   if (merge) {
13179     attributes.reset_is_clear_context();
13180   }
13181   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13182   emit_int16(0x15, (0xC0 | encode));
13183 }
13184 
13185 void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
13186   assert(VM_Version::supports_avx(), "");
13187   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
13188   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13189   int mask_enc = mask->encoding();
  emit_int24(0x4C, (0xC0 | encode), (0xF0 & mask_enc << 4));
13191 }
13192 
13193 void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13194   assert(VM_Version::supports_evex(), "");
13195   // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
13196   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13197   attributes.set_is_evex_instruction();
13198   attributes.set_embedded_opmask_register_specifier(mask);
13199   if (merge) {
13200     attributes.reset_is_clear_context();
13201   }
13202   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13203   emit_int16(0x65, (0xC0 | encode));
13204 }
13205 
13206 void Assembler::evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13207   assert(VM_Version::supports_evex(), "");
13208   // Encoding: EVEX.NDS.XXX.66.0F38.W0 65 /r
13209   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13210   attributes.set_is_evex_instruction();
13211   attributes.set_embedded_opmask_register_specifier(mask);
13212   if (merge) {
13213     attributes.reset_is_clear_context();
13214   }
13215   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13216   emit_int16(0x65, (0xC0 | encode));
13217 }
13218 
13219 void Assembler::evpblendmb (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13220   assert(VM_Version::supports_evex(), "");
13221   assert(VM_Version::supports_avx512bw(), "");
13222   // Encoding: EVEX.NDS.512.66.0F38.W0 66 /r
13223   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13224   attributes.set_is_evex_instruction();
13225   attributes.set_embedded_opmask_register_specifier(mask);
13226   if (merge) {
13227     attributes.reset_is_clear_context();
13228   }
13229   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13230   emit_int16(0x66, (0xC0 | encode));
13231 }
13232 
13233 void Assembler::evpblendmw (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13234   assert(VM_Version::supports_evex(), "");
13235   assert(VM_Version::supports_avx512bw(), "");
13236   // Encoding: EVEX.NDS.512.66.0F38.W1 66 /r
13237   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
13238   attributes.set_is_evex_instruction();
13239   attributes.set_embedded_opmask_register_specifier(mask);
13240   if (merge) {
13241     attributes.reset_is_clear_context();
13242   }
13243   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13244   emit_int16(0x66, (0xC0 | encode));
13245 }
13246 
13247 void Assembler::evpblendmd (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13248   assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.512.66.0F38.W0 64 /r
13250   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13251   attributes.set_is_evex_instruction();
13252   attributes.set_embedded_opmask_register_specifier(mask);
13253   if (merge) {
13254     attributes.reset_is_clear_context();
13255   }
13256   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13257   emit_int16(0x64, (0xC0 | encode));
13258 }
13259 
13260 void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
13261   assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.512.66.0F38.W1 64 /r
13263   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13264   attributes.set_is_evex_instruction();
13265   attributes.set_embedded_opmask_register_specifier(mask);
13266   if (merge) {
13267     attributes.reset_is_clear_context();
13268   }
13269   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13270   emit_int16(0x64, (0xC0 | encode));
13271 }
13272 
13273 void Assembler::bzhiq(Register dst, Register src1, Register src2) {
13274   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13275   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13276   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
13277   emit_int16((unsigned char)0xF5, (0xC0 | encode));
13278 }
13279 
13280 void Assembler::bzhil(Register dst, Register src1, Register src2) {
13281   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13282   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13283   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
13284   emit_int16((unsigned char)0xF5, (0xC0 | encode));
13285 }
13286 
13287 void Assembler::pextl(Register dst, Register src1, Register src2) {
13288   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13289   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13290   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13291   emit_int16((unsigned char)0xF5, (0xC0 | encode));
13292 }
13293 
13294 void Assembler::pdepl(Register dst, Register src1, Register src2) {
13295   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13296   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13297   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13298   emit_int16((unsigned char)0xF5, (0xC0 | encode));
13299 }
13300 
13301 void Assembler::pextq(Register dst, Register src1, Register src2) {
13302   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13303   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13304   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13305   emit_int16((unsigned char)0xF5, (0xC0 | encode));
13306 }
13307 
13308 void Assembler::pdepq(Register dst, Register src1, Register src2) {
13309   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13310   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13311   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13312   emit_int16((unsigned char)0xF5, (0xC0 | encode));
13313 }
13314 
13315 void Assembler::pextl(Register dst, Register src1, Address src2) {
13316   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13317   InstructionMark im(this);
13318   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13319   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13320   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13321   emit_int8((unsigned char)0xF5);
13322   emit_operand(dst, src2, 0);
13323 }
13324 
13325 void Assembler::pdepl(Register dst, Register src1, Address src2) {
13326   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13327   InstructionMark im(this);
13328   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13329   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13330   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13331   emit_int8((unsigned char)0xF5);
13332   emit_operand(dst, src2, 0);
13333 }
13334 
13335 void Assembler::pextq(Register dst, Register src1, Address src2) {
13336   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13337   InstructionMark im(this);
13338   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13339   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13340   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13341   emit_int8((unsigned char)0xF5);
13342   emit_operand(dst, src2, 0);
13343 }
13344 
13345 void Assembler::pdepq(Register dst, Register src1, Address src2) {
13346   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
13347   InstructionMark im(this);
13348   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13349   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13350   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13351   emit_int8((unsigned char)0xF5);
13352   emit_operand(dst, src2, 0);
13353 }
13354 
13355 void Assembler::sarxl(Register dst, Register src1, Register src2) {
13356   assert(VM_Version::supports_bmi2(), "");
13357   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13358   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13359   emit_int16((unsigned char)0xF7, (0xC0 | encode));
13360 }
13361 
13362 void Assembler::sarxl(Register dst, Address src1, Register src2) {
13363   assert(VM_Version::supports_bmi2(), "");
13364   InstructionMark im(this);
13365   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13366   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13367   vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13368   emit_int8((unsigned char)0xF7);
13369   emit_operand(dst, src1, 0);
13370 }
13371 
13372 void Assembler::sarxq(Register dst, Register src1, Register src2) {
13373   assert(VM_Version::supports_bmi2(), "");
13374   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13375   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes, true);
13376   emit_int16((unsigned char)0xF7, (0xC0 | encode));
13377 }
13378 
13379 void Assembler::sarxq(Register dst, Address src1, Register src2) {
13380   assert(VM_Version::supports_bmi2(), "");
13381   InstructionMark im(this);
13382   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13383   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13384   vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13385   emit_int8((unsigned char)0xF7);
13386   emit_operand(dst, src1, 0);
13387 }
13388 
13389 void Assembler::shlxl(Register dst, Register src1, Register src2) {
13390   assert(VM_Version::supports_bmi2(), "");
13391   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13392   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
13393   emit_int16((unsigned char)0xF7, (0xC0 | encode));
13394 }
13395 
13396 void Assembler::shlxl(Register dst, Address src1, Register src2) {
13397   assert(VM_Version::supports_bmi2(), "");
13398   InstructionMark im(this);
13399   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13400   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13401   vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13402   emit_int8((unsigned char)0xF7);
13403   emit_operand(dst, src1, 0);
13404 }
13405 
13406 void Assembler::shlxq(Register dst, Register src1, Register src2) {
13407   assert(VM_Version::supports_bmi2(), "");
13408   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13409   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes, true);
13410   emit_int16((unsigned char)0xF7, (0xC0 | encode));
13411 }
13412 
13413 void Assembler::shlxq(Register dst, Address src1, Register src2) {
13414   assert(VM_Version::supports_bmi2(), "");
13415   InstructionMark im(this);
13416   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13417   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13418   vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13419   emit_int8((unsigned char)0xF7);
13420   emit_operand(dst, src1, 0);
13421 }
13422 
13423 void Assembler::shrxl(Register dst, Register src1, Register src2) {
13424   assert(VM_Version::supports_bmi2(), "");
13425   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13426   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13427   emit_int16((unsigned char)0xF7, (0xC0 | encode));
13428 }
13429 
13430 void Assembler::shrxl(Register dst, Address src1, Register src2) {
13431   assert(VM_Version::supports_bmi2(), "");
13432   InstructionMark im(this);
13433   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13434   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
13435   vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13436   emit_int8((unsigned char)0xF7);
13437   emit_operand(dst, src1, 0);
13438 }
13439 
13440 void Assembler::shrxq(Register dst, Register src1, Register src2) {
13441   assert(VM_Version::supports_bmi2(), "");
13442   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13443   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
13444   emit_int16((unsigned char)0xF7, (0xC0 | encode));
13445 }
13446 
13447 void Assembler::shrxq(Register dst, Address src1, Register src2) {
13448   assert(VM_Version::supports_bmi2(), "");
13449   InstructionMark im(this);
13450   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13451   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
13452   vex_prefix(src1, src2->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
13453   emit_int8((unsigned char)0xF7);
13454   emit_operand(dst, src1, 0);
13455 }
13456 
13457 void Assembler::evpmovq2m(KRegister dst, XMMRegister src, int vector_len) {
13458   assert(VM_Version::supports_avx512vldq(), "");
13459   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13460   attributes.set_is_evex_instruction();
13461   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13462   emit_int16(0x39, (0xC0 | encode));
13463 }
13464 
13465 void Assembler::evpmovd2m(KRegister dst, XMMRegister src, int vector_len) {
13466   assert(VM_Version::supports_avx512vldq(), "");
13467   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13468   attributes.set_is_evex_instruction();
13469   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13470   emit_int16(0x39, (0xC0 | encode));
13471 }
13472 
13473 void Assembler::evpmovw2m(KRegister dst, XMMRegister src, int vector_len) {
13474   assert(VM_Version::supports_avx512vlbw(), "");
13475   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13476   attributes.set_is_evex_instruction();
13477   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13478   emit_int16(0x29, (0xC0 | encode));
13479 }
13480 
13481 void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) {
13482   assert(VM_Version::supports_avx512vlbw(), "");
13483   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13484   attributes.set_is_evex_instruction();
13485   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13486   emit_int16(0x29, (0xC0 | encode));
13487 }
13488 
13489 void Assembler::evpmovm2q(XMMRegister dst, KRegister src, int vector_len) {
13490   assert(VM_Version::supports_avx512vldq(), "");
13491   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13492   attributes.set_is_evex_instruction();
13493   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13494   emit_int16(0x38, (0xC0 | encode));
13495 }
13496 
13497 void Assembler::evpmovm2d(XMMRegister dst, KRegister src, int vector_len) {
13498   assert(VM_Version::supports_avx512vldq(), "");
13499   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13500   attributes.set_is_evex_instruction();
13501   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13502   emit_int16(0x38, (0xC0 | encode));
13503 }
13504 
13505 void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
13506   assert(VM_Version::supports_avx512vlbw(), "");
13507   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13508   attributes.set_is_evex_instruction();
13509   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13510   emit_int16(0x28, (0xC0 | encode));
13511 }
13512 
13513 void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
13514   assert(VM_Version::supports_avx512vlbw(), "");
13515   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
13516   attributes.set_is_evex_instruction();
13517   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
13518   emit_int16(0x28, (0xC0 | encode));
13519 }
13520 
13521 void Assembler::evpcompressb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
13522   assert(VM_Version::supports_avx512_vbmi2(), "");
13523   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13524   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13525   attributes.set_embedded_opmask_register_specifier(mask);
13526   attributes.set_is_evex_instruction();
13527   if (merge) {
13528     attributes.reset_is_clear_context();
13529   }
13530   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13531   emit_int16((unsigned char)0x63, (0xC0 | encode));
13532 }
13533 
13534 void Assembler::evpcompressw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
13535   assert(VM_Version::supports_avx512_vbmi2(), "");
13536   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13537   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13538   attributes.set_embedded_opmask_register_specifier(mask);
13539   attributes.set_is_evex_instruction();
13540   if (merge) {
13541     attributes.reset_is_clear_context();
13542   }
13543   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13544   emit_int16((unsigned char)0x63, (0xC0 | encode));
13545 }
13546 
13547 void Assembler::evpcompressd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
13548   assert(VM_Version::supports_evex(), "");
13549   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13550   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13551   attributes.set_embedded_opmask_register_specifier(mask);
13552   attributes.set_is_evex_instruction();
13553   if (merge) {
13554     attributes.reset_is_clear_context();
13555   }
13556   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13557   emit_int16((unsigned char)0x8B, (0xC0 | encode));
13558 }
13559 
13560 void Assembler::evpcompressq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
13561   assert(VM_Version::supports_evex(), "");
13562   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13563   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13564   attributes.set_embedded_opmask_register_specifier(mask);
13565   attributes.set_is_evex_instruction();
13566   if (merge) {
13567     attributes.reset_is_clear_context();
13568   }
13569   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13570   emit_int16((unsigned char)0x8B, (0xC0 | encode));
13571 }
13572 
13573 void Assembler::evcompressps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
13574   assert(VM_Version::supports_evex(), "");
13575   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13576   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13577   attributes.set_embedded_opmask_register_specifier(mask);
13578   attributes.set_is_evex_instruction();
13579   if (merge) {
13580     attributes.reset_is_clear_context();
13581   }
13582   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13583   emit_int16((unsigned char)0x8A, (0xC0 | encode));
13584 }
13585 
13586 void Assembler::evcompresspd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
13587   assert(VM_Version::supports_evex(), "");
13588   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
13589   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
13590   attributes.set_embedded_opmask_register_specifier(mask);
13591   attributes.set_is_evex_instruction();
13592   if (merge) {
13593     attributes.reset_is_clear_context();
13594   }
13595   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
13596   emit_int16((unsigned char)0x8A, (0xC0 | encode));
13597 }
13598 
13599 #ifndef _LP64
13600 
13601 void Assembler::incl(Register dst) {
13602   // Don't use it directly. Use MacroAssembler::incrementl() instead.
13603   emit_int8(0x40 | dst->encoding());
13604 }
13605 
13606 void Assembler::eincl(Register dst, Register src, bool no_flags) {
13607   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13608   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
13609   emit_int8(0x40 | src->encoding());
13610 }
13611 
13612 void Assembler::lea(Register dst, Address src) {
13613   leal(dst, src);
13614 }
13615 
13616 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
13617   InstructionMark im(this);
13618   emit_int8((unsigned char)0xC7);
13619   emit_operand(rax, dst, 4);
13620   emit_data((int)imm32, rspec, 0);
13621 }
13622 
13623 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
13624   InstructionMark im(this);
13625   int encode = prefix_and_encode(dst->encoding());
13626   emit_int8((0xB8 | encode));
13627   emit_data((int)imm32, rspec, 0);
13628 }
13629 
13630 void Assembler::popa() { // 32bit
13631   emit_int8(0x61);
13632 }
13633 
13634 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
13635   InstructionMark im(this);
13636   emit_int8(0x68);
13637   emit_data(imm32, rspec, 0);
13638 }
13639 
13640 void Assembler::pusha() { // 32bit
13641   emit_int8(0x60);
13642 }
13643 
13644 #else // LP64
13645 
13646 // 64bit only pieces of the assembler
13647 
// This should only be used by 64bit instructions that can use rip-relative
// addressing; it cannot be used by instructions that want an immediate value.
13650 
13651 // Determine whether an address is always reachable in rip-relative addressing mode
13652 // when accessed from the code cache.
13653 static bool is_always_reachable(address target, relocInfo::relocType reloc_type) {
13654   switch (reloc_type) {
13655     // This should be rip-relative and easily reachable.
13656     case relocInfo::internal_word_type: {
13657       return true;
13658     }
    // This should be rip-relative within the code cache and easily
    // reachable until we get huge code caches (at which point
    // IC code is going to have issues).
13662     case relocInfo::virtual_call_type:
13663     case relocInfo::opt_virtual_call_type:
13664     case relocInfo::static_call_type:
13665     case relocInfo::static_stub_type: {
13666       return true;
13667     }
13668     case relocInfo::runtime_call_type:
13669     case relocInfo::external_word_type:
13670     case relocInfo::poll_return_type: // these are really external_word but need special
13671     case relocInfo::poll_type: {      // relocs to identify them
13672       return CodeCache::contains(target);
13673     }
13674     default: {
13675       return false;
13676     }
13677   }
13678 }
13679 
13680 // Determine whether an address is reachable in rip-relative addressing mode from the code cache.
13681 static bool is_reachable(address target, relocInfo::relocType reloc_type) {
13682   if (is_always_reachable(target, reloc_type)) {
13683     return true;
13684   }
13685   switch (reloc_type) {
    // relocInfo::none will force a 64bit literal into the code stream. It is likely
    // a placeholder for something that will be patched later, and we need to be
    // certain it will always be reachable.
13689     case relocInfo::none: {
13690       return false;
13691     }
13692     case relocInfo::runtime_call_type:
13693     case relocInfo::external_word_type:
13694     case relocInfo::poll_return_type: // these are really external_word but need special
13695     case relocInfo::poll_type: {      // relocs to identify them
13696       assert(!CodeCache::contains(target), "always reachable");
13697       if (ForceUnreachable) {
13698         return false; // stress the correction code
13699       }
      // For external_word_type/runtime_call_type, if the target is reachable both
      // from where we are now (possibly a temp buffer) and from anywhere we might
      // end up in the code cache, then it is always reachable.
      // This would have to change (and become more pessimistic) if we ever
      // save/restore shared code.
      // The code buffer has to be allocated in the code cache, so checking against
      // the code cache boundaries covers that case.
      //
      // In rip-relative addressing mode, an effective address is formed by adding a
      // displacement to the 64-bit RIP of the next instruction, which is not known
      // yet. Since the target address is guaranteed to be outside of the code cache,
      // checking against the code cache boundaries is enough to account for that.
13711       return Assembler::is_simm32(target - CodeCache::low_bound()) &&
13712              Assembler::is_simm32(target - CodeCache::high_bound());
13713     }
13714     default: {
13715       return false;
13716     }
13717   }
13718 }
13719 
13720 bool Assembler::reachable(AddressLiteral adr) {
13721   assert(CodeCache::contains(pc()), "required");
13722   if (adr.is_lval()) {
13723     return false;
13724   }
13725   return is_reachable(adr.target(), adr.reloc());
13726 }
13727 
13728 bool Assembler::always_reachable(AddressLiteral adr) {
13729   assert(CodeCache::contains(pc()), "required");
13730   if (adr.is_lval()) {
13731     return false;
13732   }
13733   return is_always_reachable(adr.target(), adr.reloc());
13734 }
13735 
13736 void Assembler::emit_data64(jlong data,
13737                             relocInfo::relocType rtype,
13738                             int format) {
13739   if (rtype == relocInfo::none) {
13740     emit_int64(data);
13741   } else {
13742     emit_data64(data, Relocation::spec_simple(rtype), format);
13743   }
13744 }
13745 
13746 void Assembler::emit_data64(jlong data,
13747                             RelocationHolder const& rspec,
13748                             int format) {
13749   assert(imm_operand == 0, "default format must be immediate in this file");
13750   assert(imm_operand == format, "must be immediate");
13751   assert(inst_mark() != nullptr, "must be inside InstructionMark");
13752   // Do not use AbstractAssembler::relocate, which is not intended for
13753   // embedded words.  Instead, relocate to the enclosing instruction.
13754   code_section()->relocate(inst_mark(), rspec, format);
13755 #ifdef ASSERT
13756   check_relocation(rspec, format);
13757 #endif
13758   emit_int64(data);
13759 }
13760 
13761 int Assembler::get_base_prefix_bits(int enc) {
13762   int bits = 0;
13763   if (enc & 16) bits |= REX2BIT_B4;
13764   if (enc & 8) bits |= REX2BIT_B;
13765   return bits;
13766 }
13767 
13768 int Assembler::get_index_prefix_bits(int enc) {
13769   int bits = 0;
13770   if (enc & 16) bits |= REX2BIT_X4;
13771   if (enc & 8) bits |= REX2BIT_X;
13772   return bits;
13773 }
13774 
13775 int Assembler::get_base_prefix_bits(Register base) {
13776   return base->is_valid() ? get_base_prefix_bits(base->encoding()) : 0;
13777 }
13778 
13779 int Assembler::get_index_prefix_bits(Register index) {
13780   return index->is_valid() ? get_index_prefix_bits(index->encoding()) : 0;
13781 }
13782 
13783 int Assembler::get_reg_prefix_bits(int enc) {
13784   int bits = 0;
13785   if (enc & 16) bits |= REX2BIT_R4;
13786   if (enc & 8) bits |= REX2BIT_R;
13787   return bits;
13788 }
13789 
13790 void Assembler::prefix(Register reg) {
13791   if (reg->encoding() >= 16) {
13792     prefix16(WREX2 | get_base_prefix_bits(reg->encoding()));
13793   } else if (reg->encoding() >= 8) {
13794     prefix(REX_B);
13795   }
13796 }
13797 
13798 void Assembler::prefix(Register dst, Register src, Prefix p) {
13799   if ((p & WREX2) || src->encoding() >= 16 || dst->encoding() >= 16) {
13800     prefix_rex2(dst, src);
13801     return;
13802   }
13803   if (src->encoding() >= 8) {
13804     p = (Prefix)(p | REX_B);
13805   }
13806   if (dst->encoding() >= 8) {
13807     p = (Prefix)(p | REX_R);
13808   }
13809   if (p != Prefix_EMPTY) {
13810     // do not generate an empty prefix
13811     prefix(p);
13812   }
13813 }
13814 
13815 void Assembler::prefix_rex2(Register dst, Register src) {
13816   int bits = 0;
13817   bits |= get_base_prefix_bits(src->encoding());
13818   bits |= get_reg_prefix_bits(dst->encoding());
13819   prefix16(WREX2 | bits);
13820 }
13821 
13822 void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex2() || adr.index_needs_rex2() || dst->encoding() >= 16) {
    prefix_rex2(dst, adr);
    return;
  }
13826   if (adr.base_needs_rex()) {
13827     if (adr.index_needs_rex()) {
13828       assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
13829     } else {
13830       p = (Prefix)(p | REX_B);
13831     }
13832   } else {
13833     if (adr.index_needs_rex()) {
13834       assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
13835     }
13836   }
13837   if (dst->encoding() >= 8) {
13838     p = (Prefix)(p | REX_R);
13839   }
13840   if (p != Prefix_EMPTY) {
13841     // do not generate an empty prefix
13842     prefix(p);
13843   }
13844 }
13845 
13846 void Assembler::prefix_rex2(Register dst, Address adr) {
13847   assert(!adr.index_needs_rex2(), "prefix(Register dst, Address adr) does not support handling of an X");
13848   int bits = 0;
13849   bits |= get_base_prefix_bits(adr.base());
13850   bits |= get_reg_prefix_bits(dst->encoding());
13851   prefix16(WREX2 | bits);
13852 }
13853 
13854 void Assembler::prefix(Address adr, bool is_map1) {
13855   if (adr.base_needs_rex2() || adr.index_needs_rex2()) {
13856     prefix_rex2(adr, is_map1);
13857     return;
13858   }
13859   if (adr.base_needs_rex()) {
13860     if (adr.index_needs_rex()) {
13861       prefix(REX_XB);
13862     } else {
13863       prefix(REX_B);
13864     }
13865   } else {
13866     if (adr.index_needs_rex()) {
13867       prefix(REX_X);
13868     }
13869   }
13870   if (is_map1) emit_int8(0x0F);
13871 }
13872 
13873 void Assembler::prefix_rex2(Address adr, bool is_map1) {
13874   int bits = is_map1 ? REX2BIT_M0 : 0;
13875   bits |= get_base_prefix_bits(adr.base());
13876   bits |= get_index_prefix_bits(adr.index());
13877   prefix16(WREX2 | bits);
13878 }
13879 
13880 void Assembler::prefix(Address adr, Register reg, bool byteinst, bool is_map1) {
13881   if (reg->encoding() >= 16 || adr.base_needs_rex2() || adr.index_needs_rex2()) {
13882     prefix_rex2(adr, reg, byteinst, is_map1);
13883     return;
13884   }
13885   if (reg->encoding() < 8) {
13886     if (adr.base_needs_rex()) {
13887       if (adr.index_needs_rex()) {
13888         prefix(REX_XB);
13889       } else {
13890         prefix(REX_B);
13891       }
13892     } else {
13893       if (adr.index_needs_rex()) {
13894         prefix(REX_X);
13895       } else if (byteinst && reg->encoding() >= 4) {
13896         prefix(REX);
13897       }
13898     }
13899   } else {
13900     if (adr.base_needs_rex()) {
13901       if (adr.index_needs_rex()) {
13902         prefix(REX_RXB);
13903       } else {
13904         prefix(REX_RB);
13905       }
13906     } else {
13907       if (adr.index_needs_rex()) {
13908         prefix(REX_RX);
13909       } else {
13910         prefix(REX_R);
13911       }
13912     }
13913   }
13914   if (is_map1) emit_int8(0x0F);
13915 }
13916 
13917 void Assembler::prefix_rex2(Address adr, Register reg, bool byteinst, bool is_map1) {
13918   int bits = is_map1 ? REX2BIT_M0 : 0;
13919   bits |= get_base_prefix_bits(adr.base());
13920   bits |= get_index_prefix_bits(adr.index());
13921   bits |= get_reg_prefix_bits(reg->encoding());
13922   prefix16(WREX2 | bits);
13923 }
13924 
13925 void Assembler::prefix(Address adr, XMMRegister reg) {
13926   if (reg->encoding() >= 16 || adr.base_needs_rex2() || adr.index_needs_rex2()) {
13927     prefixq_rex2(adr, reg);
13928     return;
13929   }
13930   if (reg->encoding() < 8) {
13931     if (adr.base_needs_rex()) {
13932       if (adr.index_needs_rex()) {
13933         prefix(REX_XB);
13934       } else {
13935         prefix(REX_B);
13936       }
13937     } else {
13938       if (adr.index_needs_rex()) {
13939         prefix(REX_X);
13940       }
13941     }
13942   } else {
13943     if (adr.base_needs_rex()) {
13944       if (adr.index_needs_rex()) {
13945         prefix(REX_RXB);
13946       } else {
13947         prefix(REX_RB);
13948       }
13949     } else {
13950       if (adr.index_needs_rex()) {
13951         prefix(REX_RX);
13952       } else {
13953         prefix(REX_R);
13954       }
13955     }
13956   }
13957 }
13958 
13959 void Assembler::prefix_rex2(Address adr, XMMRegister src) {
13960   int bits = 0;
13961   bits |= get_base_prefix_bits(adr.base());
13962   bits |= get_index_prefix_bits(adr.index());
13963   bits |= get_reg_prefix_bits(src->encoding());
13964   prefix16(WREX2 | bits);
13965 }
13966 
13967 int Assembler::prefix_and_encode(int reg_enc, bool byteinst, bool is_map1) {
13968   if (reg_enc >= 16) {
13969     return prefix_and_encode_rex2(reg_enc, is_map1);
13970   }
13971   if (reg_enc >= 8) {
13972     prefix(REX_B);
13973     reg_enc -= 8;
13974   } else if (byteinst && reg_enc >= 4) {
13975     prefix(REX);
13976   }
13977   int opc_prefix = is_map1 ? 0x0F00 : 0;
13978   return opc_prefix | reg_enc;
13979 }
13980 
13981 int Assembler::prefix_and_encode_rex2(int reg_enc, bool is_map1) {
13982   prefix16(WREX2 | (is_map1 ? REX2BIT_M0 : 0) | get_base_prefix_bits(reg_enc));
13983   return reg_enc & 0x7;
13984 }
13985 
13986 int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte, bool is_map1) {
13987   if (src_enc >= 16 || dst_enc >= 16) {
13988     return prefix_and_encode_rex2(dst_enc, src_enc, is_map1 ? REX2BIT_M0 : 0);
13989   }
13990   if (dst_enc < 8) {
13991     if (src_enc >= 8) {
13992       prefix(REX_B);
13993       src_enc -= 8;
13994     } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
13995       prefix(REX);
13996     }
13997   } else {
13998     if (src_enc < 8) {
13999       prefix(REX_R);
14000     } else {
14001       prefix(REX_RB);
14002       src_enc -= 8;
14003     }
14004     dst_enc -= 8;
14005   }
14006   int opcode_prefix = is_map1 ? 0x0F00 : 0;
14007   return opcode_prefix | (dst_enc << 3 | src_enc);
14008 }
14009 
14010 int Assembler::prefix_and_encode_rex2(int dst_enc, int src_enc, int init_bits) {
14011   int bits = init_bits;
14012   bits |= get_reg_prefix_bits(dst_enc);
14013   bits |= get_base_prefix_bits(src_enc);
14014   dst_enc &= 0x7;
14015   src_enc &= 0x7;
14016   prefix16(WREX2 | bits);
14017   return dst_enc << 3 | src_enc;
14018 }
14019 
14020 bool Assembler::prefix_is_rex2(int prefix) {
14021   return (prefix & 0xFF00) == WREX2;
14022 }
14023 
14024 int Assembler::get_prefixq_rex2(Address adr, bool is_map1) {
14025   assert(UseAPX, "APX features not enabled");
14026   int bits = REX2BIT_W;
14027   if (is_map1) bits |= REX2BIT_M0;
14028   bits |= get_base_prefix_bits(adr.base());
14029   bits |= get_index_prefix_bits(adr.index());
14030   return WREX2 | bits;
14031 }
14032 
14033 int Assembler::get_prefixq(Address adr, bool is_map1) {
14034   if (adr.base_needs_rex2() || adr.index_needs_rex2()) {
14035     return get_prefixq_rex2(adr, is_map1);
14036   }
14037   int8_t prfx = get_prefixq(adr, rax);
14038   assert(REX_W <= prfx && prfx <= REX_WXB, "must be");
14039   return is_map1 ? (((int16_t)prfx) << 8) | 0x0F : (int16_t)prfx;
14040 }
14041 
14042 int Assembler::get_prefixq(Address adr, Register src, bool is_map1) {
14043   if (adr.base_needs_rex2() || adr.index_needs_rex2() || src->encoding() >= 16) {
14044     return get_prefixq_rex2(adr, src, is_map1);
14045   }
14046   int8_t prfx = (int8_t)(REX_W +
14047                          ((int)adr.base_needs_rex()) +
14048                          ((int)adr.index_needs_rex() << 1) +
14049                          ((int)(src->encoding() >= 8) << 2));
14050 #ifdef ASSERT
14051   if (src->encoding() < 8) {
14052     if (adr.base_needs_rex()) {
14053       if (adr.index_needs_rex()) {
14054         assert(prfx == REX_WXB, "must be");
14055       } else {
14056         assert(prfx == REX_WB, "must be");
14057       }
14058     } else {
14059       if (adr.index_needs_rex()) {
14060         assert(prfx == REX_WX, "must be");
14061       } else {
14062         assert(prfx == REX_W, "must be");
14063       }
14064     }
14065   } else {
14066     if (adr.base_needs_rex()) {
14067       if (adr.index_needs_rex()) {
14068         assert(prfx == REX_WRXB, "must be");
14069       } else {
14070         assert(prfx == REX_WRB, "must be");
14071       }
14072     } else {
14073       if (adr.index_needs_rex()) {
14074         assert(prfx == REX_WRX, "must be");
14075       } else {
14076         assert(prfx == REX_WR, "must be");
14077       }
14078     }
14079   }
14080 #endif
14081   return is_map1 ? (((int16_t)prfx) << 8) | 0x0F : (int16_t)prfx;
14082 }
14083 
14084 int Assembler::get_prefixq_rex2(Address adr, Register src, bool is_map1) {
14085   assert(UseAPX, "APX features not enabled");
14086   int bits = REX2BIT_W;
14087   if (is_map1) bits |= REX2BIT_M0;
14088   bits |= get_base_prefix_bits(adr.base());
14089   bits |= get_index_prefix_bits(adr.index());
14090   bits |= get_reg_prefix_bits(src->encoding());
14091   return WREX2 | bits;
14092 }
14093 
14094 void Assembler::prefixq(Address adr) {
14095   if (adr.base_needs_rex2() || adr.index_needs_rex2()) {
14096     prefix16(get_prefixq_rex2(adr));
14097   } else {
14098     emit_int8(get_prefixq(adr));
14099   }
14100 }
14101 
14102 void Assembler::prefixq(Address adr, Register src, bool is_map1) {
14103   if (adr.base_needs_rex2() || adr.index_needs_rex2() || src->encoding() >= 16) {
14104     prefix16(get_prefixq_rex2(adr, src, is_map1));
14105   } else {
14106     emit_int8(get_prefixq(adr, src));
14107     if (is_map1) emit_int8(0x0F);
14108   }
14109 }
14110 
14111 
14112 void Assembler::prefixq(Address adr, XMMRegister src) {
14113   if (src->encoding() >= 16 || adr.base_needs_rex2() || adr.index_needs_rex2()) {
14114     prefixq_rex2(adr, src);
14115     return;
14116   }
14117   if (src->encoding() < 8) {
14118     if (adr.base_needs_rex()) {
14119       if (adr.index_needs_rex()) {
14120         prefix(REX_WXB);
14121       } else {
14122         prefix(REX_WB);
14123       }
14124     } else {
14125       if (adr.index_needs_rex()) {
14126         prefix(REX_WX);
14127       } else {
14128         prefix(REX_W);
14129       }
14130     }
14131   } else {
14132     if (adr.base_needs_rex()) {
14133       if (adr.index_needs_rex()) {
14134         prefix(REX_WRXB);
14135       } else {
14136         prefix(REX_WRB);
14137       }
14138     } else {
14139       if (adr.index_needs_rex()) {
14140         prefix(REX_WRX);
14141       } else {
14142         prefix(REX_WR);
14143       }
14144     }
14145   }
14146 }
14147 
14148 void Assembler::prefixq_rex2(Address adr, XMMRegister src) {
14149   int bits = REX2BIT_W;
14150   bits |= get_base_prefix_bits(adr.base());
14151   bits |= get_index_prefix_bits(adr.index());
14152   bits |= get_reg_prefix_bits(src->encoding());
14153   prefix16(WREX2 | bits);
14154 }
14155 
14156 int Assembler::prefixq_and_encode(int reg_enc, bool is_map1) {
14157   if (reg_enc >= 16) {
14158     return prefixq_and_encode_rex2(reg_enc, is_map1);
14159   }
14160   if (reg_enc < 8) {
14161     prefix(REX_W);
14162   } else {
14163     prefix(REX_WB);
14164     reg_enc -= 8;
14165   }
14166   int opcode_prefix = is_map1 ? 0x0F00 : 0;
14167   return opcode_prefix | reg_enc;
14168 }
14169 
14170 
14171 int Assembler::prefixq_and_encode_rex2(int reg_enc, bool is_map1) {
  prefix16(WREX2 | REX2BIT_W | (is_map1 ? REX2BIT_M0 : 0) | get_base_prefix_bits(reg_enc));
14173   return reg_enc & 0x7;
14174 }
14175 
14176 int Assembler::prefixq_and_encode(int dst_enc, int src_enc, bool is_map1) {
14177   if (dst_enc >= 16 || src_enc >= 16) {
14178     return prefixq_and_encode_rex2(dst_enc, src_enc, is_map1);
14179   }
14180   if (dst_enc < 8) {
14181     if (src_enc < 8) {
14182       prefix(REX_W);
14183     } else {
14184       prefix(REX_WB);
14185       src_enc -= 8;
14186     }
14187   } else {
14188     if (src_enc < 8) {
14189       prefix(REX_WR);
14190     } else {
14191       prefix(REX_WRB);
14192       src_enc -= 8;
14193     }
14194     dst_enc -= 8;
14195   }
14196   int opcode_prefix = is_map1 ? 0x0F00 : 0;
14197   return opcode_prefix | (dst_enc << 3 | src_enc);
14198 }
14199 
14200 int Assembler::prefixq_and_encode_rex2(int dst_enc, int src_enc, bool is_map1) {
14201   int init_bits = REX2BIT_W | (is_map1 ? REX2BIT_M0 : 0);
14202   return prefix_and_encode_rex2(dst_enc, src_enc, init_bits);
14203 }
14204 
14205 void Assembler::emit_prefix_and_int8(int prefix, int b1) {
14206   if ((prefix & 0xFF00) == 0) {
14207     emit_int16(prefix, b1);
14208   } else {
14209     assert((prefix & 0xFF00) != WREX2 || UseAPX, "APX features not enabled");
14210     emit_int24((prefix & 0xFF00) >> 8, prefix & 0x00FF, b1);
14211   }
14212 }
14213 
14214 void Assembler::adcq(Register dst, int32_t imm32) {
14215   (void) prefixq_and_encode(dst->encoding());
14216   emit_arith(0x81, 0xD0, dst, imm32);
14217 }
14218 
14219 void Assembler::adcq(Register dst, Address src) {
14220   InstructionMark im(this);
14221   emit_prefix_and_int8(get_prefixq(src, dst), 0x13);
14222   emit_operand(dst, src, 0);
14223 }
14224 
14225 void Assembler::adcq(Register dst, Register src) {
14226   (void) prefixq_and_encode(dst->encoding(), src->encoding());
14227   emit_arith(0x13, 0xC0, dst, src);
14228 }
14229 
14230 void Assembler::addq(Address dst, int32_t imm32) {
14231   InstructionMark im(this);
14232   prefixq(dst);
14233   emit_arith_operand(0x81, rax, dst, imm32);
14234 }
14235 
14236 void Assembler::eaddq(Register dst, Address src, int32_t imm32, bool no_flags) {
14237   InstructionMark im(this);
14238   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14239   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14240   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14241   emit_arith_operand(0x81, rax, src, imm32);
14242 }
14243 
14244 void Assembler::addq(Address dst, Register src) {
14245   InstructionMark im(this);
14246   emit_prefix_and_int8(get_prefixq(dst, src), 0x01);
14247   emit_operand(src, dst, 0);
14248 }
14249 
14250 void Assembler::eaddq(Register dst, Address src1, Register src2, bool no_flags) {
14251   InstructionMark im(this);
14252   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14253   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14254   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14255   emit_int8(0x01);
14256   emit_operand(src2, src1, 0);
14257 }
14258 
14259 void Assembler::addq(Register dst, int32_t imm32) {
14260   (void) prefixq_and_encode(dst->encoding());
14261   emit_arith(0x81, 0xC0, dst, imm32);
14262 }
14263 
14264 void Assembler::eaddq(Register dst, Register src, int32_t imm32, bool no_flags) {
14265   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14266   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14267   emit_arith(0x81, 0xC0, src, imm32);
14268 }
14269 
14270 void Assembler::addq(Register dst, Address src) {
14271   InstructionMark im(this);
14272   emit_prefix_and_int8(get_prefixq(src, dst), 0x03);
14273   emit_operand(dst, src, 0);
14274 }
14275 
14276 void Assembler::eaddq(Register dst, Register src1, Address src2, bool no_flags) {
14277   InstructionMark im(this);
14278   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14279   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14280   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14281   emit_int8(0x03);
14282   emit_operand(src1, src2, 0);
14283 }
14284 
14285 void Assembler::addq(Register dst, Register src) {
14286   (void) prefixq_and_encode(dst->encoding(), src->encoding());
14287   emit_arith(0x03, 0xC0, dst, src);
14288 }
14289 
14290 void Assembler::eaddq(Register dst, Register src1, Register src2, bool no_flags) {
14291   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14292   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14293   // opcode matches gcc
14294   emit_arith(0x01, 0xC0, src1, src2);
14295 }
14296 
14297 void Assembler::adcxq(Register dst, Register src) {
14298   //assert(VM_Version::supports_adx(), "adx instructions not supported");
14299   if (needs_rex2(dst, src)) {
14300     InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14301     int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C, &attributes, true);
14302     emit_int16((unsigned char)0x66, (0xC0 | encode));
14303   } else {
14304     emit_int8(0x66);
14305     int encode = prefixq_and_encode(dst->encoding(), src->encoding());
14306     emit_int32(0x0F,
14307                0x38,
14308                (unsigned char)0xF6,
14309                (0xC0 | encode));
14310   }
14311 }
14312 
14313 void Assembler::eadcxq(Register dst, Register src1, Register src2) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C, &attributes);
  emit_int16((unsigned char)0x66, (0xC0 | encode));
14317 }
14318 
14319 void Assembler::adoxq(Register dst, Register src) {
14320   //assert(VM_Version::supports_adx(), "adx instructions not supported");
14321   if (needs_rex2(dst, src)) {
14322     InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14323     int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_3C, &attributes, true);
14324     emit_int16((unsigned char)0x66, (0xC0 | encode));
14325   } else {
14326     emit_int8((unsigned char)0xF3);
14327     int encode = prefixq_and_encode(dst->encoding(), src->encoding());
14328     emit_int32(0x0F,
14329                0x38,
14330                (unsigned char)0xF6,
14331                (0xC0 | encode));
14332   }
14333 }
14334 
14335 void Assembler::eadoxq(Register dst, Register src1, Register src2) {
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_3C, &attributes);
  emit_int16((unsigned char)0x66, (0xC0 | encode));
14339 }
14340 
14341 void Assembler::andq(Address dst, int32_t imm32) {
14342   InstructionMark im(this);
14343   prefixq(dst);
14344   emit_arith_operand(0x81, as_Register(4), dst, imm32);
14345 }
14346 
14347 void Assembler::eandq(Register dst, Address src, int32_t imm32, bool no_flags) {
14348   InstructionMark im(this);
14349   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14350   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14351   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14352   emit_arith_operand(0x81, as_Register(4), src, imm32);
14353 }
14354 
14355 void Assembler::andq(Register dst, int32_t imm32) {
14356   (void) prefixq_and_encode(dst->encoding());
14357   emit_arith(0x81, 0xE0, dst, imm32);
14358 }
14359 
14360 void Assembler::eandq(Register dst, Register src, int32_t imm32, bool no_flags) {
14361   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14362   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14363   emit_arith(0x81, 0xE0, src, imm32);
14364 }
14365 
14366 void Assembler::andq(Register dst, Address src) {
14367   InstructionMark im(this);
14368   emit_prefix_and_int8(get_prefixq(src, dst), 0x23);
14369   emit_operand(dst, src, 0);
14370 }
14371 
14372 void Assembler::eandq(Register dst, Register src1, Address src2, bool no_flags) {
14373   InstructionMark im(this);
14374   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14375   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14376   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14377   emit_int8(0x23);
14378   emit_operand(src1, src2, 0);
14379 }
14380 
14381 void Assembler::andq(Register dst, Register src) {
14382   (void) prefixq_and_encode(dst->encoding(), src->encoding());
14383   emit_arith(0x23, 0xC0, dst, src);
14384 }
14385 
14386 void Assembler::eandq(Register dst, Register src1, Register src2, bool no_flags) {
14387   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14388   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14389   // opcode matches gcc
14390   emit_arith(0x21, 0xC0, src1, src2);
14391 }
14392 
14393 void Assembler::andq(Address dst, Register src) {
14394   InstructionMark im(this);
14395   emit_prefix_and_int8(get_prefixq(dst, src), 0x21);
14396   emit_operand(src, dst, 0);
14397 }
14398 
14399 void Assembler::eandq(Register dst, Address src1, Register src2, bool no_flags) {
14400   InstructionMark im(this);
14401   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14402   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14403   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14404   emit_int8(0x21);
14405   emit_operand(src2, src1, 0);
14406 }
14407 
14408 void Assembler::andnq(Register dst, Register src1, Register src2) {
14409   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14410   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14411   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14412   emit_int16((unsigned char)0xF2, (0xC0 | encode));
14413 }
14414 
14415 void Assembler::andnq(Register dst, Register src1, Address src2) {
14416   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14417   InstructionMark im(this);
14418   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14419   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14420   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14421   emit_int8((unsigned char)0xF2);
14422   emit_operand(dst, src2, 0);
14423 }
14424 
14425 void Assembler::bsfq(Register dst, Register src) {
14426   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14427   emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode);
14428 }
14429 
14430 void Assembler::bsrq(Register dst, Register src) {
14431   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14432   emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
14433 }
14434 
14435 void Assembler::bswapq(Register reg) {
14436   int encode = prefixq_and_encode(reg->encoding(), true /* is_map1 */);
14437   emit_opcode_prefix_and_encoding((unsigned char)0xC8, encode);
14438 }
14439 
14440 void Assembler::blsiq(Register dst, Register src) {
14441   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14442   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14443   int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14444   emit_int16((unsigned char)0xF3, (0xC0 | encode));
14445 }
14446 
14447 void Assembler::blsiq(Register dst, Address src) {
14448   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14449   InstructionMark im(this);
14450   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14451   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14452   vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14453   emit_int8((unsigned char)0xF3);
14454   emit_operand(rbx, src, 0);
14455 }
14456 
14457 void Assembler::blsmskq(Register dst, Register src) {
14458   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14459   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14461   emit_int16((unsigned char)0xF3, (0xC0 | encode));
14462 }
14463 
14464 void Assembler::blsmskq(Register dst, Address src) {
14465   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14466   InstructionMark im(this);
14467   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14468   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14469   vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14470   emit_int8((unsigned char)0xF3);
14471   emit_operand(rdx, src, 0);
14472 }
14473 
14474 void Assembler::blsrq(Register dst, Register src) {
14475   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14476   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14477   int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes, true);
14478   emit_int16((unsigned char)0xF3, (0xC0 | encode));
14479 }
14480 
14481 void Assembler::blsrq(Register dst, Address src) {
14482   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
14483   InstructionMark im(this);
14484   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14485   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14486   vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
14487   emit_int8((unsigned char)0xF3);
14488   emit_operand(rcx, src, 0);
14489 }
14490 
14491 void Assembler::cdqq() {
14492   emit_int16(REX_W, (unsigned char)0x99);
14493 }
14494 
14495 void Assembler::cdqe() {
14496   emit_int16(REX_W, (unsigned char)0x98);
14497 }
14498 
14499 void Assembler::clflush(Address adr) {
14500   assert(VM_Version::supports_clflush(), "should do");
14501   prefix(adr, true /* is_map1 */);
14502   emit_int8((unsigned char)0xAE);
14503   emit_operand(rdi, adr, 0);
14504 }
14505 
14506 void Assembler::clflushopt(Address adr) {
14507   assert(VM_Version::supports_clflushopt(), "should do!");
14508   // adr should be base reg only with no index or offset
14509   assert(adr.index() == noreg, "index should be noreg");
14510   assert(adr.scale() == Address::no_scale, "scale should be no_scale");
14511   assert(adr.disp() == 0, "displacement should be 0");
14512   // instruction prefix is 0x66
14513   emit_int8(0x66);
14514   prefix(adr, true /* is_map1 */);
14515   // opcode family is 0x0F 0xAE
14516   emit_int8((unsigned char)0xAE);
14517   // extended opcode byte is 7 == rdi
14518   emit_operand(rdi, adr, 0);
14519 }
14520 
14521 void Assembler::clwb(Address adr) {
14522   assert(VM_Version::supports_clwb(), "should do!");
14523   // adr should be base reg only with no index or offset
14524   assert(adr.index() == noreg, "index should be noreg");
14525   assert(adr.scale() == Address::no_scale, "scale should be no_scale");
14526   assert(adr.disp() == 0, "displacement should be 0");
14527   // instruction prefix is 0x66
14528   emit_int8(0x66);
14529   prefix(adr, true /* is_map1 */);
14530   // opcode family is 0x0f 0xAE
14531   emit_int8((unsigned char)0xAE);
14532   // extended opcode byte is 6 == rsi
14533   emit_operand(rsi, adr, 0);
14534 }
14535 
14536 void Assembler::cmovq(Condition cc, Register dst, Register src) {
14537   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14538   emit_opcode_prefix_and_encoding((0x40 | cc), 0xC0, encode);
14539 }
14540 
14541 void Assembler::ecmovq(Condition cc, Register dst, Register src1, Register src2) {
14542   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14543   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
14544   emit_int16((0x40 | cc), (0xC0 | encode));
14545 }
14546 
14547 void Assembler::cmovq(Condition cc, Register dst, Address src) {
14548   InstructionMark im(this);
14549   int prefix = get_prefixq(src, dst, true /* is_map1 */);
14550   emit_prefix_and_int8(prefix, (0x40 | cc));
14551   emit_operand(dst, src, 0);
14552 }
14553 
14554 void Assembler::ecmovq(Condition cc, Register dst, Register src1, Address src2) {
14555   InstructionMark im(this);
14556   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14557   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14558   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
14559   emit_int8((0x40 | cc));
14560   emit_operand(src1, src2, 0);
14561 }
14562 
14563 void Assembler::cmpq(Address dst, int32_t imm32) {
14564   InstructionMark im(this);
14565   prefixq(dst);
14566   emit_arith_operand(0x81, as_Register(7), dst, imm32);
14567 }
14568 
14569 void Assembler::cmpq(Register dst, int32_t imm32) {
14570   (void) prefixq_and_encode(dst->encoding());
14571   emit_arith(0x81, 0xF8, dst, imm32);
14572 }
14573 
14574 void Assembler::cmpq(Address dst, Register src) {
14575   InstructionMark im(this);
14576   emit_prefix_and_int8(get_prefixq(dst, src), 0x39);
14577   emit_operand(src, dst, 0);
14578 }
14579 
14580 void Assembler::cmpq(Register dst, Register src) {
14581   (void) prefixq_and_encode(dst->encoding(), src->encoding());
14582   emit_arith(0x3B, 0xC0, dst, src);
14583 }
14584 
14585 void Assembler::cmpq(Register dst, Address src) {
14586   InstructionMark im(this);
14587   emit_prefix_and_int8(get_prefixq(src, dst), 0x3B);
14588   emit_operand(dst, src, 0);
14589 }
14590 
14591 void Assembler::cmpxchgq(Register reg, Address adr) {
14592   InstructionMark im(this);
14593   int prefix = get_prefixq(adr, reg, true /* is_map1 */);
14594   emit_prefix_and_int8(prefix, (unsigned char)0xB1);
14595   emit_operand(reg, adr, 0);
14596 }
14597 
14598 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
14599   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
14600   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14601   int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes, true);
14602   emit_int16(0x2A, (0xC0 | encode));
14603 }
14604 
14605 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
14606   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
14607   InstructionMark im(this);
14608   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14609   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
14610   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
14611   emit_int8(0x2A);
14612   emit_operand(dst, src, 0);
14613 }
14614 
14615 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
14616   NOT_LP64(assert(VM_Version::supports_sse(), ""));
14617   InstructionMark im(this);
14618   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14619   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
14620   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
14621   emit_int8(0x2A);
14622   emit_operand(dst, src, 0);
14623 }
14624 
14625 void Assembler::cvttsd2siq(Register dst, Address src) {
14626   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
14627   // F2 REX.W 0F 2C /r
14628   // CVTTSD2SI r64, xmm1/m64
14629   InstructionMark im(this);
14630   emit_int8((unsigned char)0xF2);
14631   prefixq(src, dst, true /* is_map1 */);
14632   emit_int8((unsigned char)0x2C);
14633   emit_operand(dst, src, 0);
14634 }
14635 
14636 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
14637   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
14638   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14639   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
14640   emit_int16(0x2C, (0xC0 | encode));
14641 }
14642 
14643 void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
14644   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
14645   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14646   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
14647   emit_int16(0x2D, (0xC0 | encode));
14648 }
14649 
14650 void Assembler::cvttss2siq(Register dst, XMMRegister src) {
14651   NOT_LP64(assert(VM_Version::supports_sse(), ""));
14652   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14653   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
14654   emit_int16(0x2C, (0xC0 | encode));
14655 }
14656 
14657 void Assembler::decl(Register dst) {
14658   // Don't use it directly. Use MacroAssembler::decrementl() instead.
14659   // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
14660   int encode = prefix_and_encode(dst->encoding());
14661   emit_int16((unsigned char)0xFF, (0xC8 | encode));
14662 }
14663 
14664 void Assembler::edecl(Register dst, Register src, bool no_flags) {
14665   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14666   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14667   emit_int16((unsigned char)0xFF, (0xC8 | encode));
14668 }
14669 
14670 void Assembler::decq(Register dst) {
14671   // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
14673   int encode = prefixq_and_encode(dst->encoding());
14674   emit_int16((unsigned char)0xFF, 0xC8 | encode);
14675 }
14676 
14677 void Assembler::edecq(Register dst, Register src, bool no_flags) {
14678   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14679   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14680   emit_int16((unsigned char)0xFF, (0xC8 | encode));
14681 }
14682 
14683 void Assembler::decq(Address dst) {
14684   // Don't use it directly. Use MacroAssembler::decrementq() instead.
14685   InstructionMark im(this);
14686   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xFF);
14687   emit_operand(rcx, dst, 0);
14688 }
14689 
14690 void Assembler::edecq(Register dst, Address src, bool no_flags) {
14691   InstructionMark im(this);
14692   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14693   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14694   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14695   emit_int8((unsigned char)0xFF);
14696   emit_operand(rcx, src, 0);
14697 }
14698 
14699 // can't use REX2
14700 void Assembler::fxrstor(Address src) {
14701   InstructionMark im(this);
14702   emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
14703   emit_operand(as_Register(1), src, 0);
14704 }
14705 
14706 // can't use REX2
14707 void Assembler::xrstor(Address src) {
14708   InstructionMark im(this);
14709   emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
14710   emit_operand(as_Register(5), src, 0);
14711 }
14712 
14713 // can't use REX2
14714 void Assembler::fxsave(Address dst) {
14715   InstructionMark im(this);
14716   emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
14717   emit_operand(as_Register(0), dst, 0);
14718 }
14719 
// can't use REX2
14721 void Assembler::xsave(Address dst) {
14722   InstructionMark im(this);
14723   emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
14724   emit_operand(as_Register(4), dst, 0);
14725 }
14726 
14727 void Assembler::idivq(Register src) {
14728   int encode = prefixq_and_encode(src->encoding());
14729   emit_int16((unsigned char)0xF7, (0xF8 | encode));
14730 }
14731 
14732 void Assembler::eidivq(Register src, bool no_flags) {
14733   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14734   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14735   emit_int16((unsigned char)0xF7, (0xF8 | encode));
14736 }
14737 
14738 void Assembler::divq(Register src) {
14739   int encode = prefixq_and_encode(src->encoding());
14740   emit_int16((unsigned char)0xF7, (0xF0 | encode));
14741 }
14742 
14743 void Assembler::edivq(Register src, bool no_flags) {
14744   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14745   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14746   emit_int16((unsigned char)0xF7, (0xF0 | encode));
14747 }
14748 
14749 void Assembler::imulq(Register dst, Register src) {
14750   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14751   emit_opcode_prefix_and_encoding((unsigned char)0xAF, 0xC0, encode);
14752 }
14753 
14754 void Assembler::eimulq(Register dst, Register src, bool no_flags) {
14755   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14756   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14757   emit_int16((unsigned char)0xAF, (0xC0 | encode));
14758 }
14759 
14760 void Assembler::eimulq(Register dst, Register src1, Register src2, bool no_flags) {
14761   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14762   int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14763   emit_int16((unsigned char)0xAF, (0xC0 | encode));
14764 }
14765 
14766 void Assembler::imulq(Register src) {
14767   int encode = prefixq_and_encode(src->encoding());
14768   emit_int16((unsigned char)0xF7, (0xE8 | encode));
14769 }
14770 
14771 void Assembler::eimulq(Register src, bool no_flags) {
14772   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14773   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14774   emit_int16((unsigned char)0xF7, (0xE8 | encode));
14775 }
14776 
14777 void Assembler::imulq(Register dst, Address src, int32_t value) {
14778   InstructionMark im(this);
14779   prefixq(src, dst);
14780   if (is8bit(value)) {
14781     emit_int8((unsigned char)0x6B);
14782     emit_operand(dst, src, 1);
14783     emit_int8(value);
14784   } else {
14785     emit_int8((unsigned char)0x69);
14786     emit_operand(dst, src, 4);
14787     emit_int32(value);
14788   }
14789 }
14790 
14791 void Assembler::eimulq(Register dst, Address src, int32_t value, bool no_flags) {
14792   InstructionMark im(this);
14793   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14794   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
14795   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14796   if (is8bit(value)) {
14797     emit_int8((unsigned char)0x6B);
14798     emit_operand(dst, src, 1);
14799     emit_int8(value);
14800   } else {
14801     emit_int8((unsigned char)0x69);
14802     emit_operand(dst, src, 4);
14803     emit_int32(value);
14804   }
14805 }
14806 
14807 void Assembler::imulq(Register dst, Register src, int value) {
14808   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
14809   if (is8bit(value)) {
14810     emit_int24(0x6B, (0xC0 | encode), (value & 0xFF));
14811   } else {
14812     emit_int16(0x69, (0xC0 | encode));
14813     emit_int32(value);
14814   }
14815 }
14816 
14817 void Assembler::eimulq(Register dst, Register src, int value, bool no_flags) {
14818   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14819   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, /* src_is_gpr */ true, /* nds_is_ndd */ false, no_flags);
14820   if (is8bit(value)) {
14821     emit_int24(0x6B, (0xC0 | encode), (value & 0xFF));
14822   } else {
14823     emit_int16(0x69, (0xC0 | encode));
14824     emit_int32(value);
14825   }
14826 }
14827 
14828 void Assembler::imulq(Register dst, Address src) {
14829   InstructionMark im(this);
14830   int prefix = get_prefixq(src, dst, true /* is_map1 */);
14831   emit_prefix_and_int8(prefix, (unsigned char)0xAF);
14832   emit_operand(dst, src, 0);
14833 }
14834 
14835 void Assembler::eimulq(Register dst, Address src, bool no_flags) {
14836   InstructionMark im(this);
14837   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14838   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes, /* nds_is_ndd */ false, no_flags);
14839   emit_int8((unsigned char)0xAF);
14840   emit_operand(dst, src, 0);
14841 }
14842 
14843 void Assembler::eimulq(Register dst, Register src1, Address src2, bool no_flags) {
14844   InstructionMark im(this);
14845   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14846   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
14847   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14848   emit_int8((unsigned char)0xAF);
14849   emit_operand(src1, src2, 0);
14850 }
14851 
14852 void Assembler::incl(Register dst) {
14853   // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
14855   int encode = prefix_and_encode(dst->encoding());
14856   emit_int16((unsigned char)0xFF, (0xC0 | encode));
14857 }
14858 
14859 void Assembler::eincl(Register dst, Register src, bool no_flags) {
14860   // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
14862   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14864   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14865   emit_int16((unsigned char)0xFF, (0xC0 | encode));
14866 }
14867 
14868 void Assembler::incq(Register dst) {
14869   // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
14871   int encode = prefixq_and_encode(dst->encoding());
14872   emit_int16((unsigned char)0xFF, (0xC0 | encode));
14873 }
14874 
14875 void Assembler::eincq(Register dst, Register src, bool no_flags) {
14876   // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
14878   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14879   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14880   emit_int16((unsigned char)0xFF, (0xC0 | encode));
14881 }
14882 
14883 void Assembler::incq(Address dst) {
14884   // Don't use it directly. Use MacroAssembler::incrementq() instead.
14885   InstructionMark im(this);
14886   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xFF);
14887   emit_operand(rax, dst, 0);
14888 }
14889 
14890 void Assembler::eincq(Register dst, Address src, bool no_flags) {
14891   // Don't use it directly. Use MacroAssembler::incrementq() instead.
14892   InstructionMark im(this);
14893   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14894   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14895   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
  emit_int8((unsigned char)0xFF);
14897   emit_operand(rax, src, 0);
14898 }
14899 
14900 void Assembler::lea(Register dst, Address src) {
14901   leaq(dst, src);
14902 }
14903 
14904 void Assembler::leaq(Register dst, Address src) {
14905   InstructionMark im(this);
14906   emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x8D);
14907   emit_operand(dst, src, 0);
14908 }
14909 
14910 void Assembler::mov64(Register dst, int64_t imm64) {
14911   InstructionMark im(this);
14912   int encode = prefixq_and_encode(dst->encoding());
14913   emit_int8(0xB8 | encode);
14914   emit_int64(imm64);
14915 }
14916 
14917 void Assembler::mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format) {
14918   InstructionMark im(this);
14919   int encode = prefixq_and_encode(dst->encoding());
14920   emit_int8(0xB8 | encode);
14921   emit_data64(imm64, rtype, format);
14922 }
14923 
14924 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
14925   InstructionMark im(this);
14926   int encode = prefixq_and_encode(dst->encoding());
14927   emit_int8(0xB8 | encode);
14928   emit_data64(imm64, rspec);
14929 }
14930 
14931 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
14932   InstructionMark im(this);
14933   int encode = prefix_and_encode(dst->encoding());
14934   emit_int8(0xB8 | encode);
14935   emit_data((int)imm32, rspec, narrow_oop_operand);
14936 }
14937 
14938 void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
14939   InstructionMark im(this);
14940   prefix(dst);
14941   emit_int8((unsigned char)0xC7);
14942   emit_operand(rax, dst, 4);
14943   emit_data((int)imm32, rspec, narrow_oop_operand);
14944 }
14945 
14946 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
14947   InstructionMark im(this);
14948   int encode = prefix_and_encode(src1->encoding());
14949   emit_int16((unsigned char)0x81, (0xF8 | encode));
14950   emit_data((int)imm32, rspec, narrow_oop_operand);
14951 }
14952 
14953 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
14954   InstructionMark im(this);
14955   prefix(src1);
14956   emit_int8((unsigned char)0x81);
14957   emit_operand(rax, src1, 4);
14958   emit_data((int)imm32, rspec, narrow_oop_operand);
14959 }
14960 
14961 void Assembler::lzcntq(Register dst, Register src) {
14962   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
14963   emit_int8((unsigned char)0xF3);
14964   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
14965   emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
14966 }
14967 
14968 void Assembler::elzcntq(Register dst, Register src, bool no_flags) {
14969   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
14970   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14971   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14972   emit_int16((unsigned char)0xF5, (0xC0 | encode));
14973 }
14974 
14975 void Assembler::lzcntq(Register dst, Address src) {
14976   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
14977   InstructionMark im(this);
14978   emit_int8((unsigned char)0xF3);
14979   prefixq(src, dst, true /* is_map1 */);
14980   emit_int8((unsigned char)0xBD);
14981   emit_operand(dst, src, 0);
14982 }
14983 
14984 void Assembler::elzcntq(Register dst, Address src, bool no_flags) {
14985   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
14986   InstructionMark im(this);
14987   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14988   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
14989   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
14990   emit_int8((unsigned char)0xF5);
14991   emit_operand(dst, src, 0);
14992 }
14993 
14994 void Assembler::movdq(XMMRegister dst, Register src) {
14995   // table D-1 says MMX/SSE2
14996   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
14997   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
14998   int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
14999   emit_int16(0x6E, (0xC0 | encode));
15000 }
15001 
15002 void Assembler::movdq(Register dst, XMMRegister src) {
15003   // table D-1 says MMX/SSE2
15004   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
15005   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15006   // swap src/dst to get correct prefix
15007   int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes, true);
15008   emit_int16(0x7E,
15009              (0xC0 | encode));
15010 }
15011 
15012 void Assembler::movq(Register dst, Register src) {
15013   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
15014   emit_int16((unsigned char)0x8B,
15015              (0xC0 | encode));
15016 }
15017 
15018 void Assembler::movq(Register dst, Address src) {
15019   InstructionMark im(this);
15020   emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x8B);
15021   emit_operand(dst, src, 0);
15022 }
15023 
15024 void Assembler::movq(Address dst, Register src) {
15025   InstructionMark im(this);
15026   emit_prefix_and_int8(get_prefixq(dst, src), (unsigned char)0x89);
15027   emit_operand(src, dst, 0);
15028 }
15029 
15030 void Assembler::movq(Address dst, int32_t imm32) {
15031   InstructionMark im(this);
15032   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC7);
15033   emit_operand(as_Register(0), dst, 4);
15034   emit_int32(imm32);
15035 }
15036 
15037 void Assembler::movq(Register dst, int32_t imm32) {
15038   int encode = prefixq_and_encode(dst->encoding());
15039   emit_int16((unsigned char)0xC7, (0xC0 | encode));
15040   emit_int32(imm32);
15041 }
15042 
15043 void Assembler::movsbq(Register dst, Address src) {
15044   InstructionMark im(this);
15045   int prefix = get_prefixq(src, dst, true /* is_map1 */);
15046   emit_prefix_and_int8(prefix, (unsigned char)0xBE);
15047   emit_operand(dst, src, 0);
15048 }
15049 
15050 void Assembler::movsbq(Register dst, Register src) {
15051   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15052   emit_opcode_prefix_and_encoding((unsigned char)0xBE, 0xC0, encode);
15053 }
15054 
15055 void Assembler::movslq(Address dst, int32_t imm32) {
15056   assert(is_simm32(imm32), "lost bits");
15057   InstructionMark im(this);
15058   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC7);
15059   emit_operand(rax, dst, 4);
15060   emit_int32(imm32);
15061 }
15062 
15063 void Assembler::movslq(Register dst, Address src) {
15064   InstructionMark im(this);
15065   emit_prefix_and_int8(get_prefixq(src, dst), 0x63);
15066   emit_operand(dst, src, 0);
15067 }
15068 
15069 void Assembler::movslq(Register dst, Register src) {
15070   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
15071   emit_int16(0x63, (0xC0 | encode));
15072 }
15073 
15074 void Assembler::movswq(Register dst, Address src) {
15075   InstructionMark im(this);
15076   int prefix = get_prefixq(src, dst, true /* is_map1 */);
15077   emit_prefix_and_int8(prefix, (unsigned char)0xBF);
15078   emit_operand(dst, src, 0);
15079 }
15080 
15081 void Assembler::movswq(Register dst, Register src) {
15082   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15083   emit_opcode_prefix_and_encoding((unsigned char)0xBF, 0xC0, encode);
15084 }
15085 
15086 void Assembler::movzbq(Register dst, Address src) {
15087   InstructionMark im(this);
15088   int prefix = get_prefixq(src, dst, true /* is_map1 */);
15089   emit_prefix_and_int8(prefix, (unsigned char)0xB6);
15090   emit_operand(dst, src, 0);
15091 }
15092 
15093 void Assembler::movzbq(Register dst, Register src) {
15094   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15095   emit_opcode_prefix_and_encoding((unsigned char)0xB6, 0xC0, encode);
15096 }
15097 
15098 void Assembler::movzwq(Register dst, Address src) {
15099   InstructionMark im(this);
15100   int prefix = get_prefixq(src, dst, true /* is_map1 */);
15101   emit_prefix_and_int8(prefix, (unsigned char)0xB7);
15102   emit_operand(dst, src, 0);
15103 }
15104 
15105 void Assembler::movzwq(Register dst, Register src) {
15106   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15107   emit_opcode_prefix_and_encoding((unsigned char)0xB7, 0xC0, encode);
15108 }
15109 
15110 void Assembler::mulq(Address src) {
15111   InstructionMark im(this);
15112   emit_prefix_and_int8(get_prefixq(src), (unsigned char)0xF7);
15113   emit_operand(rsp, src, 0);
15114 }
15115 
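      // The e*-prefixed variants below emit the EVEX-promoted (Intel APX) forms
      // of the legacy instructions, encoded in extended-EVEX map 4 (spelled
      // VEX_OPCODE_0F_3C here). The *_ndd helpers encode a New Data Destination,
      // i.e. the result is written to 'dst' while the source operands are left
      // intact, and no_flags selects the NF form, which suppresses updates of
      // the arithmetic status flags.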
15116 void Assembler::emulq(Address src, bool no_flags) {
15117   InstructionMark im(this);
15118   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15119   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15120   evex_prefix_nf(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15121   emit_int8((unsigned char)0xF7);
15122   emit_operand(rsp, src, 0);
15123 }
15124 
15125 void Assembler::mulq(Register src) {
15126   int encode = prefixq_and_encode(src->encoding());
15127   emit_int16((unsigned char)0xF7, (0xE0 | encode));
15128 }
15129 
15130 void Assembler::emulq(Register src, bool no_flags) {
15131   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15132   int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15133   emit_int16((unsigned char)0xF7, (0xE0 | encode));
15134 }
15135 
15136 void Assembler::mulxq(Register dst1, Register dst2, Register src) {
15137   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
15138   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15139   int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes, true);
15140   emit_int16((unsigned char)0xF6, (0xC0 | encode));
15141 }
15142 
15143 void Assembler::negq(Register dst) {
15144   int encode = prefixq_and_encode(dst->encoding());
15145   emit_int16((unsigned char)0xF7, (0xD8 | encode));
15146 }
15147 
15148 void Assembler::enegq(Register dst, Register src, bool no_flags) {
15149   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15150   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15151   emit_int16((unsigned char)0xF7, (0xD8 | encode));
15152 }
15153 
15154 void Assembler::negq(Address dst) {
15155   InstructionMark im(this);
15156   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xF7);
15157   emit_operand(as_Register(3), dst, 0);
15158 }
15159 
15160 void Assembler::enegq(Register dst, Address src, bool no_flags) {
15161   InstructionMark im(this);
15162   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15163   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15164   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15165   emit_int8((unsigned char)0xF7);
15166   emit_operand(as_Register(3), src, 0);
15167 }
15168 
15169 void Assembler::notq(Register dst) {
15170   int encode = prefixq_and_encode(dst->encoding());
15171   emit_int16((unsigned char)0xF7, (0xD0 | encode));
15172 }
15173 
15174 void Assembler::enotq(Register dst, Register src) {
15175   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15176   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
15177   emit_int16((unsigned char)0xF7, (0xD0 | encode));
15178 }
15179 
15180 void Assembler::btq(Register dst, Register src) {
15181   int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
15182   emit_opcode_prefix_and_encoding((unsigned char)0xA3, 0xC0, encode);
15183 }
15184 
15185 void Assembler::btq(Register src, int imm8) {
15186   assert(isByte(imm8), "not a byte");
15187   int encode = prefixq_and_encode(src->encoding(), true /* is_map1 */);
15188   emit_opcode_prefix_and_encoding((unsigned char)0xBA, 0xE0, encode);
15189   emit_int8(imm8);
15190 }
15191 
15192 void Assembler::btsq(Address dst, int imm8) {
15193   assert(isByte(imm8), "not a byte");
15194   InstructionMark im(this);
15195   int prefix = get_prefixq(dst, true /* is_map1 */);
15196   emit_prefix_and_int8(prefix, (unsigned char)0xBA);
15197   emit_operand(rbp /* 5 */, dst, 1);
15198   emit_int8(imm8);
15199 }
15200 
15201 void Assembler::btrq(Address dst, int imm8) {
15202   assert(isByte(imm8), "not a byte");
15203   InstructionMark im(this);
15204   int prefix = get_prefixq(dst, true /* is_map1 */);
15205   emit_prefix_and_int8(prefix, (unsigned char)0xBA);
15206   emit_operand(rsi /* 6 */, dst, 1);
15207   emit_int8(imm8);
15208 }
15209 
15210 void Assembler::orq(Address dst, int32_t imm32) {
15211   InstructionMark im(this);
15212   prefixq(dst);
15213   emit_arith_operand(0x81, as_Register(1), dst, imm32);
15214 }
15215 
15216 void Assembler::eorq(Register dst, Address src, int32_t imm32, bool no_flags) {
15217   InstructionMark im(this);
15218   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15219   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15220   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15221   emit_arith_operand(0x81, as_Register(1), src, imm32);
15222 }
15223 
15224 void Assembler::orq(Address dst, Register src) {
15225   InstructionMark im(this);
15226   emit_prefix_and_int8(get_prefixq(dst, src), (unsigned char)0x09);
15227   emit_operand(src, dst, 0);
15228 }
15229 
15230 void Assembler::eorq(Register dst, Address src1, Register src2, bool no_flags) {
15231   InstructionMark im(this);
15232   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15233   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15234   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15235   emit_int8(0x09);
15236   emit_operand(src2, src1, 0);
15237 }
15238 
15239 void Assembler::orq(Register dst, int32_t imm32) {
15240   (void) prefixq_and_encode(dst->encoding());
15241   emit_arith(0x81, 0xC8, dst, imm32);
15242 }
15243 
15244 void Assembler::eorq(Register dst, Register src, int32_t imm32, bool no_flags) {
15245   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15246   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15247   emit_arith(0x81, 0xC8, src, imm32);
15248 }
15249 
15250 void Assembler::orq_imm32(Register dst, int32_t imm32) {
15251   (void) prefixq_and_encode(dst->encoding());
15252   emit_arith_imm32(0x81, 0xC8, dst, imm32);
15253 }
15254 
15255 void Assembler::eorq_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
15256   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15257   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15258   emit_arith_imm32(0x81, 0xC8, src, imm32);
15259 }
15260 
15261 void Assembler::orq(Register dst, Address src) {
15262   InstructionMark im(this);
15263   emit_prefix_and_int8(get_prefixq(src, dst), 0x0B);
15264   emit_operand(dst, src, 0);
15265 }
15266 
15267 void Assembler::eorq(Register dst, Register src1, Address src2, bool no_flags) {
15268   InstructionMark im(this);
15269   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15270   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15271   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15272   emit_int8(0x0B);
15273   emit_operand(src1, src2, 0);
15274 }
15275 
15276 void Assembler::orq(Register dst, Register src) {
15277   (void) prefixq_and_encode(dst->encoding(), src->encoding());
15278   emit_arith(0x0B, 0xC0, dst, src);
15279 }
15280 
15281 void Assembler::eorq(Register dst, Register src1, Register src2, bool no_flags) {
15282   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15283   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15284   // Use the MR-form opcode 0x09 (orq above uses 0x0B) to match the encoding gcc emits.
15285   emit_arith(0x09, 0xC0, src1, src2);
15286 }
15287 
15288 void Assembler::popcntq(Register dst, Address src) {
15289   assert(VM_Version::supports_popcnt(), "must support");
15290   InstructionMark im(this);
15291   emit_int8((unsigned char)0xF3);
15292   emit_prefix_and_int8(get_prefixq(src, dst, true /* is_map1 */), (unsigned char)0xB8);
15293   emit_operand(dst, src, 0);
15294 }
15295 
15296 void Assembler::epopcntq(Register dst, Address src, bool no_flags) {
15297   assert(VM_Version::supports_popcnt(), "must support");
15298   InstructionMark im(this);
15299   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15300   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15301   evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15302   emit_int8((unsigned char)0x88);
15303   emit_operand(dst, src, 0);
15304 }
15305 
15306 void Assembler::popcntq(Register dst, Register src) {
15307   assert(VM_Version::supports_popcnt(), "must support");
15308   emit_int8((unsigned char)0xF3);
15309   int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
15310   emit_opcode_prefix_and_encoding((unsigned char)0xB8, 0xC0, encode);
15311 }
15312 
15313 void Assembler::epopcntq(Register dst, Register src, bool no_flags) {
15314   assert(VM_Version::supports_popcnt(), "must support");
15315   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15316   int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15317   emit_int16((unsigned char)0x88, (0xC0 | encode));
15318 }
15319 
15320 void Assembler::popq(Address dst) {
15321   InstructionMark im(this);
15322   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0x8F);
15323   emit_operand(rax, dst, 0);
15324 }
15325 
15326 void Assembler::popq(Register dst) {
15327   int encode = prefix_and_encode(dst->encoding());
15328   emit_int8((unsigned char)0x58 | encode);
15329 }
15330 
15331 // Precomputable: popa, pusha, vzeroupper
15332 
15333 // The results of these routines are invariant from one invocation to the
15334 // next for the duration of a run. Caching the result at bootstrap and
15335 // copying it out on subsequent invocations can thus be beneficial.
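      // (The sequences are generated once into a temporary buffer at VM
      // bootstrap by precompute_instructions(), saved into the C-heap arrays
      // below, and thereafter spliced verbatim into code buffers by
      // emit_copy(); pusha(), popa() and vzeroupper() then reduce to a single
      // memcpy each.)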
15336 static bool     precomputed = false;
15337 
15338 static u_char* popa_code  = nullptr;
15339 static int     popa_len   = 0;
15340 
15341 static u_char* pusha_code = nullptr;
15342 static int     pusha_len  = 0;
15343 
15344 static u_char* vzup_code  = nullptr;
15345 static int     vzup_len   = 0;
15346 
15347 void Assembler::precompute_instructions() {
15348   assert(!Universe::is_fully_initialized(), "must still be single threaded");
15349   guarantee(!precomputed, "only once");
15350   precomputed = true;
15351   ResourceMark rm;
15352 
15353   // Make a temporary buffer big enough for the routines we're capturing
15354   int size = UseAPX ? 512 : 256;
15355   char* tmp_code = NEW_RESOURCE_ARRAY(char, size);
15356   CodeBuffer buffer((address)tmp_code, size);
15357   MacroAssembler masm(&buffer);
15358 
15359   address begin_popa  = masm.code_section()->end();
15360   masm.popa_uncached();
15361   address end_popa    = masm.code_section()->end();
15362   masm.pusha_uncached();
15363   address end_pusha   = masm.code_section()->end();
15364   masm.vzeroupper_uncached();
15365   address end_vzup    = masm.code_section()->end();
15366 
15367   // Save the instructions to permanent buffers.
15368   popa_len = (int)(end_popa - begin_popa);
15369   popa_code = NEW_C_HEAP_ARRAY(u_char, popa_len, mtInternal);
15370   memcpy(popa_code, begin_popa, popa_len);
15371 
15372   pusha_len = (int)(end_pusha - end_popa);
15373   pusha_code = NEW_C_HEAP_ARRAY(u_char, pusha_len, mtInternal);
15374   memcpy(pusha_code, end_popa, pusha_len);
15375 
15376   vzup_len = (int)(end_vzup - end_pusha);
15377   if (vzup_len > 0) {
15378     vzup_code = NEW_C_HEAP_ARRAY(u_char, vzup_len, mtInternal);
15379     memcpy(vzup_code, end_pusha, vzup_len);
15380   } else {
15381     vzup_code = pusha_code; // dummy
15382   }
15383 
15384   assert(masm.code()->total_oop_size() == 0 &&
15385          masm.code()->total_metadata_size() == 0 &&
15386          masm.code()->total_relocation_size() == 0,
15387          "pre-computed code can't reference oops, metadata or contain relocations");
15388 }
15389 
15390 static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
15391   assert(src != nullptr, "code to copy must have been pre-computed");
15392   assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
15393   address end = code_section->end();
15394   memcpy(end, src, src_len);
15395   code_section->set_end(end + src_len);
15396 }
15397 
15398 
15399 // Does not actually store the value of rsp on the stack.
15400 // The slot for rsp just contains an arbitrary value.
15401 void Assembler::pusha() { // 64bit
15402   emit_copy(code_section(), pusha_code, pusha_len);
15403 }
15404 
15405 // Does not actually store the value of rsp on the stack.
15406 // The slot for rsp just contains an arbitrary value.
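      // Without APX, the layout of the 16-slot save area (in wordSize units,
      // relative to rsp immediately after pusha) is: 15:rax 14:rcx 13:rdx
      // 12:rbx 11:(rsp slot, arbitrary) 10:rbp 9:rsi 8:rdi 7:r8 6:r9 5:r10
      // 4:r11 3:r12 2:r13 1:r14 0:r15.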
15407 void Assembler::pusha_uncached() { // 64bit
15408   if (UseAPX) {
15409     // Data pushed by PUSH2 must be 16B-aligned on the stack. To ensure this,
15410     // push rax upfront and use it as a temporary register for stack alignment.
15411     pushp(rax);
15412     // Move original stack pointer to RAX and align stack pointer to 16B boundary.
15413     movq(rax, rsp);
15414     andq(rsp, -(StackAlignmentInBytes));
15415     // Push pair of original stack pointer along with remaining registers
15416     // at 16B aligned boundary.
15417     push2p(rax, r31);
15418     push2p(r30, r29);
15419     push2p(r28, r27);
15420     push2p(r26, r25);
15421     push2p(r24, r23);
15422     push2p(r22, r21);
15423     push2p(r20, r19);
15424     push2p(r18, r17);
15425     push2p(r16, r15);
15426     push2p(r14, r13);
15427     push2p(r12, r11);
15428     push2p(r10, r9);
15429     push2p(r8, rdi);
15430     push2p(rsi, rbp);
15431     push2p(rbx, rdx);
15432     // Reserve 8 bytes so the stack stays 16-byte aligned after rcx is pushed.
15433     subq(rsp, 8);
15434     pushp(rcx);
15435   } else {
15436     subq(rsp, 16 * wordSize);
15437     movq(Address(rsp, 15 * wordSize), rax);
15438     movq(Address(rsp, 14 * wordSize), rcx);
15439     movq(Address(rsp, 13 * wordSize), rdx);
15440     movq(Address(rsp, 12 * wordSize), rbx);
15441     // Skip rsp as the value is normally not used. There are a few places where
15442     // the original value of rsp needs to be known but that can be computed
15443     // from the value of rsp immediately after pusha (rsp + 16 * wordSize).
15444     // FIXME: For APX any such direct access should also consider the EGPR
15445     // size during address computation.
15446     movq(Address(rsp, 10 * wordSize), rbp);
15447     movq(Address(rsp, 9 * wordSize), rsi);
15448     movq(Address(rsp, 8 * wordSize), rdi);
15449     movq(Address(rsp, 7 * wordSize), r8);
15450     movq(Address(rsp, 6 * wordSize), r9);
15451     movq(Address(rsp, 5 * wordSize), r10);
15452     movq(Address(rsp, 4 * wordSize), r11);
15453     movq(Address(rsp, 3 * wordSize), r12);
15454     movq(Address(rsp, 2 * wordSize), r13);
15455     movq(Address(rsp, wordSize), r14);
15456     movq(Address(rsp, 0), r15);
15457   }
15458 }
15459 
15460 void Assembler::popa() { // 64bit
15461   emit_copy(code_section(), popa_code, popa_len);
15462 }
15463 
15464 void Assembler::popa_uncached() { // 64bit
15465   if (UseAPX) {
15466     popp(rcx);
15467     addq(rsp, 8);
15468     // Data being popped by POP2 must be 16B-aligned on the stack.
15469     pop2p(rdx, rbx);
15470     pop2p(rbp, rsi);
15471     pop2p(rdi, r8);
15472     pop2p(r9, r10);
15473     pop2p(r11, r12);
15474     pop2p(r13, r14);
15475     pop2p(r15, r16);
15476     pop2p(r17, r18);
15477     pop2p(r19, r20);
15478     pop2p(r21, r22);
15479     pop2p(r23, r24);
15480     pop2p(r25, r26);
15481     pop2p(r27, r28);
15482     pop2p(r29, r30);
15483     // The value popped into RAX is the original, unaligned stack pointer.
15484     pop2p(r31, rax);
15485     // Restore the original stack pointer.
15486     movq(rsp, rax);
15487     popp(rax);
15488   } else {
15489     movq(r15, Address(rsp, 0));
15490     movq(r14, Address(rsp, wordSize));
15491     movq(r13, Address(rsp, 2 * wordSize));
15492     movq(r12, Address(rsp, 3 * wordSize));
15493     movq(r11, Address(rsp, 4 * wordSize));
15494     movq(r10, Address(rsp, 5 * wordSize));
15495     movq(r9,  Address(rsp, 6 * wordSize));
15496     movq(r8,  Address(rsp, 7 * wordSize));
15497     movq(rdi, Address(rsp, 8 * wordSize));
15498     movq(rsi, Address(rsp, 9 * wordSize));
15499     movq(rbp, Address(rsp, 10 * wordSize));
15500     // Skip rsp; it is restored to its pre-pusha value by the addq below
15501     // once popa is done.
15502     movq(rbx, Address(rsp, 12 * wordSize));
15503     movq(rdx, Address(rsp, 13 * wordSize));
15504     movq(rcx, Address(rsp, 14 * wordSize));
15505     movq(rax, Address(rsp, 15 * wordSize));
15506 
15507     addq(rsp, 16 * wordSize);
15508   }
15509 }
15510 
15511 void Assembler::vzeroupper() {
15512   emit_copy(code_section(), vzup_code, vzup_len);
15513 }
15514 
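      // Unlike vzeroupper, which clears only bits 255:128 of the ymm registers,
      // vzeroall zeroes them entirely; both forms avoid the AVX-SSE transition
      // penalty.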
15515 void Assembler::vzeroall() {
15516   assert(VM_Version::supports_avx(), "requires AVX");
15517   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
15518   (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
15519   emit_int8(0x77);
15520 }
15521 
15522 void Assembler::pushq(Address src) {
15523   InstructionMark im(this);
15524   emit_prefix_and_int8(get_prefixq(src), (unsigned char)0xFF);
15525   emit_operand(rsi, src, 0);
15526 }
15527 
15528 void Assembler::rclq(Register dst, int imm8) {
15529   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15530   int encode = prefixq_and_encode(dst->encoding());
15531   if (imm8 == 1) {
15532     emit_int16((unsigned char)0xD1, (0xD0 | encode));
15533   } else {
15534     emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
15535   }
15536 }
15537 
15538 void Assembler::erclq(Register dst, Register src, int imm8) {
15539   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15540   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15541   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
15542   if (imm8 == 1) {
15543     emit_int16((unsigned char)0xD1, (0xD0 | encode));
15544   } else {
15545     emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
15546   }
15547 }
15548 
15549 void Assembler::rcrq(Register dst, int imm8) {
15550   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15551   int encode = prefixq_and_encode(dst->encoding());
15552   if (imm8 == 1) {
15553     emit_int16((unsigned char)0xD1, (0xD8 | encode));
15554   } else {
15555     emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
15556   }
15557 }
15558 
15559 void Assembler::ercrq(Register dst, Register src, int imm8) {
15560   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15561   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15562   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
15563   if (imm8 == 1) {
15564     emit_int16((unsigned char)0xD1, (0xD8 | encode));
15565   } else {
15566     emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
15567   }
15568 }
15569 
15570 void Assembler::rorxl(Register dst, Register src, int imm8) {
15571   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
15572   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15573   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes, true);
15574   emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
15575 }
15576 
15577 void Assembler::rorxl(Register dst, Address src, int imm8) {
15578   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
15579   InstructionMark im(this);
15580   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15581   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
15582   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
15583   emit_int8((unsigned char)0xF0);
15584   emit_operand(dst, src, 1);
15585   emit_int8(imm8);
15586 }
15587 
15588 void Assembler::rorxq(Register dst, Register src, int imm8) {
15589   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
15590   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15591   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes, true);
15592   emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
15593 }
15594 
15595 void Assembler::rorxq(Register dst, Address src, int imm8) {
15596   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
15597   InstructionMark im(this);
15598   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15599   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15600   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
15601   emit_int8((unsigned char)0xF0);
15602   emit_operand(dst, src, 1);
15603   emit_int8(imm8);
15604 }
15605 
15606 #ifdef _LP64
15607 void Assembler::salq(Address dst, int imm8) {
15608   InstructionMark im(this);
15609   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15610   if (imm8 == 1) {
15611     emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD1);
15612     emit_operand(as_Register(4), dst, 0);
15613   }
15614   else {
15615     emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC1);
15616     emit_operand(as_Register(4), dst, 1);
15617     emit_int8(imm8);
15618   }
15619 }
15620 
15621 void Assembler::esalq(Register dst, Address src, int imm8, bool no_flags) {
15622   InstructionMark im(this);
15623   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15624   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15625   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15626   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15627   if (imm8 == 1) {
15628     emit_int8((unsigned char)0xD1);
15629     emit_operand(as_Register(4), src, 0);
15630   }
15631   else {
15632     emit_int8((unsigned char)0xC1);
15633     emit_operand(as_Register(4), src, 1);
15634     emit_int8(imm8);
15635   }
15636 }
15637 
15638 void Assembler::salq(Address dst) {
15639   InstructionMark im(this);
15640   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
15641   emit_operand(as_Register(4), dst, 0);
15642 }
15643 
15644 void Assembler::esalq(Register dst, Address src, bool no_flags) {
15645   InstructionMark im(this);
15646   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15647   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15648   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15649   emit_int8((unsigned char)0xD3);
15650   emit_operand(as_Register(4), src, 0);
15651 }
15652 
15653 void Assembler::salq(Register dst, int imm8) {
15654   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15655   int encode = prefixq_and_encode(dst->encoding());
15656   if (imm8 == 1) {
15657     emit_int16((unsigned char)0xD1, (0xE0 | encode));
15658   } else {
15659     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
15660   }
15661 }
15662 
15663 void Assembler::esalq(Register dst, Register src, int imm8, bool no_flags) {
15664   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15665   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15666   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15667   if (imm8 == 1) {
15668     emit_int16((unsigned char)0xD1, (0xE0 | encode));
15669   } else {
15670     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
15671   }
15672 }
15673 
15674 void Assembler::salq(Register dst) {
15675   int encode = prefixq_and_encode(dst->encoding());
15676   emit_int16((unsigned char)0xD3, (0xE0 | encode));
15677 }
15678 
15679 void Assembler::esalq(Register dst, Register src, bool no_flags) {
15680   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15681   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15682   emit_int16((unsigned char)0xD3, (0xE0 | encode));
15683 }
15684 
15685 void Assembler::sarq(Address dst, int imm8) {
15686   InstructionMark im(this);
15687   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15688   if (imm8 == 1) {
15689     emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD1);
15690     emit_operand(as_Register(7), dst, 0);
15691   }
15692   else {
15693     emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC1);
15694     emit_operand(as_Register(7), dst, 1);
15695     emit_int8(imm8);
15696   }
15697 }
15698 
15699 void Assembler::esarq(Register dst, Address src, int imm8, bool no_flags) {
15700   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15701   InstructionMark im(this);
15702   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15703   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15704   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15705   if (imm8 == 1) {
15706     emit_int8((unsigned char)0xD1);
15707     emit_operand(as_Register(7), src, 0);
15708   }
15709   else {
15710     emit_int8((unsigned char)0xC1);
15711     emit_operand(as_Register(7), src, 1);
15712     emit_int8(imm8);
15713   }
15714 }
15715 
15716 void Assembler::sarq(Address dst) {
15717   InstructionMark im(this);
15718   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
15719   emit_operand(as_Register(7), dst, 0);
15720 }
15721 
15722 void Assembler::esarq(Register dst, Address src, bool no_flags) {
15723   InstructionMark im(this);
15724   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15725   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15726   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15727   emit_int8((unsigned char)0xD3);
15728   emit_operand(as_Register(7), src, 0);
15729 }
15730 
15731 void Assembler::sarq(Register dst, int imm8) {
15732   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15733   int encode = prefixq_and_encode(dst->encoding());
15734   if (imm8 == 1) {
15735     emit_int16((unsigned char)0xD1, (0xF8 | encode));
15736   } else {
15737     emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
15738   }
15739 }
15740 
15741 void Assembler::esarq(Register dst, Register src, int imm8, bool no_flags) {
15742   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15743   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15744   if (imm8 == 1) {
15745     emit_int16((unsigned char)0xD1, (0xF8 | encode));
15746   } else {
15747     emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
15748   }
15749 }
15750 
15751 void Assembler::sarq(Register dst) {
15752   int encode = prefixq_and_encode(dst->encoding());
15753   emit_int16((unsigned char)0xD3, (0xF8 | encode));
15754 }
15755 
15756 void Assembler::esarq(Register dst, Register src, bool no_flags) {
15757   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15758   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15759   emit_int16((unsigned char)0xD3, (0xF8 | encode));
15760 }
15761 #endif
15762 
15763 void Assembler::sbbq(Address dst, int32_t imm32) {
15764   InstructionMark im(this);
15765   prefixq(dst);
15766   emit_arith_operand(0x81, rbx, dst, imm32);
15767 }
15768 
15769 void Assembler::sbbq(Register dst, int32_t imm32) {
15770   (void) prefixq_and_encode(dst->encoding());
15771   emit_arith(0x81, 0xD8, dst, imm32);
15772 }
15773 
15774 void Assembler::sbbq(Register dst, Address src) {
15775   InstructionMark im(this);
15776   emit_prefix_and_int8(get_prefixq(src, dst), 0x1B);
15777   emit_operand(dst, src, 0);
15778 }
15779 
15780 void Assembler::sbbq(Register dst, Register src) {
15781   (void) prefixq_and_encode(dst->encoding(), src->encoding());
15782   emit_arith(0x1B, 0xC0, dst, src);
15783 }
15784 
15785 void Assembler::shlq(Register dst, int imm8) {
15786   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15787   int encode = prefixq_and_encode(dst->encoding());
15788   if (imm8 == 1) {
15789     emit_int16((unsigned char)0xD1, (0xE0 | encode));
15790   } else {
15791     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
15792   }
15793 }
15794 
15795 void Assembler::eshlq(Register dst, Register src, int imm8, bool no_flags) {
15796   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15797   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15798   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15799   if (imm8 == 1) {
15800     emit_int16((unsigned char)0xD1, (0xE0 | encode));
15801   } else {
15802     emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
15803   }
15804 }
15805 
15806 void Assembler::shlq(Register dst) {
15807   int encode = prefixq_and_encode(dst->encoding());
15808   emit_int16((unsigned char)0xD3, (0xE0 | encode));
15809 }
15810 
15811 void Assembler::eshlq(Register dst, Register src, bool no_flags) {
15812   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15813   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15814   emit_int16((unsigned char)0xD3, (0xE0 | encode));
15815 }
15816 
15817 void Assembler::shrq(Register dst, int imm8) {
15818   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15819   int encode = prefixq_and_encode(dst->encoding());
15820   if (imm8 == 1) {
15821     emit_int16((unsigned char)0xD1, (0xE8 | encode));
15822   }
15823   else {
15824     emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
15825   }
15826 }
15827 
15828 void Assembler::eshrq(Register dst, Register src, int imm8, bool no_flags) {
15829   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15830   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15831   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15832   if (imm8 == 1) {
15833     emit_int16((unsigned char)0xD1, (0xE8 | encode));
15834   }
15835   else {
15836     emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
15837   }
15838 }
15839 
15840 void Assembler::shrq(Register dst) {
15841   int encode = prefixq_and_encode(dst->encoding());
15842   emit_int16((unsigned char)0xD3, (0xE8 | encode));
15843 }
15844 
15845 void Assembler::eshrq(Register dst, Register src, bool no_flags) {
15846   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15847   int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15848   emit_int16((unsigned char)0xD3, (0xE8 | encode));
15849 }
15850 
15851 void Assembler::shrq(Address dst) {
15852   InstructionMark im(this);
15853   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
15854   emit_operand(as_Register(5), dst, 0);
15855 }
15856 
15857 void Assembler::eshrq(Register dst, Address src, bool no_flags) {
15858   InstructionMark im(this);
15859   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15860   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15861   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15862   emit_int8((unsigned char)0xD3);
15863   emit_operand(as_Register(5), src, 0);
15864 }
15865 
15866 void Assembler::shrq(Address dst, int imm8) {
15867   InstructionMark im(this);
15868   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15869   if (imm8 == 1) {
15870     emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD1);
15871     emit_operand(as_Register(5), dst, 0);
15872   }
15873   else {
15874     emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xC1);
15875     emit_operand(as_Register(5), dst, 1);
15876     emit_int8(imm8);
15877   }
15878 }
15879 
15880 void Assembler::eshrq(Register dst, Address src, int imm8, bool no_flags) {
15881   InstructionMark im(this);
15882   assert(isShiftCount(imm8 >> 1), "illegal shift count");
15883   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15884   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15885   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15886   if (imm8 == 1) {
15887     emit_int8((unsigned char)0xD1);
15888     emit_operand(as_Register(5), src, 0);
15889   }
15890   else {
15891     emit_int8((unsigned char)0xC1);
15892     emit_operand(as_Register(5), src, 1);
15893     emit_int8(imm8);
15894   }
15895 }
15896 
15897 void Assembler::subq(Address dst, int32_t imm32) {
15898   InstructionMark im(this);
15899   prefixq(dst);
15900   emit_arith_operand(0x81, rbp, dst, imm32);
15901 }
15902 
15903 void Assembler::esubq(Register dst, Address src, int32_t imm32, bool no_flags) {
15904   InstructionMark im(this);
15905   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15906   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15907   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15908   emit_arith_operand(0x81, rbp, src, imm32);
15909 }
15910 
15911 void Assembler::subq(Address dst, Register src) {
15912   InstructionMark im(this);
15913   emit_prefix_and_int8(get_prefixq(dst, src), 0x29);
15914   emit_operand(src, dst, 0);
15915 }
15916 
15917 void Assembler::esubq(Register dst, Address src1, Register src2, bool no_flags) {
15918   InstructionMark im(this);
15919   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15920   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15921   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15922   emit_int8(0x29);
15923   emit_operand(src2, src1, 0);
15924 }
15925 
15926 void Assembler::subq(Register dst, int32_t imm32) {
15927   (void) prefixq_and_encode(dst->encoding());
15928   emit_arith(0x81, 0xE8, dst, imm32);
15929 }
15930 
15931 void Assembler::esubq(Register dst, Register src, int32_t imm32, bool no_flags) {
15932   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15933   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15934   emit_arith(0x81, 0xE8, src, imm32);
15935 }
15936 
15937 // Force generation of a 4-byte immediate value even if it fits into 8 bits
15938 void Assembler::subq_imm32(Register dst, int32_t imm32) {
15939   (void) prefixq_and_encode(dst->encoding());
15940   emit_arith_imm32(0x81, 0xE8, dst, imm32);
15941 }
15942 
15943 void Assembler::esubq_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
15944   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15945   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15946   emit_arith_imm32(0x81, 0xE8, src, imm32);
15947 }
15948 
15949 void Assembler::subq(Register dst, Address src) {
15950   InstructionMark im(this);
15951   emit_prefix_and_int8(get_prefixq(src, dst), 0x2B);
15952   emit_operand(dst, src, 0);
15953 }
15954 
15955 void Assembler::esubq(Register dst, Register src1, Address src2, bool no_flags) {
15956   InstructionMark im(this);
15957   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15958   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
15959   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15960   emit_int8(0x2B);
15961   emit_operand(src1, src2, 0);
15962 }
15963 
15964 void Assembler::subq(Register dst, Register src) {
15965   (void) prefixq_and_encode(dst->encoding(), src->encoding());
15966   emit_arith(0x2B, 0xC0, dst, src);
15967 }
15968 
15969 void Assembler::esubq(Register dst, Register src1, Register src2, bool no_flags) {
15970   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
15971   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
15972   // Use the MR-form opcode 0x29 (subq above uses 0x2B) to match the encoding gcc emits.
15973   emit_arith(0x29, 0xC0, src1, src2);
15974 }
15975 
15976 void Assembler::testq(Address dst, int32_t imm32) {
15977   InstructionMark im(this);
15978   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xF7);
15979   emit_operand(as_Register(0), dst, 4);
15980   emit_int32(imm32);
15981 }
15982 
15983 void Assembler::testq(Register dst, int32_t imm32) {
15984   // Not using emit_arith because test
15985   // does not support sign-extension of
15986   // 8-bit operands.
15987   if (dst == rax) {
15988     prefix(REX_W);
15989     emit_int8((unsigned char)0xA9);
15990     emit_int32(imm32);
15991   } else {
15992     int encode = dst->encoding();
15993     encode = prefixq_and_encode(encode);
15994     emit_int16((unsigned char)0xF7, (0xC0 | encode));
15995     emit_int32(imm32);
15996   }
15997 }
15998 
15999 void Assembler::testq(Register dst, Register src) {
16000   (void) prefixq_and_encode(dst->encoding(), src->encoding());
16001   emit_arith(0x85, 0xC0, dst, src);
16002 }
16003 
16004 void Assembler::testq(Register dst, Address src) {
16005   InstructionMark im(this);
16006   emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x85);
16007   emit_operand(dst, src, 0);
16008 }
16009 
16010 void Assembler::xaddq(Address dst, Register src) {
16011   InstructionMark im(this);
16012   int prefix = get_prefixq(dst, src, true /* is_map1 */);
16013   emit_prefix_and_int8(prefix, (unsigned char)0xC1);
16014   emit_operand(src, dst, 0);
16015 }
16016 
16017 void Assembler::xchgq(Register dst, Address src) {
16018   InstructionMark im(this);
16019   emit_prefix_and_int8(get_prefixq(src, dst), (unsigned char)0x87);
16020   emit_operand(dst, src, 0);
16021 }
16022 
16023 void Assembler::xchgq(Register dst, Register src) {
16024   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
16025   emit_int16((unsigned char)0x87, (0xC0 | encode));
16026 }
16027 
16028 void Assembler::xorq(Register dst, Register src) {
16029   (void) prefixq_and_encode(dst->encoding(), src->encoding());
16030   emit_arith(0x33, 0xC0, dst, src);
16031 }
16032 
16033 void Assembler::exorq(Register dst, Register src1, Register src2, bool no_flags) {
16034   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16035   (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
16036   // Use the MR-form opcode 0x31 (xorq above uses 0x33) to match the encoding gcc emits.
16037   emit_arith(0x31, 0xC0, src1, src2);
16038 }
16039 
16040 void Assembler::xorq(Register dst, Address src) {
16041   InstructionMark im(this);
16042   emit_prefix_and_int8(get_prefixq(src, dst), 0x33);
16043   emit_operand(dst, src, 0);
16044 }
16045 
16046 void Assembler::exorq(Register dst, Register src1, Address src2, bool no_flags) {
16047   InstructionMark im(this);
16048   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16049   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16050   evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
16051   emit_int8(0x33);
16052   emit_operand(src1, src2, 0);
16053 }
16054 
16055 void Assembler::xorq(Register dst, int32_t imm32) {
16056   (void) prefixq_and_encode(dst->encoding());
16057   emit_arith(0x81, 0xF0, dst, imm32);
16058 }
16059 
16060 void Assembler::exorq(Register dst, Register src, int32_t imm32, bool no_flags) {
16061   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16062   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
16063   emit_arith(0x81, 0xF0, src, imm32);
16064 }
16065 
16066 void Assembler::xorq(Address dst, int32_t imm32) {
16067   InstructionMark im(this);
16068   prefixq(dst);
16069   emit_arith_operand(0x81, as_Register(6), dst, imm32);
16070 }
16071 
16072 void Assembler::exorq(Register dst, Address src, int32_t imm32, bool no_flags) {
16073   InstructionMark im(this);
16074   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16075   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16076   evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
16077   emit_arith_operand(0x81, as_Register(6), src, imm32);
16078 }
16079 
16080 void Assembler::xorq(Address dst, Register src) {
16081   InstructionMark im(this);
16082   emit_prefix_and_int8(get_prefixq(dst, src), 0x31);
16083   emit_operand(src, dst, 0);
16084 }
16085 
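      // esetzucc emits the APX SETcc form with zero-upper semantics: it writes
      // 0 or 1 to the full destination register, whereas legacy setcc writes
      // only the low byte.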
16086 void Assembler::esetzucc(Condition cc, Register dst) {
16087   assert(VM_Version::supports_apx_f(), "");
16088   assert(0 <= cc && cc < 16, "illegal cc");
16089   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16090   // Encoding format: eevex_prefix (4 bytes) | opcode_cc | modrm
16091   int encode = evex_prefix_and_encode_ndd(0, 0, dst->encoding(), VEX_SIMD_F2, /* MAP4 */VEX_OPCODE_0F_3C, &attributes);
16092   emit_opcode_prefix_and_encoding((0x40 | cc), 0xC0, encode);
16093 }
16094 
16095 void Assembler::exorq(Register dst, Address src1, Register src2, bool no_flags) {
16096   InstructionMark im(this);
16097   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
16098   attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
16099   evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
16100   emit_int8(0x31);
16101   emit_operand(src2, src1, 0);
16102 }
16103 
16104 #endif // !LP64
16105 
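      // The tuple type and memory-operand width recorded here feed the
      // compressed (disp8*N) displacement calculation used by EVEX encodings;
      // on non-EVEX hardware they are irrelevant and left unset.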
16106 void InstructionAttr::set_address_attributes(int tuple_type, int input_size_in_bits) {
16107   if (VM_Version::supports_evex()) {
16108     _tuple_type = tuple_type;
16109     _input_size_in_bits = input_size_in_bits;
16110   }
16111 }
16112 
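      // The vpermi2*/vpermt2* family performs a two-table permute: index
      // elements select from the concatenation of two source tables. In the
      // 'i2' forms below the index operand (dst) is overwritten with the
      // result; in the 't2' forms the first table operand is overwritten.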
16113 void Assembler::evpermi2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16114   assert(VM_Version::supports_avx512_vbmi() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
16115   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16116   attributes.set_is_evex_instruction();
16117   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16118   emit_int16(0x75, (0xC0 | encode));
16119 }
16120 
16121 void Assembler::evpermi2w(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16122   assert(VM_Version::supports_avx512bw() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
16123   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16124   attributes.set_is_evex_instruction();
16125   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16126   emit_int16(0x75, (0xC0 | encode));
16127 }
16128 
16129 void Assembler::evpermi2d(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16130   assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
16131   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16132   attributes.set_is_evex_instruction();
16133   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16134   emit_int16(0x76, (0xC0 | encode));
16135 }
16136 
16137 void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16138   assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
16139   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16140   attributes.set_is_evex_instruction();
16141   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16142   emit_int16(0x76, (0xC0 | encode));
16143 }
16144 
16145 void Assembler::evpermi2ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16146   assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
16147   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16148   attributes.set_is_evex_instruction();
16149   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16150   emit_int16(0x77, (0xC0 | encode));
16151 }
16152 
16153 void Assembler::evpermi2pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16154   assert(VM_Version::supports_evex() && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl()), "");
16155   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16156   attributes.set_is_evex_instruction();
16157   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16158   emit_int16(0x77, (0xC0 | encode));
16159 }
16160 
16161 void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16162   assert(VM_Version::supports_avx512_vbmi(), "");
16163   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
16164   attributes.set_is_evex_instruction();
16165   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
16166   emit_int16(0x7D, (0xC0 | encode));
16167 }