src/hotspot/cpu/x86/assembler_x86.cpp

*** 2456,10 ***
--- 2456,17 ---
    attributes.set_rex_vex_w_reverted();
    int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
    emit_int16(0x12, 0xC0 | encode);
  }
  
+ void Assembler::kmovbl(KRegister dst, KRegister src) {
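+   // Encoding: VEX.L0.66.0F.W0 90 /r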
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0x90, (0xC0 | encode));
+ }
+ 
  void Assembler::kmovbl(KRegister dst, Register src) {
    assert(VM_Version::supports_avx512dq(), "");
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0x92, (0xC0 | encode));

*** 2503,11 ***
    emit_int8((unsigned char)0x91);
    emit_operand((Register)src, dst);
  }
  
  void Assembler::kmovwl(KRegister dst, KRegister src) {
!   assert(VM_Version::supports_avx512bw(), "");
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0x90, (0xC0 | encode));
  }
  
--- 2510,11 ---
    emit_int8((unsigned char)0x91);
    emit_operand((Register)src, dst);
  }
  
  void Assembler::kmovwl(KRegister dst, KRegister src) {
!   assert(VM_Version::supports_evex(), "");
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0x90, (0xC0 | encode));
  }
  

*** 2569,10 ***
--- 2576,108 ---
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int16(0x44, (0xC0 | encode));
  }
  
+ void Assembler::knotbl(KRegister dst, KRegister src) {
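+   // Encoding: VEX.L0.66.0F.W0 44 /r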
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x44, (0xC0 | encode));
+ }
+ 
+ void Assembler::korbl(KRegister dst, KRegister src1, KRegister src2) {
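+   // Encoding: VEX.L1.66.0F.W0 45 /r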
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x45, (0xC0 | encode));
+ }
+ 
+ void Assembler::korwl(KRegister dst, KRegister src1, KRegister src2) {
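+   // Encoding: VEX.L1.0F.W0 45 /r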
+   assert(VM_Version::supports_evex(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x45, (0xC0 | encode));
+ }
+ 
+ void Assembler::kordl(KRegister dst, KRegister src1, KRegister src2) {
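+   // Encoding: VEX.L1.66.0F.W1 45 /r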
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x45, (0xC0 | encode));
+ }
+ 
+ void Assembler::korql(KRegister dst, KRegister src1, KRegister src2) {
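+   // Encoding: VEX.L1.0F.W1 45 /r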
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x45, (0xC0 | encode));
+ }
+ 
+ void Assembler::kxorbl(KRegister dst, KRegister src1, KRegister src2) {
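+   // Encoding: VEX.L1.66.0F.W0 47 /r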
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x47, (0xC0 | encode));
+ }
+ 
+ void Assembler::kxorwl(KRegister dst, KRegister src1, KRegister src2) {
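+   // Encoding: VEX.L1.0F.W0 47 /r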
+   assert(VM_Version::supports_evex(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x47, (0xC0 | encode));
+ }
+ 
+ void Assembler::kxordl(KRegister dst, KRegister src1, KRegister src2) {
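+   // Encoding: VEX.L1.66.0F.W1 47 /r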
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x47, (0xC0 | encode));
+ }
+ 
+ void Assembler::kxorql(KRegister dst, KRegister src1, KRegister src2) {
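+   // Encoding: VEX.L1.0F.W1 47 /r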
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x47, (0xC0 | encode));
+ }
+ 
+ void Assembler::kandbl(KRegister dst, KRegister src1, KRegister src2) {
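+   // Encoding: VEX.L1.66.0F.W0 41 /r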
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x41, (0xC0 | encode));
+ }
+ 
+ void Assembler::kandwl(KRegister dst, KRegister src1, KRegister src2) {
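+   // Encoding: VEX.L1.0F.W0 41 /r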
+   assert(VM_Version::supports_evex(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x41, (0xC0 | encode));
+ }
+ 
+ void Assembler::kanddl(KRegister dst, KRegister src1, KRegister src2) {
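+   // Encoding: VEX.L1.66.0F.W1 41 /r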
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x41, (0xC0 | encode));
+ }
+ 
+ void Assembler::kandql(KRegister dst, KRegister src1, KRegister src2) {
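+   // Encoding: VEX.L1.0F.W1 41 /r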
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x41, (0xC0 | encode));
+ }
+ 
+ void Assembler::knotdl(KRegister dst, KRegister src) {
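+   // Encoding: VEX.L0.66.0F.W1 44 /r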
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x44, (0xC0 | encode));
+ }
+ 
  void Assembler::knotql(KRegister dst, KRegister src) {
    assert(VM_Version::supports_avx512bw(), "");
    InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int16(0x44, (0xC0 | encode));

*** 2616,10 ***
--- 2721,31 ---
    InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0x99, (0xC0 | encode));
  }
  
+ void Assembler::ktestdl(KRegister src1, KRegister src2) {
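+   // Encoding: VEX.L0.66.0F.W1 99 /r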
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0x99, (0xC0 | encode));
+ }
+ 
+ void Assembler::ktestwl(KRegister src1, KRegister src2) {
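+   // Encoding: VEX.L0.0F.W0 99 /r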
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0x99, (0xC0 | encode));
+ }
+ 
+ void Assembler::ktestbl(KRegister src1, KRegister src2) {
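+   // Encoding: VEX.L0.66.0F.W0 99 /r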
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0x99, (0xC0 | encode));
+ }
+ 
  void Assembler::ktestq(KRegister src1, KRegister src2) {
    assert(VM_Version::supports_avx512bw(), "");
    InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0x99, (0xC0 | encode));

*** 2630,10 ***
--- 2756,56 ---
    InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0x99, (0xC0 | encode));
  }
  
+ void Assembler::kxnorbl(KRegister dst, KRegister src1, KRegister src2) {
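+   // Encoding: VEX.L1.66.0F.W0 46 /r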
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x46, (0xC0 | encode));
+ }
+ 
+ void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) {
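+   // Encoding: VEX.L0.66.0F3A.W0 32 /r ib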
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+   emit_int16(0x32, (0xC0 | encode));
+   emit_int8(imm8);
+ }
+ 
+ void Assembler::kshiftrbl(KRegister dst, KRegister src, int imm8) {
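+   // Encoding: VEX.L0.66.0F3A.W0 30 /r ib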
+   assert(VM_Version::supports_avx512dq(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+   emit_int16(0x30, (0xC0 | encode));
+   emit_int8(imm8);
+ }
+ 
+ void Assembler::kshiftrwl(KRegister dst, KRegister src, int imm8) {
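+   // Encoding: VEX.L0.66.0F3A.W1 30 /r ib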
+   assert(VM_Version::supports_evex(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+   emit_int16(0x30, (0xC0 | encode));
+   emit_int8(imm8);
+ }
+ 
+ void Assembler::kshiftrdl(KRegister dst, KRegister src, int imm8) {
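+   // Encoding: VEX.L0.66.0F3A.W0 31 /r ib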
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+   emit_int16(0x31, (0xC0 | encode));
+   emit_int8(imm8);
+ }
+ 
+ void Assembler::kshiftrql(KRegister dst, KRegister src, int imm8) {
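+   // Encoding: VEX.L0.66.0F3A.W1 31 /r ib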
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+   emit_int16(0x31, (0xC0 | encode));
+   emit_int8(imm8);
+ }
+ 
  void Assembler::movb(Address dst, int imm8) {
    InstructionMark im(this);
  prefix(dst);
    emit_int8((unsigned char)0xC6);
    emit_operand(rax, dst, 1);

*** 4110,28 ***
    vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int8(0x29);
    emit_operand(as_Register(dst_enc), src);
  }
  
- void Assembler::evpmovd2m(KRegister kdst, XMMRegister src, int vector_len) {
-   assert(UseAVX > 2  && VM_Version::supports_avx512dq(), "");
-   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
-   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
-   attributes.set_is_evex_instruction();
-   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
-   emit_int16(0x39, (0xC0 | encode));
- }
- 
- void Assembler::evpmovq2m(KRegister kdst, XMMRegister src, int vector_len) {
-   assert(UseAVX > 2  && VM_Version::supports_avx512dq(), "");
-   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
-   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
-   attributes.set_is_evex_instruction();
-   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
-   emit_int16(0x39, (0xC0 | encode));
- }
- 
  void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
    assert(VM_Version::supports_sse4_1(), "");
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x37, (0xC0 | encode));
--- 4282,10 ---

*** 7417,22 ***
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0xEF, (0xC0 | encode));
  }
  
  void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
-   assert(VM_Version::supports_evex(), "");
    // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    attributes.set_embedded_opmask_register_specifier(mask);
    if (merge) {
      attributes.reset_is_clear_context();
    }
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0xEF, (0xC0 | encode));
  }
  
  void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
--- 7571,134 ---
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0xEF, (0xC0 | encode));
  }
  
  void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
    // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    attributes.set_embedded_opmask_register_specifier(mask);
    if (merge) {
      attributes.reset_is_clear_context();
    }
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0xEF, (0xC0 | encode));
  }
  
+ void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xEF);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   // Encoding: EVEX.NDS.XXX.66.0F.W1 EF /r
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xEF, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xEF);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
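+   // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r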
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xDB);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
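+   // Encoding: EVEX.NDS.XXX.66.0F.W1 DB /r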
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xDB, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xDB);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
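+   // Encoding: EVEX.NDS.XXX.66.0F.W1 EB /r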
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xEB, (0xC0 | encode));
+ }
+ 
+ void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xEB);
+   emit_operand(dst, src);
+ }
+ 
  void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);

*** 7973,33 ***
    emit_operand(dst, src);
  }
  
  // xmm/mem sourced byte/word/dword/qword replicate
  
! // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
! void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
!   assert(UseAVX >= 2, "");
!   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
!   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int16(0x58, (0xC0 | encode));
  }
  
! void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
-   assert(VM_Version::supports_avx2(), "");
-   assert(dst != xnoreg, "sanity");
    InstructionMark im(this);
!   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
!   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
!   // swap src<->dst for encoding
!   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int8(0x58);
!   emit_operand(dst, src);
! }
! 
! // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
! void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
!   assert(VM_Version::supports_avx2(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x59, (0xC0 | encode));
  }
--- 8239,1472 ---
    emit_operand(dst, src);
  }
  
  // xmm/mem sourced byte/word/dword/qword replicate
  
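+ // Masked EVEX arithmetic (the ev* overloads below that take a KRegister mask):
+ // merge == true encodes merge-masking (EVEX.z = 0), so destination lanes whose
+ // mask bit is clear keep their previous contents; merge == false encodes
+ // zero-masking (EVEX.z = 1), which clears those lanes instead.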
! void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
!   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
!   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
!   attributes.set_is_evex_instruction();
!   attributes.set_embedded_opmask_register_specifier(mask);
!   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xFC, (0xC0 | encode));
  }
  
! void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
    InstructionMark im(this);
!   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
!   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
!   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
!   attributes.set_is_evex_instruction();
!   attributes.set_embedded_opmask_register_specifier(mask);
!   if (merge) {
!     attributes.reset_is_clear_context();
!   }
!   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8((unsigned char)0xFC);
!   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xFD, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xFD);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xFE, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xFE);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xD4, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xD4);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x58, (0xC0 | encode));
+ }
+ 
+ void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x58);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x58, (0xC0 | encode));
+ }
+ 
+ void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x58);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xF8, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xF8);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xF9, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xF9);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xFA, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xFA);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xFB, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xFB);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x5C, (0xC0 | encode));
+ }
+ 
+ void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x5C);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x5C, (0xC0 | encode));
+ }
+ 
+ void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x5C);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xD5, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xD5);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x40, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x40);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x40, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x40);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x59, (0xC0 | encode));
+ }
+ 
+ void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x59);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x59, (0xC0 | encode));
+ }
+ 
+ void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x59);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x51, (0xC0 | encode));
+ }
+ 
+ void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x51);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x51, (0xC0 | encode));
+ }
+ 
+ void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x51);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x5E, (0xC0 | encode));
+ }
+ 
+ void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x5E);
+   emit_operand(dst, src);
+ }
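+ 
+ // Example (a sketch, not part of this change): with a lane predicate in k1,
+ // a caller could request a zero-masked 512-bit single-precision divide via
+ //   evdivps(xmm0, k1, xmm1, xmm2, /* merge */ false, Assembler::AVX_512bit);
+ // passing merge = true instead preserves the masked-off lanes of xmm0.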
+ 
+ void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x5E, (0xC0 | encode));
+ }
+ 
+ void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x5E);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpabsb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x1C, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpabsb(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x1C);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpabsw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x1D, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpabsw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x1D);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpabsd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x1E, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpabsd(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x1E);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpabsq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x1F, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpabsq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x1F);
+   emit_operand(dst, src);
+ }
+ 
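+ // FMA form 213 computes dst = nds * dst + src; the digits give the operand
+ // order, op2 * op1 + op3, applied to (dst, nds, src).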
+ void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0xA8, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8((unsigned char)0xA8);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0xA8, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   InstructionMark im(this);
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8((unsigned char)0xA8);
+   emit_operand(dst, src);
+ }
+ 
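+ // Cross-lane permutes: vpermb needs AVX512_VBMI and vpermw AVX512BW, while
+ // vpermd/vpermq are AVX512F forms defined only for 256- and 512-bit vectors.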
+ void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x8D, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8((unsigned char)0x8D);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x8D, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8((unsigned char)0x8D);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x36, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x36);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x36, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x36);
+   emit_operand(dst, src);
+ }
+ 
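+ // In the immediate-count shifts below the opcode extension is carried in the
+ // ModRM reg field; the fixed register passed as the first encode argument
+ // (xmm6 = /6 sll, xmm2 = /2 srl, xmm4 = /4 sra) supplies that extension and
+ // is not a real operand.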
+ void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
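+ // Shift-by-register forms: the count comes from the low 64 bits of src and
+ // applies to every lane.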
+ void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xF1, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xF2, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xF3, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xD1, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xD2, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xD3, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xE1, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xE2, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xE2, (0xC0 | encode));
+ }
+ 
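+ // Per-lane variable shifts (vpsllv/vpsrlv/vpsrav): each lane of src carries
+ // its own shift count.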
+ void Assembler::evpsllvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x12, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsllvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x47, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsllvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x47, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrlvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x10, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrlvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x45, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsrlvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x45, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsravw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x11, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsravd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x46, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpsravq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x46, (0xC0 | encode));
+ }
+ 
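+ // Masked signed min/max. The word forms reuse the legacy 0F EA/EE opcodes;
+ // the byte/dword/qword forms are 0F 38 encodings.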
+ void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x38, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x38);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xEA, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xEA);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x39, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x39);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x39, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x39);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x3C, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x3C);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16((unsigned char)0xEE, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xEE);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x3D, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x3D);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x3D, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x3D);
+   emit_operand(dst, src);
+ }
+ 
+ // Duplicate the 4-byte integer in src across all lanes of dst; the 128- and 256-bit EVEX forms require AVX512VL.
+ void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
+   assert(UseAVX >= 2, "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x58, (0xC0 | encode));
+ }
+ 
+ void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
+   assert(VM_Version::supports_avx2(), "");
+   assert(dst != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
+   // swap src<->dst for encoding
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x58);
+   emit_operand(dst, src);
+ }
+ 
+ // Duplicate the 8-byte integer in src across all lanes of dst; the 128- and 256-bit EVEX forms require AVX512VL.
+ void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_avx2(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x59, (0xC0 | encode));
  }

*** 9365,10 ***
--- 11070,106 ---
    emit_int8((unsigned char)opcode);
    emit_operand(as_Register(dst_enc), src);
    emit_int8((unsigned char)comparison);
  }
  
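+ // Vector rotates. The immediate forms carry the opcode extension in the
+ // ModRM reg field (xmm0 = /0 ror, xmm1 = /1 rol); the v-suffixed forms take
+ // per-lane rotate counts from src.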
+ void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x14, (0xC0 | encode));
+ }
+ 
+ void Assembler::evprorvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x14, (0xC0 | encode));
+ }
+ 
+ void Assembler::evprold(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evprolq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
+ }
+ 
+ void Assembler::evprolvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x15, (0xC0 | encode));
+ }
+ 
+ void Assembler::evprolvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x15, (0xC0 | encode));
+ }
+ 
  void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
    int mask_enc = mask->encoding();

*** 9488,18 ***
--- 11289,73 ---
    InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
    emit_int16((unsigned char)0xF7, (0xC0 | encode));
  }
  
+ void Assembler::evpmovq2m(KRegister dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vldq(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x39, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmovd2m(KRegister dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vldq(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x39, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmovw2m(KRegister dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vlbw(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x29, (0xC0 | encode));
+ }
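+ 
+ // VPMOVQ2M/VPMOVD2M (EVEX.F3.0F38 39 /r, W1/W0) and VPMOVW2M (29 /r, W1)
+ // copy the sign bit of each element into the corresponding opmask bit. The
+ // dword/qword forms are AVX512DQ and the word form AVX512BW; the
+ // avx512vldq/avx512vlbw predicates fold in the VL requirement as well.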
+ 
  void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx512vlbw(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x29, (0xC0 | encode));
  }
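+ 
+ // A minimal usage sketch for evpmovb2m above (the `_masm` receiver is assumed,
+ // not part of this patch): populate k1 from the byte sign bits of zmm0, i.e.
+ // k1[i] = zmm0.b[i] >> 7:
+ //   _masm->evpmovb2m(k1, xmm0, Assembler::AVX_512bit);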
  
+ void Assembler::evpmovm2q(XMMRegister dst, KRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vldq(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x38, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmovm2d(XMMRegister dst, KRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vldq(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x38, (0xC0 | encode));
+ }
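+ 
+ // VPMOVM2Q/VPMOVM2D (EVEX.F3.0F38 38 /r, W1/W0) go the opposite way: each
+ // destination element is set to all-ones or all-zeros according to its opmask
+ // bit, the usual way to materialize a boolean vector from a k-register.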
+ 
+ void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vlbw(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x28, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vlbw(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x28, (0xC0 | encode));
+ }
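+ 
+ // VPMOVM2W/VPMOVM2B are the AVX512BW variants (opcode 28 /r, W1/W0). A hedged
+ // round-trip sketch (again assuming a `_masm` receiver):
+ //   _masm->evpmovb2m(k1, xmm0, Assembler::AVX_512bit); // k1   <- byte sign bits
+ //   _masm->evpmovm2b(xmm1, k1, Assembler::AVX_512bit); // zmm1 <- 0x00/0xFF per k1 bit
+ 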
  #ifndef _LP64
  
  void Assembler::incl(Register dst) {
    // Don't use it directly. Use MacroAssembler::incrementl() instead.
    emit_int8(0x40 | dst->encoding());