
src/hotspot/cpu/x86/assembler_x86.cpp

*** 4885,22 ***
    emit_int8((unsigned char)0xF3);
    int encode = prefix_and_encode(dst->encoding(), src->encoding());
    emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode));
  }
  
! void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
!   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x55, (0xC0 | encode));
  }
  
! void Assembler::vpopcntq(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
!   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x55, (0xC0 | encode));
  }
  
  void Assembler::popf() {
--- 4885,58 ---
    emit_int8((unsigned char)0xF3);
    int encode = prefix_and_encode(dst->encoding(), src->encoding());
    emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode));
  }
  
! void Assembler::evpopcntb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x54, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpopcntw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x54, (0xC0 | encode));
+ }
+ 
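For context: the two encoders above emit VPOPCNTB/VPOPCNTW (AVX-512 BITALG), which count the set bits in each byte/word lane; the merge flag selects EVEX merge-masking (masked-off lanes keep their destination value, via reset_is_clear_context()) versus zero-masking. The same semantics can be sketched outside HotSpot with the BITALG intrinsics; a minimal illustration only, assuming a compiler with -mavx512bitalg support (not part of this change):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512i v   = _mm512_set1_epi8(0x0F);        // 4 set bits in every byte
  __m512i src = _mm512_setzero_si512();
  // Unmasked VPOPCNTB, then a merge-masked form covering only the low
  // 32 bytes (merge == true in the encoder above).
  __m512i c1 = _mm512_popcnt_epi8(v);
  __m512i c2 = _mm512_mask_popcnt_epi8(src, 0xFFFFFFFFULL, v);
  alignas(64) char o1[64], o2[64];
  _mm512_store_si512((__m512i*)o1, c1);
  _mm512_store_si512((__m512i*)o2, c2);
  printf("%d %d %d\n", o1[63], o2[0], o2[63]); // 4 4 0
  return 0;
}
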
+ void Assembler::evpopcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
!   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x55, (0xC0 | encode));
  }
  
! void Assembler::evpopcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
!   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x55, (0xC0 | encode));
  }
  
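Likewise for VPOPCNTD/VPOPCNTQ above (AVX-512 VPOPCNTDQ; opcode 0x55 with vex_w selecting dword versus qword lanes). A hedged sketch with the matching intrinsics, assuming -mavx512vpopcntdq (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512i cnt = _mm512_popcnt_epi32(_mm512_set1_epi32(0xFF));  // VPOPCNTD: 8 per lane
  // Zero-masking (merge == false in the encoder): unselected lanes become 0.
  __m512i q = _mm512_maskz_popcnt_epi64(0x0F, _mm512_set1_epi64(-1));  // VPOPCNTQ
  alignas(64) int d[16];
  alignas(64) long long ql[8];
  _mm512_store_si512((__m512i*)d, cnt);
  _mm512_store_si512((__m512i*)ql, q);
  printf("%d %lld %lld\n", d[0], ql[0], ql[7]);  // 8 64 0
  return 0;
}
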
  void Assembler::popf() {

*** 7941,10 ***
--- 7977,34 ---
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x14, (unsigned char)(0xC0 | encode));
  }
  
+ void Assembler::evplzcntd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512cd() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x44, (0xC0 | encode));
+ }
+ 
+ void Assembler::evplzcntq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512cd() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x44, (0xC0 | encode));
+ }
+ 
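VPLZCNTD/VPLZCNTQ (AVX-512 CD) return the leading-zero count of each dword/qword lane; an all-zero lane yields the full lane width. An illustration with the corresponding intrinsics, assuming -mavx512cd (not part of this change):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512i v = _mm512_set_epi32(0, 1, 2, 4, 8, 16, 1 << 30, -1,
                               0, 1, 2, 4, 8, 16, 1 << 30, -1);
  __m512i lz = _mm512_lzcnt_epi32(v);            // VPLZCNTD per dword lane
  alignas(64) int out[16];
  _mm512_store_si512((__m512i*)out, lz);
  printf("%d %d %d\n", out[0], out[1], out[15]); // 0 1 32
  return 0;
}
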
  void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();

*** 7977,10 ***
--- 8037,88 ---
    emit_int8(0x25);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(imm8);
  }
  
+ void Assembler::evexpandps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x88, (0xC0 | encode));
+ }
+ 
+ void Assembler::evexpandpd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x88, (0xC0 | encode));
+ }
+ 
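VEXPANDPS/VEXPANDPD read contiguous elements from the bottom of the source and scatter them to the destination lanes whose mask bit is set; merge-masking keeps the destination in the remaining lanes, zero-masking clears them. A small sketch, assuming -mavx512f (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512 a  = _mm512_set_ps(16, 15, 14, 13, 12, 11, 10, 9,
                            8, 7, 6, 5, 4, 3, 2, 1);     // lane i holds i+1
  __m512 bg = _mm512_setzero_ps();
  // Mask 0b0101: lanes 0 and 2 receive a[0] and a[1]; the rest keep bg.
  __m512 r = _mm512_mask_expand_ps(bg, 0x5, a);          // VEXPANDPS
  alignas(64) float out[16];
  _mm512_store_ps(out, r);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1 0 2 0
  return 0;
}
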
+ void Assembler::evpexpandb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_vbmi2(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x62, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpexpandw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_vbmi2(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16(0x62, (0xC0 | encode));
+ }
+ 
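VPEXPANDB/VPEXPANDW are the byte/word forms and require AVX-512 VBMI2, matching the supports_avx512_vbmi2() asserts above. Sketch, assuming -mavx512vbmi2 (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  // Every word lane with a set mask bit receives the next contiguous source
  // word; zero-masking clears the rest (merge == false in the encoder).
  __m512i r = _mm512_maskz_expand_epi16(0x55555555, _mm512_set1_epi16(7));  // VPEXPANDW
  alignas(64) short out[32];
  _mm512_store_si512((__m512i*)out, r);
  printf("%d %d\n", out[0], out[1]);  // 7 0
  return 0;
}
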
+ void Assembler::evpexpandd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x89, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpexpandq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_is_evex_instruction();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x89, (0xC0 | encode));
+ }
+ 
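VPEXPANDD/VPEXPANDQ behave the same on dword/qword lanes and need only AVX-512 F (plus VL below 512 bits), matching the supports_evex() asserts. Sketch, assuming -mavx512f (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512i a = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                               7, 6, 5, 4, 3, 2, 1, 0);
  // VPEXPANDD, mask 0xFF00: a[0..7] land in lanes 8..15, lanes 0..7 are zeroed.
  __m512i r = _mm512_maskz_expand_epi32(0xFF00, a);
  alignas(64) int out[16];
  _mm512_store_si512((__m512i*)out, r);
  printf("%d %d %d\n", out[0], out[8], out[15]);  // 0 0 7
  return 0;
}
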
  // vinserti forms
  
  void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    assert(VM_Version::supports_avx2(), "");
    assert(imm8 <= 0x01, "imm8: %u", imm8);

*** 8020,11 ***
    // 0x03 - insert into q3 128 bits (384..511)
    emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
  }
  
  void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
!   assert(VM_Version::supports_avx(), "");
    assert(dst != xnoreg, "sanity");
    assert(imm8 <= 0x03, "imm8: %u", imm8);
    InstructionMark im(this);
    InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
--- 8158,11 ---
    // 0x03 - insert into q3 128 bits (384..511)
    emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
  }
  
  void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
!   assert(VM_Version::supports_evex(), "");
    assert(dst != xnoreg, "sanity");
    assert(imm8 <= 0x03, "imm8: %u", imm8);
    InstructionMark im(this);
    InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);

*** 8079,11 ***
    // 0x01 - insert into upper 128 bits
    emit_int8(imm8 & 0x01);
  }
  
  void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
!   assert(VM_Version::supports_avx2(), "");
    assert(imm8 <= 0x03, "imm8: %u", imm8);
    InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
    // imm8:
    // 0x00 - insert into q0 128 bits (0..127)
--- 8217,11 ---
    // 0x01 - insert into upper 128 bits
    emit_int8(imm8 & 0x01);
  }
  
  void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
!   assert(VM_Version::supports_evex(), "");
    assert(imm8 <= 0x03, "imm8: %u", imm8);
    InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
    // imm8:
    // 0x00 - insert into q0 128 bits (0..127)

*** 8092,11 ***
  // 0x03 - insert into q3 128 bits (384..511)
    emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
  }
  
  void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
!   assert(VM_Version::supports_avx(), "");
    assert(dst != xnoreg, "sanity");
    assert(imm8 <= 0x03, "imm8: %u", imm8);
    InstructionMark im(this);
    InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
--- 8230,11 ---
  // 0x03 - insert into q3 128 bits (384..511)
    emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
  }
  
  void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
!   assert(VM_Version::supports_evex(), "");
    assert(dst != xnoreg, "sanity");
    assert(imm8 <= 0x03, "imm8: %u", imm8);
    InstructionMark im(this);
    InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);

*** 8393,10 ***
--- 8531,24 ---
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16((unsigned char)0xF6, (0xC0 | encode));
  }
  
+ void Assembler::vpunpckhwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+   assert(UseAVX > 0, "requires some form of AVX");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x69, (0xC0 | encode));
+ }
+ 
+ void Assembler::vpunpcklwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+   assert(UseAVX > 0, "requires some form of AVX");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int16(0x61, (0xC0 | encode));
+ }
+ 
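vpunpckhwd/vpunpcklwd interleave words from the two sources and, like all unpack forms, operate independently within each 128-bit lane. Sketch with the AVX2 intrinsics, assuming -mavx2 (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m256i a = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8,
                               7, 6, 5, 4, 3, 2, 1, 0);
  __m256i b = _mm256_set_epi16(115, 114, 113, 112, 111, 110, 109, 108,
                               107, 106, 105, 104, 103, 102, 101, 100);
  __m256i lo = _mm256_unpacklo_epi16(a, b);  // VPUNPCKLWD: a0,b0,a1,b1,... per lane
  alignas(32) short out[16];
  _mm256_store_si256((__m256i*)out, lo);
  // The upper 128-bit lane interleaves a8..a11 with b8..b11, not a4../b4..
  printf("%d %d %d %d\n", out[0], out[1], out[8], out[9]);  // 0 100 8 108
  return 0;
}
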
  void Assembler::vpunpckhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int16(0x6A, (0xC0 | encode));

*** 9909,10 ***
--- 10061,18 ---
    emit_int8(0x25);
    emit_operand(dst, src3);
    emit_int8(imm8);
  }
  
+ void Assembler::vgf2p8affineqb(XMMRegister dst, XMMRegister src2, XMMRegister src3, int imm8, int vector_len) {
+   assert(VM_Version::supports_gfni(), "requires GFNI support");
+   assert(VM_Version::supports_sse(), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+   emit_int24((unsigned char)0xCE, (unsigned char)(0xC0 | encode), imm8);
+ }
+ 
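VGF2P8AFFINEQB computes, for each byte x of src2, the GF(2) affine transform A*x ^ b, where every qword of src3 supplies an 8x8 bit matrix A and imm8 supplies b. A common use is reversing the bits of every byte; a sketch assuming -mgfni -mavx512f and the widely used reversal matrix constant 0x8040201008040201 (an assumption worth double-checking against the SDM; illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  // Bit-reverse every byte: affine transform with the anti-diagonal matrix.
  const __m512i M = _mm512_set1_epi64((long long)0x8040201008040201ULL);
  __m512i r = _mm512_gf2p8affine_epi64_epi8(_mm512_set1_epi8(0x01), M, 0);  // VGF2P8AFFINEQB
  alignas(64) unsigned char out[64];
  _mm512_store_si512((__m512i*)out, r);
  printf("%02x\n", out[0]);  // 80: bit 0 moved to bit 7
  return 0;
}
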
  // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
  void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(UseAVX >= 2, "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);

*** 11602,10 ***
--- 11762,83 ---
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
    emit_int16(0x28, (0xC0 | encode));
  }
+ 
+ void Assembler::evpcompressb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_vbmi2() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x63, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpcompressw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_avx512_vbmi2() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x63, (0xC0 | encode));
+ }
+ 
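VPCOMPRESSB/VPCOMPRESSW (AVX-512 VBMI2) are the inverse of expand: lanes whose mask bit is set are packed contiguously at the bottom of the destination. Note that compress encodes its destination in the r/m field, which is why the encoders above pass src->encoding() first and dst->encoding() last. Sketch, assuming -mavx512vbmi2 (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  // VPCOMPRESSB: bytes whose mask bit is set are packed contiguously at the
  // bottom of the destination; zero-masking clears the tail.
  __m512i r = _mm512_maskz_compress_epi8(0x3, _mm512_set1_epi8(9));
  alignas(64) char out[64];
  _mm512_store_si512((__m512i*)out, r);
  printf("%d %d %d\n", out[0], out[1], out[2]);  // 9 9 0
  return 0;
}
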
+ void Assembler::evpcompressd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x8B, (0xC0 | encode));
+ }
+ 
+ void Assembler::evpcompressq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x8B, (0xC0 | encode));
+ }
+ 
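VPCOMPRESSD/VPCOMPRESSQ need only AVX-512 F (plus VL below 512 bits). Sketch, assuming -mavx512f (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512i v = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                               7, 6, 5, 4, 3, 2, 1, 0);
  // VPCOMPRESSD: keep the even lanes, packed to the bottom of the result.
  __m512i r = _mm512_maskz_compress_epi32(0x5555, v);
  alignas(64) int out[16];
  _mm512_store_si512((__m512i*)out, r);
  printf("%d %d %d %d\n", out[0], out[1], out[7], out[8]);  // 0 2 14 0
  return 0;
}
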
+ void Assembler::evcompressps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x8A, (0xC0 | encode));
+ }
+ 
+ void Assembler::evcompresspd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
+   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   if (merge) {
+     attributes.reset_is_clear_context();
+   }
+   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int16((unsigned char)0x8A, (0xC0 | encode));
+ }
+ 
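VCOMPRESSPS/VCOMPRESSPD are the floating-point forms; with merge-masking the tail lanes keep the destination's prior contents. Sketch, assuming -mavx512f (illustration only):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512d v  = _mm512_set_pd(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
  __m512d bg = _mm512_set1_pd(-1.0);
  // VCOMPRESSPD, merge-masked: lanes 1 and 3 (values 2 and 4) are packed to
  // the bottom; the remaining lanes keep bg (merge == true in the encoder).
  __m512d r = _mm512_mask_compress_pd(bg, 0xA, v);
  alignas(64) double out[8];
  _mm512_store_pd(out, r);
  printf("%g %g %g\n", out[0], out[1], out[2]);  // 2 4 -1
  return 0;
}
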
  #ifndef _LP64
  
  void Assembler::incl(Register dst) {
    // Don't use it directly. Use MacroAssembler::incrementl() instead.
    emit_int8(0x40 | dst->encoding());