
src/hotspot/cpu/x86/assembler_x86.cpp (old version)


 7858   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7859   attributes.set_is_evex_instruction();
 7860   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
 7861   vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7862   emit_int8(0x25);
 7863   emit_operand(dst, src3);
 7864   emit_int8(imm8);
 7865 }
 7866 
 7867 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
 7868   assert(VM_Version::supports_evex(), "requires EVEX support");
 7869   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 7870   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7871   attributes.set_is_evex_instruction();
 7872   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7873   emit_int8(0x25);
 7874   emit_int8((unsigned char)(0xC0 | encode));
 7875   emit_int8(imm8);
 7876 }
 7877 
 7878 // vinserti forms
 7879 
 7880 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 7881   assert(VM_Version::supports_avx2(), "");
 7882   assert(imm8 <= 0x01, "imm8: %u", imm8);
 7883   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7884   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7885   // last byte:
 7886   // 0x00 - insert into lower 128 bits
 7887   // 0x01 - insert into upper 128 bits
 7888   emit_int24(0x38, (0xC0 | encode), imm8 & 0x01);
 7889 }
 7890 
 7891 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 7892   assert(VM_Version::supports_avx2(), "");
 7893   assert(dst != xnoreg, "sanity");
 7894   assert(imm8 <= 0x01, "imm8: %u", imm8);
 7895   InstructionMark im(this);
 7896   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7897   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);

 7901   // 0x00 - insert into lower 128 bits
 7902   // 0x01 - insert into upper 128 bits
 7903   emit_int8(imm8 & 0x01);
 7904 }
 7905 
 7906 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 7907   assert(VM_Version::supports_evex(), "");
 7908   assert(imm8 <= 0x03, "imm8: %u", imm8);
 7909   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7910   attributes.set_is_evex_instruction();
 7911   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7912   // imm8:
 7913   // 0x00 - insert into q0 128 bits (0..127)
 7914   // 0x01 - insert into q1 128 bits (128..255)
 7915   // 0x02 - insert into q2 128 bits (256..383)
 7916   // 0x03 - insert into q3 128 bits (384..511)
 7917   emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
 7918 }
 7919 
 7920 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 7921   assert(VM_Version::supports_avx(), "");
 7922   assert(dst != xnoreg, "sanity");
 7923   assert(imm8 <= 0x03, "imm8: %u", imm8);
 7924   InstructionMark im(this);
 7925   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7926   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 7927   attributes.set_is_evex_instruction();
 7928   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7929   emit_int8(0x18);
 7930   emit_operand(dst, src);
 7931   // 0x00 - insert into q0 128 bits (0..127)
 7932   // 0x01 - insert into q1 128 bits (128..255)
 7933   // 0x02 - insert into q2 128 bits (256..383)
 7934   // 0x03 - insert into q3 128 bits (384..511)
 7935   emit_int8(imm8 & 0x03);
 7936 }
 7937 
 7938 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 7939   assert(VM_Version::supports_evex(), "");
 7940   assert(imm8 <= 0x01, "imm8: %u", imm8);
 7941   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);

 7960   // 0x01 - insert into upper 128 bits
 7961   emit_int24(0x18, (0xC0 | encode), imm8 & 0x01);
 7962 }
 7963 
 7964 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 7965   assert(VM_Version::supports_avx(), "");
 7966   assert(dst != xnoreg, "sanity");
 7967   assert(imm8 <= 0x01, "imm8: %u", imm8);
 7968   InstructionMark im(this);
 7969   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7970   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 7971   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7972   emit_int8(0x18);
 7973   emit_operand(dst, src);
 7974   // 0x00 - insert into lower 128 bits
 7975   // 0x01 - insert into upper 128 bits
 7976   emit_int8(imm8 & 0x01);
 7977 }
 7978 
 7979 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 7980   assert(VM_Version::supports_avx2(), "");
 7981   assert(imm8 <= 0x03, "imm8: %u", imm8);
 7982   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7983   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7984   // imm8:
 7985   // 0x00 - insert into q0 128 bits (0..127)
 7986   // 0x01 - insert into q1 128 bits (128..255)
 7987   // 0x02 - insert into q2 128 bits (256..383)
 7988   // 0x03 - insert into q3 128 bits (384..511)
 7989   emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
 7990 }
 7991 
 7992 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 7993   assert(VM_Version::supports_avx(), "");
 7994   assert(dst != xnoreg, "sanity");
 7995   assert(imm8 <= 0x03, "imm8: %u", imm8);
 7996   InstructionMark im(this);
 7997   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7998   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 7999   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 8000   emit_int8(0x18);
 8001   emit_operand(dst, src);
 8002   // 0x00 - insert into q0 128 bits (0..127)
 8003   // 0x01 - insert into q1 128 bits (128..255)
 8004   // 0x02 - insert into q2 128 bits (256..383)
 8005   // 0x03 - insert into q3 128 bits (384..511)
 8006   emit_int8(imm8 & 0x03);
 8007 }
 8008 
 8009 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 8010   assert(VM_Version::supports_evex(), "");
 8011   assert(imm8 <= 0x01, "imm8: %u", imm8);
 8012   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8013   attributes.set_is_evex_instruction();

11401   attributes.set_is_evex_instruction();
11402   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11403   emit_int16(0x38, (0xC0 | encode));
11404 }
11405 
11406 void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
11407   assert(VM_Version::supports_avx512vlbw(), "");
11408   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11409   attributes.set_is_evex_instruction();
11410   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11411   emit_int16(0x28, (0xC0 | encode));
11412 }
11413 
11414 void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
11415   assert(VM_Version::supports_avx512vlbw(), "");
11416   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11417   attributes.set_is_evex_instruction();
11418   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11419   emit_int16(0x28, (0xC0 | encode));
11420 }
11421 #ifndef _LP64
11422 
11423 void Assembler::incl(Register dst) {
11424   // Don't use it directly. Use MacroAssembler::incrementl() instead.
11425   emit_int8(0x40 | dst->encoding());
11426 }
11427 
11428 void Assembler::lea(Register dst, Address src) {
11429   leal(dst, src);
11430 }
11431 
11432 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
11433   InstructionMark im(this);
11434   emit_int8((unsigned char)0xC7);
11435   emit_operand(rax, dst);
11436   emit_data((int)imm32, rspec, 0);
11437 }
11438 
11439 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
11440   InstructionMark im(this);

src/hotspot/cpu/x86/assembler_x86.cpp (patched version)

 7858   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7859   attributes.set_is_evex_instruction();
 7860   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
 7861   vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7862   emit_int8(0x25);
 7863   emit_operand(dst, src3);
 7864   emit_int8(imm8);
 7865 }
 7866 
 7867 void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
 7868   assert(VM_Version::supports_evex(), "requires EVEX support");
 7869   assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
 7870   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7871   attributes.set_is_evex_instruction();
 7872   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7873   emit_int8(0x25);
 7874   emit_int8((unsigned char)(0xC0 | encode));
 7875   emit_int8(imm8);
 7876 }
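
For reference, the vpternlogq immediate is an eight-entry truth table: at
every bit position the three source bits form a 3-bit index (dst/src1 as
bit 2, src2 as bit 1, src3 as bit 0) that selects one bit of imm8. A minimal
scalar sketch of the per-bit semantics (illustrative only; the helper below
is hypothetical, not part of the assembler):

// Scalar model of VPTERNLOG's per-bit behavior.
static uint64_t ternlog64(uint64_t a, uint64_t b, uint64_t c, uint8_t imm8) {
  uint64_t r = 0;
  for (int i = 0; i < 64; i++) {
    int idx = (int)((((a >> i) & 1) << 2) | (((b >> i) & 1) << 1) | ((c >> i) & 1));
    r |= (uint64_t)((imm8 >> idx) & 1) << i;  // truth-table lookup per bit
  }
  return r;
}

For example, imm8 = 0x96 computes a ^ b ^ c, and imm8 = 0xE8 the bitwise
majority of the three inputs.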
 7877 
 7878 void Assembler::evexpandps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 7879   assert(VM_Version::supports_evex(), "");
 7880   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 7881   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7882   attributes.set_is_evex_instruction();
 7883   attributes.set_embedded_opmask_register_specifier(mask);
 7884   if (merge) {
 7885     attributes.reset_is_clear_context();
 7886   }
 7887   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7888   emit_int16((unsigned char)0x88, (0xC0 | encode));
 7889 }
 7890 
 7891 void Assembler::evexpandpd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 7892   assert(VM_Version::supports_evex(), "");
 7893   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 7894   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7895   attributes.set_is_evex_instruction();
 7896   attributes.set_embedded_opmask_register_specifier(mask);
 7897   if (merge) {
 7898     attributes.reset_is_clear_context();
 7899   }
 7900   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7901   emit_int16((unsigned char)0x88, (0xC0 | encode));
 7902 }
 7903 
 7904 void Assembler::evpexpandb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 7905   assert(VM_Version::supports_avx512_vbmi2(), "");
 7906   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 7907   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7908   attributes.set_is_evex_instruction();
 7909   attributes.set_embedded_opmask_register_specifier(mask);
 7910   if (merge) {
 7911     attributes.reset_is_clear_context();
 7912   }
 7913   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7914   emit_int16(0x62, (0xC0 | encode));
 7915 }
 7916 
 7917 void Assembler::evpexpandw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 7918   assert(VM_Version::supports_avx512_vbmi2(), "");
 7919   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 7920   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7921   attributes.set_is_evex_instruction();
 7922   attributes.set_embedded_opmask_register_specifier(mask);
 7923   if (merge) {
 7924     attributes.reset_is_clear_context();
 7925   }
 7926   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7927   emit_int16(0x62, (0xC0 | encode));
 7928 }
 7929 
 7930 void Assembler::evpexpandd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 7931   assert(VM_Version::supports_evex(), "");
 7932   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 7933   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7934   attributes.set_is_evex_instruction();
 7935   attributes.set_embedded_opmask_register_specifier(mask);
 7936   if (merge) {
 7937     attributes.reset_is_clear_context();
 7938   }
 7939   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7940   emit_int16((unsigned char)0x89, (0xC0 | encode));
 7941 }
 7942 
 7943 void Assembler::evpexpandq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 7944   assert(VM_Version::supports_evex(), "");
 7945   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 7946   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7947   attributes.set_is_evex_instruction();
 7948   attributes.set_embedded_opmask_register_specifier(mask);
 7949   if (merge) {
 7950     attributes.reset_is_clear_context();
 7951   }
 7952   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7953   emit_int16((unsigned char)0x89, (0xC0 | encode));
 7954 }
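
The expand family above moves consecutive source elements into the
destination lanes whose mask bit is set; with merge-masking the unselected
lanes keep their previous contents, with zero-masking they are cleared. A
scalar sketch for the quadword form (hypothetical model, not HotSpot code):

// 8-lane model of VPEXPANDQ.
static void expand8(uint64_t dst[8], const uint64_t src[8], uint8_t mask, bool merge) {
  int j = 0;                      // next consecutive source element
  for (int i = 0; i < 8; i++) {
    if ((mask >> i) & 1) {
      dst[i] = src[j++];          // selected lane receives the next element
    } else if (!merge) {
      dst[i] = 0;                 // zero-masking clears unselected lanes
    }                             // merge-masking leaves dst[i] untouched
  }
}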
 7955 
 7956 // vinserti forms
 7957 
 7958 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 7959   assert(VM_Version::supports_avx2(), "");
 7960   assert(imm8 <= 0x01, "imm8: %u", imm8);
 7961   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7962   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7963   // last byte:
 7964   // 0x00 - insert into lower 128 bits
 7965   // 0x01 - insert into upper 128 bits
 7966   emit_int24(0x38, (0xC0 | encode), imm8 & 0x01);
 7967 }
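
As a usage sketch (assuming a MacroAssembler* masm in scope; register
choices are arbitrary), composing a 256-bit value from two 128-bit halves:

// ymm0 = low 128 bits of ymm1, with xmm2 inserted as the upper 128 bits.
masm->vinserti128(xmm0, xmm1, xmm2, 1);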
 7968 
 7969 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 7970   assert(VM_Version::supports_avx2(), "");
 7971   assert(dst != xnoreg, "sanity");
 7972   assert(imm8 <= 0x01, "imm8: %u", imm8);
 7973   InstructionMark im(this);
 7974   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7975   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);

 7979   // 0x00 - insert into lower 128 bits
 7980   // 0x01 - insert into upper 128 bits
 7981   emit_int8(imm8 & 0x01);
 7982 }
 7983 
 7984 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 7985   assert(VM_Version::supports_evex(), "");
 7986   assert(imm8 <= 0x03, "imm8: %u", imm8);
 7987   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7988   attributes.set_is_evex_instruction();
 7989   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 7990   // imm8:
 7991   // 0x00 - insert into q0 128 bits (0..127)
 7992   // 0x01 - insert into q1 128 bits (128..255)
 7993   // 0x02 - insert into q2 128 bits (256..383)
 7994   // 0x03 - insert into q3 128 bits (384..511)
 7995   emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
 7996 }
 7997 
 7998 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 7999   assert(VM_Version::supports_evex(), "");
 8000   assert(dst != xnoreg, "sanity");
 8001   assert(imm8 <= 0x03, "imm8: %u", imm8);
 8002   InstructionMark im(this);
 8003   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8004   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 8005   attributes.set_is_evex_instruction();
 8006   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 8007   emit_int8(0x18);
 8008   emit_operand(dst, src);
 8009   // 0x00 - insert into q0 128 bits (0..127)
 8010   // 0x01 - insert into q1 128 bits (128..255)
 8011   // 0x02 - insert into q2 128 bits (256..383)
 8012   // 0x03 - insert into q3 128 bits (384..511)
 8013   emit_int8(imm8 & 0x03);
 8014 }
 8015 
 8016 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 8017   assert(VM_Version::supports_evex(), "");
 8018   assert(imm8 <= 0x01, "imm8: %u", imm8);
 8019   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);

 8038   // 0x01 - insert into upper 128 bits
 8039   emit_int24(0x18, (0xC0 | encode), imm8 & 0x01);
 8040 }
 8041 
 8042 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 8043   assert(VM_Version::supports_avx(), "");
 8044   assert(dst != xnoreg, "sanity");
 8045   assert(imm8 <= 0x01, "imm8: %u", imm8);
 8046   InstructionMark im(this);
 8047   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8048   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 8049   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 8050   emit_int8(0x18);
 8051   emit_operand(dst, src);
 8052   // 0x00 - insert into lower 128 bits
 8053   // 0x01 - insert into upper 128 bits
 8054   emit_int8(imm8 & 0x01);
 8055 }
 8056 
 8057 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 8058   assert(VM_Version::supports_evex(), "");
 8059   assert(imm8 <= 0x03, "imm8: %u", imm8);
 8060   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8061   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 8062   // imm8:
 8063   // 0x00 - insert into q0 128 bits (0..127)
 8064   // 0x01 - insert into q1 128 bits (128..255)
 8065   // 0x02 - insert into q2 128 bits (256..383)
 8066   // 0x03 - insert into q3 128 bits (384..511)
 8067   emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
 8068 }
 8069 
 8070 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
 8071   assert(VM_Version::supports_evex(), "");
 8072   assert(dst != xnoreg, "sanity");
 8073   assert(imm8 <= 0x03, "imm8: %u", imm8);
 8074   InstructionMark im(this);
 8075   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8076   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
 8077   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 8078   emit_int8(0x18);
 8079   emit_operand(dst, src);
 8080   // 0x00 - insert into q0 128 bits (0..127)
 8081   // 0x01 - insert into q1 128 bits (128..255)
 8082   // 0x02 - insert into q2 128 bits (256..383)
 8083   // 0x03 - insert into q3 128 bits (384..511)
 8084   emit_int8(imm8 & 0x03);
 8085 }
 8086 
 8087 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
 8088   assert(VM_Version::supports_evex(), "");
 8089   assert(imm8 <= 0x01, "imm8: %u", imm8);
 8090   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8091   attributes.set_is_evex_instruction();

11479   attributes.set_is_evex_instruction();
11480   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11481   emit_int16(0x38, (0xC0 | encode));
11482 }
11483 
11484 void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
11485   assert(VM_Version::supports_avx512vlbw(), "");
11486   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11487   attributes.set_is_evex_instruction();
11488   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11489   emit_int16(0x28, (0xC0 | encode));
11490 }
11491 
11492 void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
11493   assert(VM_Version::supports_avx512vlbw(), "");
11494   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11495   attributes.set_is_evex_instruction();
11496   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11497   emit_int16(0x28, (0xC0 | encode));
11498 }
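
evpmovm2b/evpmovm2w materialize an opmask register as a vector: each mask
bit becomes an all-ones or all-zeros element. A scalar sketch for the byte
form (hypothetical model, not HotSpot code):

// n-lane model of VPMOVM2B.
static void movm2b(uint8_t dst[], uint64_t k, int n) {
  for (int i = 0; i < n; i++) {
    dst[i] = ((k >> i) & 1) ? 0xFF : 0x00;  // set bit -> all-ones lane
  }
}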
11499 
11500 void Assembler::evpcompressb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11501   assert(VM_Version::supports_avx512_vbmi2() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11502   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11503   attributes.set_embedded_opmask_register_specifier(mask);
11504   attributes.set_is_evex_instruction();
11505   if (merge) {
11506     attributes.reset_is_clear_context();
11507   }
11508   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11509   emit_int16((unsigned char)0x63, (0xC0 | encode));
11510 }
11511 
11512 void Assembler::evpcompressw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11513   assert(VM_Version::supports_avx512_vbmi2() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11514   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11515   attributes.set_embedded_opmask_register_specifier(mask);
11516   attributes.set_is_evex_instruction();
11517   if (merge) {
11518     attributes.reset_is_clear_context();
11519   }
11520   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11521   emit_int16((unsigned char)0x63, (0xC0 | encode));
11522 }
11523 
11524 void Assembler::evpcompressd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11525   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11526   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11527   attributes.set_embedded_opmask_register_specifier(mask);
11528   attributes.set_is_evex_instruction();
11529   if (merge) {
11530     attributes.reset_is_clear_context();
11531   }
11532   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11533   emit_int16((unsigned char)0x8B, (0xC0 | encode));
11534 }
11535 
11536 void Assembler::evpcompressq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11537   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11538   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11539   attributes.set_embedded_opmask_register_specifier(mask);
11540   attributes.set_is_evex_instruction();
11541   if (merge) {
11542     attributes.reset_is_clear_context();
11543   }
11544   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11545   emit_int16((unsigned char)0x8B, (0xC0 | encode));
11546 }
11547 
11548 void Assembler::evcompressps(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11549   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11550   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11551   attributes.set_embedded_opmask_register_specifier(mask);
11552   attributes.set_is_evex_instruction();
11553   if (merge) {
11554     attributes.reset_is_clear_context();
11555   }
11556   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11557   emit_int16((unsigned char)0x8A, (0xC0 | encode));
11558 }
11559 
11560 void Assembler::evcompresspd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
11561   assert(VM_Version::supports_evex() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
11562   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11563   attributes.set_embedded_opmask_register_specifier(mask);
11564   attributes.set_is_evex_instruction();
11565   if (merge) {
11566     attributes.reset_is_clear_context();
11567   }
11568   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11569   emit_int16((unsigned char)0x8A, (0xC0 | encode));
11570 }
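
Compress is the inverse of expand: lanes whose mask bit is set are packed
contiguously into the low end of the destination, and with zero-masking the
remaining tail is cleared. A scalar sketch for the quadword form
(hypothetical model, not HotSpot code):

// 8-lane model of VPCOMPRESSQ.
static void compress8(uint64_t dst[8], const uint64_t src[8], uint8_t mask, bool merge) {
  int j = 0;                      // next destination slot
  for (int i = 0; i < 8; i++) {
    if ((mask >> i) & 1) {
      dst[j++] = src[i];          // selected elements pack together
    }
  }
  if (!merge) {
    for (; j < 8; j++) {
      dst[j] = 0;                 // zero-masking clears the tail
    }
  }                               // merge-masking leaves the tail untouched
}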
11571 
11572 #ifndef _LP64
11573 
11574 void Assembler::incl(Register dst) {
11575   // Don't use it directly. Use MacroAssembler::incrementl() instead.
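        // The one-byte 0x40+reg form of INC exists only in 32-bit mode; in
        // 64-bit mode bytes 0x40-0x4F are REX prefixes, hence this
        // #ifndef _LP64 block.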
11576   emit_int8(0x40 | dst->encoding());
11577 }
11578 
11579 void Assembler::lea(Register dst, Address src) {
11580   leal(dst, src);
11581 }
11582 
11583 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
11584   InstructionMark im(this);
11585   emit_int8((unsigned char)0xC7);
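        // 0xC7 /0 = MOV r/m32, imm32; rax below supplies the /0
        // opcode-extension digit in the ModRM reg field, not a register operand.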
11586   emit_operand(rax, dst);
11587   emit_data((int)imm32, rspec, 0);
11588 }
11589 
11590 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
11591   InstructionMark im(this);