
src/hotspot/cpu/x86/assembler_x86.cpp

@@ -2758,10 +2758,35 @@
    vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
    emit_int8(0x7F);
    emit_operand(src, dst);
  }
  
+ // Move Aligned 256-bit Vector (VEX-encoded VMOVDQA)
+ 
+ void Assembler::vmovdqa(XMMRegister dst, Address src) {
+   assert(UseAVX > 0, "");
+   InstructionMark im(this);
+   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::vmovdqa(Address dst, XMMRegister src) {
+   assert(UseAVX > 0, "");
+   InstructionMark im(this);
+   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.reset_is_clear_context();
+   // swap src<->dst for encoding
+   assert(src != xnoreg, "sanity");
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x7F);
+   emit_operand(src, dst);
+ }
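+ 
+ // Note: unlike VMOVDQU, VMOVDQA raises #GP if the memory operand is not
+ // 32-byte aligned. A hypothetical use in a copy stub (assuming the usual
+ // MacroAssembler `__` shorthand and 32-byte aligned `src`/`dst`):
+ //   __ vmovdqa(xmm0, Address(src, 0));  // aligned 32-byte load
+ //   __ vmovdqa(Address(dst, 0), xmm0);  // aligned 32-byte store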
+ 
  // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64)
  void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
    assert(VM_Version::supports_evex(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();

@@ -3012,10 +3037,143 @@
    vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
    emit_int8(0x7F);
    emit_operand(src, dst);
  }
  
+ // Move Aligned EVEX enabled Vector (programmable: 8,16,32,64). AVX-512 defines
+ // only VMOVDQA32/VMOVDQA64; the byte/word entry points below simply select the
+ // EVEX.W0 (32-bit) and EVEX.W1 (64-bit) encodings of the aligned move.
+ void Assembler::evmovdqab(XMMRegister dst, Address src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovdqab(Address dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(src != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x7F);
+   emit_operand(src, dst);
+ }
+ 
+ void Assembler::evmovdqab(XMMRegister dst, KRegister mask, Address src, int vector_len) {
+   assert(VM_Version::supports_avx512vlbw(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovdqaw(XMMRegister dst, Address src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovdqaw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
+   assert(VM_Version::supports_avx512vlbw(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovdqaw(Address dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(src != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x7F);
+   emit_operand(src, dst);
+ }
+ 
+ void Assembler::evmovdqaw(Address dst, KRegister mask, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_avx512vlbw(), "");
+   assert(src != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.reset_is_clear_context();
+   attributes.set_embedded_opmask_register_specifier(mask);
+   attributes.set_is_evex_instruction();
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x7F);
+   emit_operand(src, dst);
+ }
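+ 
+ // Note on the opmask forms above: the masked loads keep this file's default
+ // zeroing semantics (EVEX.z set, masked-off elements cleared), while the masked
+ // store calls reset_is_clear_context() because a store with a memory destination
+ // must be encoded with EVEX.z = 0 (masked-off elements leave memory untouched).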
+ 
+ void Assembler::evmovdqal(XMMRegister dst, Address src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovdqal(Address dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(src != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.reset_is_clear_context();
+   attributes.set_is_evex_instruction();
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x7F);
+   emit_operand(src, dst);
+ }
+ 
+ void Assembler::evmovdqaq(XMMRegister dst, Address src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x6F);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovdqaq(Address dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(src != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.reset_is_clear_context();
+   attributes.set_is_evex_instruction();
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8(0x7F);
+   emit_operand(src, dst);
+ }
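+ 
+ // As with the 256-bit VEX forms, these aligned EVEX moves raise #GP unless the
+ // memory operand is aligned to the full vector width implied by vector_len
+ // (16, 32 or 64 bytes).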
+ 
  // Uses zero extension on 64bit
  
  void Assembler::movl(Register dst, int32_t imm32) {
    int encode = prefix_and_encode(dst->encoding());
    emit_int8(0xB8 | encode);

@@ -3061,10 +3219,91 @@
    simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
    emit_int8(0x12);
    emit_operand(dst, src);
  }
  
+ void Assembler::movntq(Address dst, Register src) { // non-temporal 64-bit store via the MOVNTI instruction
+   InstructionMark im(this);
+   prefixq(dst, src); // REX.W plus any extended-register bits for src/dst
+   emit_int8(0x0F);
+   emit_int8((unsigned char)0xC3);
+   emit_operand(src, dst);
+ }
+ 
+ void Assembler::movntdq(Address dst, XMMRegister src) {
+   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+   InstructionMark im(this);
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xE7);
+   emit_operand(src, dst);
+ }
+ 
+ void Assembler::movntdqa(XMMRegister dst, Address src) {
+   NOT_LP64(assert(VM_Version::supports_sse4_1(), ""));
+   InstructionMark im(this);
+   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x2A);
+   emit_operand(dst, src);
+ }
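+ 
+ // MOVNTDQA is only a non-temporal hint: on ordinary write-back memory the
+ // processor may treat it as a regular load; the streaming behavior is
+ // architecturally guaranteed only for WC (write-combining) memory.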
+ 
+ void Assembler::vmovntdqa(XMMRegister dst, Address src) {
+   assert(UseAVX > 1, ""); // 256-bit VMOVNTDQA requires AVX2
+   InstructionMark im(this);
+   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x2A);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::evmovntdqa(XMMRegister dst, Address src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.set_is_evex_instruction();
+   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+   emit_int8(0x2A);
+   emit_operand(dst, src);
+ }
+ 
+ void Assembler::vmovntdq(Address dst, XMMRegister src) {
+   assert(UseAVX > 0, "");
+   InstructionMark im(this);
+   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.reset_is_clear_context();
+   // swap src<->dst for encoding
+   assert(src != xnoreg, "sanity");
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xE7);
+   emit_operand(src, dst);
+ }
+ 
+ void Assembler::evmovntdq(Address dst, XMMRegister src, int vector_len) {
+   assert(VM_Version::supports_evex(), "");
+   assert(src != xnoreg, "sanity");
+   InstructionMark im(this);
+   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
+   attributes.reset_is_clear_context();
+   attributes.set_is_evex_instruction();
+   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+   emit_int8((unsigned char)0xE7);
+   emit_operand(src, dst);
+ }
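+ 
+ // All of the MOVNT* stores above are weakly ordered with respect to other
+ // stores; code that publishes the written data to another thread must fence
+ // (e.g. sfence) after the non-temporal stores.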
+ 
  void Assembler::movq(XMMRegister dst, Address src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
    attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);