
src/hotspot/cpu/x86/assembler_x86.cpp

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
// Move Aligned 256-bit Vector

void Assembler::vmovdqa(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqa(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
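
// Usage sketch (illustrative, not part of this change; `__` is the usual
// MacroAssembler shorthand and the addresses are assumed 32-byte aligned):
//   __ vmovdqa(xmm0, Address(rsi, 0));   // VEX.256.66.0F 6F : aligned load
//   __ vmovdqa(Address(rdi, 0), xmm0);   // VEX.256.66.0F 7F : aligned store
// The 0x66 SIMD prefix selects the aligned form; the vmovdqu pair above uses
// 0xF3 for the unaligned encoding.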

// Move Unaligned EVEX-enabled Vector (programmable: 8, 16, 32, 64)
void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}
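
// Usage sketch (illustrative, not part of this change): the register-to-register
// byte-granular move; with no write-mask supplied the merge flag has no
// visible effect.
//   __ evmovdqub(xmm0, xmm1, /*merge*/ false, Assembler::AVX_512bit);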

void Assembler::evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
  // Unmasked instruction
  evmovdquq(dst, k0, src, /*merge*/ true, vector_len);
}

void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
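
// Usage sketch (illustrative, not part of this change): unmasked callers
// reach the masked emitter through k0, which EVEX defines as "no masking";
// with a real mask only the selected quadwords are written to memory.
//   __ evmovdquq(Address(rdi, 0), xmm0, Assembler::AVX_512bit);                    // via k0
//   __ evmovdquq(Address(rdi, 0), k1, xmm0, /*merge*/ true, Assembler::AVX_512bit);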

// Move Aligned EVEX-enabled Vector (programmable: 8, 16, 32, 64)
void Assembler::evmovdqab(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqab(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdqab(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqaw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqaw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqaw(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdqaw(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdqal(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqal(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdqaq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqaq(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
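
// Usage sketch (illustrative, not part of this change): the b/w/l/q suffix
// fixes the element size, which matters once a write-mask is supplied
// (one mask bit per element).
//   __ evmovdqal(xmm1, Address(rsi, 0), Assembler::AVX_512bit);       // 32-bit elements
//   __ evmovdqab(xmm1, k2, Address(rsi, 0), Assembler::AVX_512bit);   // masked, per byte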

// Uses zero extension on 64-bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
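
// Note (illustrative, not part of this change): on x86_64, writing the low
// 32 bits implicitly clears bits 63:32 of the destination, so movl is the
// idiomatic zero-extending move:
//   __ movl(rax, 0x1);   // rax == 0x0000000000000001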

// Newer CPUs require movsd and movss to avoid a partial register stall
// when loading from memory. But for old Opterons, use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src);
}
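
// Note (illustrative, not part of this change): callers normally go through
// MacroAssembler::movdbl(), which makes that selection, roughly:
//   if (UseXmmLoadAndClearUpper) { movsd(dst, src); } else { movlpd(dst, src); }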

void Assembler::movntq(Address dst, Register src) { // uses the MOVNTI operation
  InstructionMark im(this);
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC3);
  emit_operand(src, dst);
}
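
// Note (illustrative, not part of this change): 0F C3 is MOVNTI, the
// non-temporal form of a GPR store; prefixq() above widens it to 64 bits:
//   __ movntq(Address(rdi, 0), rax);   // cache-bypassing 8-byte store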

void Assembler::movntdq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_int8((unsigned char)0x66);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xE7);
  emit_operand(src, dst);
}

void Assembler::movntdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse4_1(), ""));
  InstructionMark im(this);  // needed so emit_operand can record relocations
  emit_int8((unsigned char)0x66);
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0x2A);
  emit_operand(dst, src);

  // Same thing, with AVX:
  // assert(UseAVX > 0, "");
  // InstructionMark im(this);
  // InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  // simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  // emit_int8((unsigned char)0x2A);
  // emit_operand(dst, src);
}

void Assembler::vmovntdqa(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::evmovntdqa(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::vmovntdq(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE7);
  emit_operand(src, dst);
}

void Assembler::evmovntdq(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE7);
  emit_operand(src, dst);
}
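
// Usage sketch (illustrative, not part of this change): non-temporal stores
// bypass the caches and are weakly ordered, so a streaming copy ends with a
// store fence (assuming an sfence() emitter is available):
//   __ vmovntdq(Address(rdi, 0), xmm0);
//   __ sfence();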

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}