< prev index next >

src/hotspot/cpu/x86/assembler_x86.cpp

Print this page

 2441   emit_int16(0x16, (0xC0 | encode));
 2442 }
 2443 
 2444 void Assembler::movb(Register dst, Address src) {
 2445   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
 2446   InstructionMark im(this);
 2447   prefix(src, dst, true);
 2448   emit_int8((unsigned char)0x8A);
 2449   emit_operand(dst, src);
 2450 }
 2451 
 2452 void Assembler::movddup(XMMRegister dst, XMMRegister src) {
 2453   NOT_LP64(assert(VM_Version::supports_sse3(), ""));
 2454   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 2455   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2456   attributes.set_rex_vex_w_reverted();
 2457   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2458   emit_int16(0x12, 0xC0 | encode);
 2459 }
 2460 







 2461 void Assembler::kmovbl(KRegister dst, Register src) {
 2462   assert(VM_Version::supports_avx512dq(), "");
 2463   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2464   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2465   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2466 }
 2467 
 2468 void Assembler::kmovbl(Register dst, KRegister src) {
 2469   assert(VM_Version::supports_avx512dq(), "");
 2470   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2471   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2472   emit_int16((unsigned char)0x93, (0xC0 | encode));
 2473 }
 2474 
 2475 void Assembler::kmovwl(KRegister dst, Register src) {
 2476   assert(VM_Version::supports_evex(), "");
 2477   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2478   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2479   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2480 }

 2488 
 2489 void Assembler::kmovwl(KRegister dst, Address src) {
 2490   assert(VM_Version::supports_evex(), "");
 2491   InstructionMark im(this);
 2492   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2493   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2494   emit_int8((unsigned char)0x90);
 2495   emit_operand((Register)dst, src);
 2496 }
 2497 
 2498 void Assembler::kmovwl(Address dst, KRegister src) {
 2499   assert(VM_Version::supports_evex(), "");
 2500   InstructionMark im(this);
 2501   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2502   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2503   emit_int8((unsigned char)0x91);
 2504   emit_operand((Register)src, dst);
 2505 }
 2506 
 2507 void Assembler::kmovwl(KRegister dst, KRegister src) {
 2508   assert(VM_Version::supports_avx512bw(), "");
 2509   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2510   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2511   emit_int16((unsigned char)0x90, (0xC0 | encode));
 2512 }
 2513 
 2514 void Assembler::kmovdl(KRegister dst, Register src) {
 2515   assert(VM_Version::supports_avx512bw(), "");
 2516   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2517   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2518   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2519 }
 2520 
 2521 void Assembler::kmovdl(Register dst, KRegister src) {
 2522   assert(VM_Version::supports_avx512bw(), "");
 2523   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2524   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2525   emit_int16((unsigned char)0x93, (0xC0 | encode));
 2526 }
 2527 
 2528 void Assembler::kmovql(KRegister dst, KRegister src) {

 2554   assert(VM_Version::supports_avx512bw(), "");
 2555   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2556   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2557   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2558 }
 2559 
 2560 void Assembler::kmovql(Register dst, KRegister src) {
 2561   assert(VM_Version::supports_avx512bw(), "");
 2562   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2563   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2564   emit_int16((unsigned char)0x93, (0xC0 | encode));
 2565 }
 2566 
 2567 void Assembler::knotwl(KRegister dst, KRegister src) {
 2568   assert(VM_Version::supports_evex(), "");
 2569   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2570   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2571   emit_int16(0x44, (0xC0 | encode));
 2572 }
 2573 


































































































 2574 void Assembler::knotql(KRegister dst, KRegister src) {
 2575   assert(VM_Version::supports_avx512bw(), "");
 2576   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2577   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2578   emit_int16(0x44, (0xC0 | encode));
 2579 }
 2580 
 2581 // This instruction produces ZF or CF flags
 2582 void Assembler::kortestbl(KRegister src1, KRegister src2) {
 2583   assert(VM_Version::supports_avx512dq(), "");
 2584   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2585   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2586   emit_int16((unsigned char)0x98, (0xC0 | encode));
 2587 }
 2588 
 2589 // This instruction produces ZF or CF flags
 2590 void Assembler::kortestwl(KRegister src1, KRegister src2) {
 2591   assert(VM_Version::supports_evex(), "");
 2592   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2593   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);

 2601   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2602   emit_int16((unsigned char)0x98, (0xC0 | encode));
 2603 }
 2604 
 2605 // This instruction produces ZF or CF flags
 2606 void Assembler::kortestql(KRegister src1, KRegister src2) {
 2607   assert(VM_Version::supports_avx512bw(), "");
 2608   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2609   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2610   emit_int16((unsigned char)0x98, (0xC0 | encode));
 2611 }
 2612 
 2613 // This instruction produces ZF or CF flags
 2614 void Assembler::ktestql(KRegister src1, KRegister src2) {
 2615   assert(VM_Version::supports_avx512bw(), "");
 2616   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2617   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2618   emit_int16((unsigned char)0x99, (0xC0 | encode));
 2619 }
 2620 





















 2621 void Assembler::ktestq(KRegister src1, KRegister src2) {
 2622   assert(VM_Version::supports_avx512bw(), "");
 2623   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2624   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2625   emit_int16((unsigned char)0x99, (0xC0 | encode));
 2626 }
 2627 
 2628 void Assembler::ktestd(KRegister src1, KRegister src2) {
 2629   assert(VM_Version::supports_avx512bw(), "");
 2630   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2631   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2632   emit_int16((unsigned char)0x99, (0xC0 | encode));
 2633 }
 2634 














































 2635 void Assembler::movb(Address dst, int imm8) {
 2636   InstructionMark im(this);
 2637    prefix(dst);
 2638   emit_int8((unsigned char)0xC6);
 2639   emit_operand(rax, dst, 1);
 2640   emit_int8(imm8);
 2641 }
 2642 
 2643 
 2644 void Assembler::movb(Address dst, Register src) {
 2645   assert(src->has_byte_register(), "must have byte register");
 2646   InstructionMark im(this);
 2647   prefix(dst, src, true);
 2648   emit_int8((unsigned char)0x88);
 2649   emit_operand(src, dst);
 2650 }
 2651 
 2652 void Assembler::movdl(XMMRegister dst, Register src) {
 2653   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2654   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);

 4095   attributes.reset_is_clear_context();
 4096   attributes.set_is_evex_instruction();
 4097   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4098   emit_int16(0x29, (0xC0 | encode));
 4099 }
 4100 
 4101 // In this context, kdst is written the mask used to process the equal components
 4102 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
 4103   assert(VM_Version::supports_evex(), "");
 4104   InstructionMark im(this);
 4105   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4106   attributes.reset_is_clear_context();
 4107   attributes.set_is_evex_instruction();
 4108   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
 4109   int dst_enc = kdst->encoding();
 4110   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4111   emit_int8(0x29);
 4112   emit_operand(as_Register(dst_enc), src);
 4113 }
 4114 
 4115 void Assembler::evpmovd2m(KRegister kdst, XMMRegister src, int vector_len) {
 4116   assert(UseAVX > 2  && VM_Version::supports_avx512dq(), "");
 4117   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 4118   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4119   attributes.set_is_evex_instruction();
 4120   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 4121   emit_int16(0x39, (0xC0 | encode));
 4122 }
 4123 
 4124 void Assembler::evpmovq2m(KRegister kdst, XMMRegister src, int vector_len) {
 4125   assert(UseAVX > 2  && VM_Version::supports_avx512dq(), "");
 4126   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 4127   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 4128   attributes.set_is_evex_instruction();
 4129   int encode = vex_prefix_and_encode(kdst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 4130   emit_int16(0x39, (0xC0 | encode));
 4131 }
 4132 
 4133 void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
 4134   assert(VM_Version::supports_sse4_1(), "");
 4135   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4136   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4137   emit_int16(0x37, (0xC0 | encode));
 4138 }
 4139 
 4140 void Assembler::pmovmskb(Register dst, XMMRegister src) {
 4141   assert(VM_Version::supports_sse2(), "");
 4142   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4143   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4144   emit_int16((unsigned char)0xD7, (0xC0 | encode));
 4145 }
 4146 
 4147 void Assembler::vpmovmskb(Register dst, XMMRegister src, int vec_enc) {
 4148   assert((VM_Version::supports_avx() && vec_enc == AVX_128bit) ||
 4149          (VM_Version::supports_avx2() && vec_enc  == AVX_256bit), "");
 4150   InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4151   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4152   emit_int16((unsigned char)0xD7, (0xC0 | encode));

 7402 
 7403 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7404   assert(UseAVX > 0, "requires some form of AVX");
 7405   InstructionMark im(this);
 7406   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7407   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
 7408   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7409   emit_int8((unsigned char)0xEF);
 7410   emit_operand(dst, src);
 7411 }
 7412 
 7413 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7414   assert(UseAVX > 2, "requires some form of EVEX");
 7415   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7416   attributes.set_rex_vex_w_reverted();
 7417   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7418   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 7419 }
 7420 
 7421 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 7422   assert(VM_Version::supports_evex(), "");
 7423   // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r

 7424   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 7425   attributes.set_is_evex_instruction();
 7426   attributes.set_embedded_opmask_register_specifier(mask);
 7427   if (merge) {
 7428     attributes.reset_is_clear_context();
 7429   }
 7430   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7431   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 7432 }
 7433 
















































































































 7434 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 7435   assert(VM_Version::supports_evex(), "requires EVEX support");
 7436   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7437   attributes.set_is_evex_instruction();
 7438   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7439   emit_int16((unsigned char)0xEF, (0xC0 | encode));
 7440 }
 7441 
 7442 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 7443   assert(VM_Version::supports_evex(), "requires EVEX support");
 7444   assert(dst != xnoreg, "sanity");
 7445   InstructionMark im(this);
 7446   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7447   attributes.set_is_evex_instruction();
 7448   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
 7449   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 7450   emit_int8((unsigned char)0xEF);
 7451   emit_operand(dst, src);
 7452 }
 7453 

 7958   assert(VM_Version::supports_avx2(), "");
 7959   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 7960   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7961   emit_int16(0x79, (0xC0 | encode));
 7962 }
 7963 
 7964 void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
 7965   assert(VM_Version::supports_avx2(), "");
 7966   assert(dst != xnoreg, "sanity");
 7967   InstructionMark im(this);
 7968   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 7969   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
 7970   // swap src<->dst for encoding
 7971   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7972   emit_int8(0x79);
 7973   emit_operand(dst, src);
 7974 }
 7975 
 7976 // xmm/mem sourced byte/word/dword/qword replicate
 7977 
 7978 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
 7979 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
 7980   assert(UseAVX >= 2, "");
 7981   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7982   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7983   emit_int16(0x58, (0xC0 | encode));




 7984 }
 7985 
 7986 void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
 7987   assert(VM_Version::supports_avx2(), "");
 7988   assert(dst != xnoreg, "sanity");
 7989   InstructionMark im(this);
 7990   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 7991   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 7992   // swap src<->dst for encoding
 7993   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 7994   emit_int8(0x58);
 7995   emit_operand(dst, src);
 7996 }
 7997 
 7998 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
 7999 void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
 8000   assert(VM_Version::supports_avx2(), "");





























































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































































 8001   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8002   attributes.set_rex_vex_w_reverted();
 8003   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8004   emit_int16(0x59, (0xC0 | encode));
 8005 }
 8006 
 8007 void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
 8008   assert(VM_Version::supports_avx2(), "");
 8009   assert(dst != xnoreg, "sanity");
 8010   InstructionMark im(this);
 8011   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 8012   attributes.set_rex_vex_w_reverted();
 8013   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 8014   // swap src<->dst for encoding
 8015   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8016   emit_int8(0x59);
 8017   emit_operand(dst, src);
 8018 }
 8019 
 8020 void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) {

 9350 void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
 9351                         int comparison, bool is_signed, int vector_len) {
 9352   assert(VM_Version::supports_evex(), "");
 9353   assert(VM_Version::supports_avx512bw(), "");
 9354   assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
 9355   // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
 9356   InstructionMark im(this);
 9357   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
 9358   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
 9359   attributes.set_is_evex_instruction();
 9360   attributes.set_embedded_opmask_register_specifier(mask);
 9361   attributes.reset_is_clear_context();
 9362   int dst_enc = kdst->encoding();
 9363   vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9364   int opcode = is_signed ? 0x3F : 0x3E;
 9365   emit_int8((unsigned char)opcode);
 9366   emit_operand(as_Register(dst_enc), src);
 9367   emit_int8((unsigned char)comparison);
 9368 }
 9369 
































































































 9370 void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
 9371   assert(VM_Version::supports_avx(), "");
 9372   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 9373   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 9374   int mask_enc = mask->encoding();
 9375   emit_int24(0x4C, (0xC0 | encode), 0xF0 & mask_enc << 4);
 9376 }
 9377 
 9378 void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9379   assert(VM_Version::supports_evex(), "");
 9380   // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
 9381   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9382   attributes.set_is_evex_instruction();
 9383   attributes.set_embedded_opmask_register_specifier(mask);
 9384   if (merge) {
 9385     attributes.reset_is_clear_context();
 9386   }
 9387   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9388   emit_int16(0x65, (0xC0 | encode));
 9389 }

 9473   assert(VM_Version::supports_bmi2(), "");
 9474   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
 9475   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9476   emit_int16((unsigned char)0xF7, (0xC0 | encode));
 9477 }
 9478 
 9479 void Assembler::shrxl(Register dst, Register src1, Register src2) {
 9480   assert(VM_Version::supports_bmi2(), "");
 9481   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
 9482   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
 9483   emit_int16((unsigned char)0xF7, (0xC0 | encode));
 9484 }
 9485 
 9486 void Assembler::shrxq(Register dst, Register src1, Register src2) {
 9487   assert(VM_Version::supports_bmi2(), "");
 9488   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
 9489   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
 9490   emit_int16((unsigned char)0xF7, (0xC0 | encode));
 9491 }
 9492 
























 9493 void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) {
 9494   assert(VM_Version::supports_avx512vlbw(), "");
 9495   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9496   attributes.set_is_evex_instruction();
 9497   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
 9498   emit_int16(0x29, (0xC0 | encode));
 9499 }
 9500 































 9501 #ifndef _LP64
 9502 
 9503 void Assembler::incl(Register dst) {
 9504   // Don't use it directly. Use MacroAssembler::incrementl() instead.
 9505   emit_int8(0x40 | dst->encoding());
 9506 }
 9507 
 9508 void Assembler::lea(Register dst, Address src) {
 9509   leal(dst, src);
 9510 }
 9511 
 9512 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
 9513   InstructionMark im(this);
 9514   emit_int8((unsigned char)0xC7);
 9515   emit_operand(rax, dst);
 9516   emit_data((int)imm32, rspec, 0);
 9517 }
 9518 
 9519 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
 9520   InstructionMark im(this);

 2441   emit_int16(0x16, (0xC0 | encode));
 2442 }
 2443 
 2444 void Assembler::movb(Register dst, Address src) {
 2445   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
 2446   InstructionMark im(this);
 2447   prefix(src, dst, true);
 2448   emit_int8((unsigned char)0x8A);
 2449   emit_operand(dst, src);
 2450 }
 2451 
 2452 void Assembler::movddup(XMMRegister dst, XMMRegister src) {
 2453   NOT_LP64(assert(VM_Version::supports_sse3(), ""));
 2454   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
 2455   InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 2456   attributes.set_rex_vex_w_reverted();
 2457   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2458   emit_int16(0x12, 0xC0 | encode);
 2459 }
 2460 
 2461 void Assembler::kmovbl(KRegister dst, KRegister src) {
 2462   assert(VM_Version::supports_avx512dq(), "");
 2463   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2464   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2465   emit_int16((unsigned char)0x90, (0xC0 | encode));
 2466 }
 2467 
 2468 void Assembler::kmovbl(KRegister dst, Register src) {
 2469   assert(VM_Version::supports_avx512dq(), "");
 2470   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2471   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2472   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2473 }
 2474 
 2475 void Assembler::kmovbl(Register dst, KRegister src) {
 2476   assert(VM_Version::supports_avx512dq(), "");
 2477   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2478   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2479   emit_int16((unsigned char)0x93, (0xC0 | encode));
 2480 }
 2481 
 2482 void Assembler::kmovwl(KRegister dst, Register src) {
 2483   assert(VM_Version::supports_evex(), "");
 2484   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2485   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2486   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2487 }

 2495 
 2496 void Assembler::kmovwl(KRegister dst, Address src) {
 2497   assert(VM_Version::supports_evex(), "");
 2498   InstructionMark im(this);
 2499   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2500   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2501   emit_int8((unsigned char)0x90);
 2502   emit_operand((Register)dst, src);
 2503 }
 2504 
 2505 void Assembler::kmovwl(Address dst, KRegister src) {
 2506   assert(VM_Version::supports_evex(), "");
 2507   InstructionMark im(this);
 2508   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2509   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2510   emit_int8((unsigned char)0x91);
 2511   emit_operand((Register)src, dst);
 2512 }
 2513 
 2514 void Assembler::kmovwl(KRegister dst, KRegister src) {
 2515   assert(VM_Version::supports_evex(), "");
 2516   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2517   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 2518   emit_int16((unsigned char)0x90, (0xC0 | encode));
 2519 }
 2520 
// KMOVD k, r32: move a GPR into a 32-bit opmask register (F2 0F 92 /r).
void Assembler::kmovdl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (0xC0 | encode));
}
 2527 
// KMOVD r32, k: move a 32-bit opmask register into a GPR (F2 0F 93 /r).
void Assembler::kmovdl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (0xC0 | encode));
}
 2534 
 2535 void Assembler::kmovql(KRegister dst, KRegister src) {

 2561   assert(VM_Version::supports_avx512bw(), "");
 2562   InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2563   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
 2564   emit_int16((unsigned char)0x92, (0xC0 | encode));
 2565 }
 2566 
// KMOVQ r64, k: move a 64-bit opmask register into a GPR (F2 W1 0F 93 /r).
void Assembler::kmovql(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (0xC0 | encode));
}
 2573 
// KNOTW k, k: bitwise NOT of a 16-bit opmask register (0F 44 /r).
void Assembler::knotwl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}
 2580 
// KNOTB k, k: bitwise NOT of an 8-bit opmask register (66 0F 44 /r).
// NOTE(review): the byte-granular k-ops elsewhere in this file assert
// avx512dq; this one only asserts evex — confirm the weaker gate is intended.
void Assembler::knotbl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}
 2587 
// KORB k, k, k: bitwise OR of two 8-bit opmask registers (66 0F 45 /r).
// AVX_256bit selects the VEX.L1 form these k-register ALU ops use.
void Assembler::korbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}
 2594 
// KORW k, k, k: bitwise OR of two 16-bit opmask registers (0F 45 /r).
void Assembler::korwl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}
 2601 
// KORD k, k, k: bitwise OR of two 32-bit opmask registers (66 W1 0F 45 /r).
void Assembler::kordl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}
 2608 
// KORQ k, k, k: bitwise OR of two 64-bit opmask registers (W1 0F 45 /r).
void Assembler::korql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x45, (0xC0 | encode));
}
 2615 
// KXORB k, k, k: bitwise XOR of two 8-bit opmask registers (66 0F 47 /r).
void Assembler::kxorbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}
 2622 
// KXORW k, k, k: bitwise XOR of two 16-bit opmask registers (0F 47 /r).
void Assembler::kxorwl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}
 2629 
// KXORD k, k, k: bitwise XOR of two 32-bit opmask registers (66 W1 0F 47 /r).
void Assembler::kxordl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}
 2636 
// KXORQ k, k, k: bitwise XOR of two 64-bit opmask registers (W1 0F 47 /r).
void Assembler::kxorql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x47, (0xC0 | encode));
}
 2643 
// KANDB k, k, k: bitwise AND of two 8-bit opmask registers (66 0F 41 /r).
void Assembler::kandbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}
 2650 
// KANDW k, k, k: bitwise AND of two 16-bit opmask registers (0F 41 /r).
void Assembler::kandwl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}
 2657 
// KANDD k, k, k: bitwise AND of two 32-bit opmask registers (66 W1 0F 41 /r).
void Assembler::kanddl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}
 2664 
// KANDQ k, k, k: bitwise AND of two 64-bit opmask registers (W1 0F 41 /r).
void Assembler::kandql(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x41, (0xC0 | encode));
}
 2671 
// KNOTD k, k: bitwise NOT of a 32-bit opmask register (66 W1 0F 44 /r).
void Assembler::knotdl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}
 2678 
// KNOTQ k, k: bitwise NOT of a 64-bit opmask register (W1 0F 44 /r).
void Assembler::knotql(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x44, (0xC0 | encode));
}
 2685 
// This instruction produces ZF or CF flags
// KORTESTB k, k: OR the 8-bit masks and set ZF/CF from the result (66 0F 98 /r).
void Assembler::kortestbl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (0xC0 | encode));
}
 2693 
 2694 // This instruction produces ZF or CF flags
 2695 void Assembler::kortestwl(KRegister src1, KRegister src2) {
 2696   assert(VM_Version::supports_evex(), "");
 2697   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2698   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);

 2706   int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 2707   emit_int16((unsigned char)0x98, (0xC0 | encode));
 2708 }
 2709 
// This instruction produces ZF or CF flags
// KORTESTQ k, k: OR the 64-bit masks and set ZF/CF from the result (W1 0F 98 /r).
void Assembler::kortestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (0xC0 | encode));
}
 2717 
// This instruction produces ZF or CF flags
// KTESTQ k, k: AND/ANDN test of 64-bit masks, setting ZF/CF (W1 0F 99 /r).
void Assembler::ktestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
 2725 
// KTESTD k, k: flag-setting test of 32-bit masks (66 W1 0F 99 /r).
void Assembler::ktestdl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
 2732 
// KTESTW k, k: flag-setting test of 16-bit masks (0F 99 /r).
void Assembler::ktestwl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
 2739 
// KTESTB k, k: flag-setting test of 8-bit masks (66 0F 99 /r).
void Assembler::ktestbl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
 2746 
// KTESTQ k, k — same encoding as ktestql above (W1 0F 99 /r).
void Assembler::ktestq(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
 2753 
// KTESTD k, k — same encoding as ktestdl above (66 W1 0F 99 /r).
void Assembler::ktestd(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (0xC0 | encode));
}
 2760 
// KXNORB k, k, k: bitwise XNOR of two 8-bit opmask registers (66 0F 46 /r).
void Assembler::kxnorbl(KRegister dst, KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x46, (0xC0 | encode));
}
 2767 
// KSHIFTLB k, k, imm8: left-shift an 8-bit opmask by imm8 (66 0F3A 32 /r ib).
void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x32, (0xC0 | encode));
  emit_int8(imm8);
}
 2775 
 2776 void Assembler::kshiftrbl(KRegister dst, KRegister src, int imm8) {
 2777   assert(VM_Version::supports_avx512dq(), "");
 2778   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 2779   int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
 2780   emit_int16(0x30, (0xC0 | encode));
 2781 }
 2782 
// KSHIFTRW k, k, imm8: right-shift a 16-bit opmask by imm8 (66 W1 0F3A 30 /r ib).
void Assembler::kshiftrwl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x30, (0xC0 | encode));
  emit_int8(imm8);
}
 2790 
// KSHIFTRD k, k, imm8: right-shift a 32-bit opmask by imm8 (66 0F3A 31 /r ib).
void Assembler::kshiftrdl(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x31, (0xC0 | encode));
  emit_int8(imm8);
}
 2798 
// KSHIFTRQ k, k, imm8: right-shift a 64-bit opmask by imm8 (66 W1 0F3A 31 /r ib).
void Assembler::kshiftrql(KRegister dst, KRegister src, int imm8) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int16(0x31, (0xC0 | encode));
  emit_int8(imm8);
}
 2806 
// MOV m8, imm8: store an immediate byte to memory (C6 /0 ib).
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  // /0 extension: rax encodes modrm.reg = 0; the trailing 1 accounts for
  // the imm8 when emitting a rip-relative operand.
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}
 2814 
 2815 
// MOV m8, r8: store the low byte of a register to memory (88 /r).
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}
 2823 
 2824 void Assembler::movdl(XMMRegister dst, Register src) {
 2825   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 2826   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);

 4267   attributes.reset_is_clear_context();
 4268   attributes.set_is_evex_instruction();
 4269   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4270   emit_int16(0x29, (0xC0 | encode));
 4271 }
 4272 
// In this context, kdst is written the mask used to process the equal components
// EVPCMPEQQ k, xmm, m: packed qword equality compare against memory,
// writing the per-element result into an opmask register (EVEX 66 0F38 29).
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  // Full-vector tuple with 64-bit elements for disp8*N compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_operand(as_Register(dst_enc), src);
}
 4286 


















 4287 void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
 4288   assert(VM_Version::supports_sse4_1(), "");
 4289   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4290   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 4291   emit_int16(0x37, (0xC0 | encode));
 4292 }
 4293 
// PMOVMSKB r32, xmm: gather the byte sign bits into a GPR (66 0F D7 /r).
void Assembler::pmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  // The GPR destination is encoded through the XMM operand path.
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD7, (0xC0 | encode));
}
 4300 
 4301 void Assembler::vpmovmskb(Register dst, XMMRegister src, int vec_enc) {
 4302   assert((VM_Version::supports_avx() && vec_enc == AVX_128bit) ||
 4303          (VM_Version::supports_avx2() && vec_enc  == AVX_256bit), "");
 4304   InstructionAttr attributes(vec_enc, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
 4305   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 4306   emit_int16((unsigned char)0xD7, (0xC0 | encode));

 7556 
// VPXOR xmm/ymm, xmm/ymm, m: bitwise XOR with a memory operand (VEX 66 0F EF).
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
 7566 
// VPXORQ z, z, z: qword-flavored bitwise XOR, register form (EVEX 66 0F EF).
void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // Allows W-bit reversion so the VEX form can be emitted when legal.
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}
 7574 
// Masked dword XOR, register form.
void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}
 7587 
// Masked dword XOR, memory form (EVEX.66.0F.W0 EF /r).
void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
 7602 
// Masked qword XOR, register form.
void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  // Encoding: EVEX.NDS.XXX.66.0F.W1 EF /r
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}
 7615 
// Masked qword XOR, memory form (EVEX.66.0F.W1 EF /r).
// NOTE(review): W1 (qword) op but the address attributes declare EVEX_32bit
// input size — confirm disp8*N scaling is intended (the unmasked evpxorq
// memory form below uses EVEX_64bit).
void Assembler::evpxorq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
 7630 
// Masked dword AND, memory form (EVEX.66.0F.W0 DB /r).
void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);
}
 7645 
// Masked qword AND, register form (EVEX.66.0F.W1 DB /r).
void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}
 7657 
// Masked qword AND, memory form (EVEX.66.0F.W1 DB /r).
// NOTE(review): W1 (qword) op with EVEX_32bit address input size — confirm
// the disp8*N scaling is intended.
void Assembler::evpandq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);
}
 7672 
// Masked qword OR, register form (EVEX.66.0F.W1 EB /r).
void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}
 7684 
// Masked qword OR, memory form (EVEX.66.0F.W1 EB /r).
// NOTE(review): W1 (qword) op with EVEX_32bit address input size — confirm
// the disp8*N scaling is intended.
void Assembler::evporq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src);
}
 7699 
// Unmasked qword XOR, register form (EVEX.66.0F.W1 EF /r).
void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}
 7707 
// Unmasked qword XOR, memory form (EVEX.66.0F.W1 EF /r).
void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  // Full-vector tuple with 64-bit elements for disp8*N compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
 7719 

 8224   assert(VM_Version::supports_avx2(), "");
 8225   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
 8226   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8227   emit_int16(0x79, (0xC0 | encode));
 8228 }
 8229 
// VPBROADCASTW: broadcast a 16-bit memory element to all lanes (66 0F38 79).
void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // Tuple1-scalar with 16-bit element for disp8*N compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_operand(dst, src);
}
 8241 
 8242 // xmm/mem sourced byte/word/dword/qword replicate
 8243 
// Masked byte add, register form (EVEX.66.0F FC /r).
void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}
 8255 
// Masked byte add, memory form (EVEX.66.0F FC /r).
// NOTE(review): byte-element op with EVEX_FV/EVEX_32bit address attributes —
// confirm the disp8*N scaling is intended for this tuple.
void Assembler::evpaddb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src);
}
 8270 
// Masked word add, register form (EVEX.66.0F FD /r).
void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // merge => masked-off elements keep dst's value; otherwise they are zeroed.
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}
 8282 
 8283 void Assembler::evpaddw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8284   InstructionMark im(this);
 8285   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8286   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8287   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8288   attributes.set_is_evex_instruction();
 8289   attributes.set_embedded_opmask_register_specifier(mask);
 8290   if (merge) {
 8291     attributes.reset_is_clear_context();
 8292   }
 8293   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8294   emit_int8((unsigned char)0xFD);
 8295   emit_operand(dst, src);
 8296 }
 8297 
 8298 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8299   assert(VM_Version::supports_evex(), "");
 8300   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8301   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8302   attributes.set_is_evex_instruction();
 8303   attributes.set_embedded_opmask_register_specifier(mask);
 8304   if (merge) {
 8305     attributes.reset_is_clear_context();
 8306   }
 8307   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8308   emit_int16((unsigned char)0xFE, (0xC0 | encode));
 8309 }
 8310 
 8311 void Assembler::evpaddd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8312   InstructionMark im(this);
 8313   assert(VM_Version::supports_evex(), "");
 8314   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8315   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8316   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8317   attributes.set_is_evex_instruction();
 8318   attributes.set_embedded_opmask_register_specifier(mask);
 8319   if (merge) {
 8320     attributes.reset_is_clear_context();
 8321   }
 8322   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8323   emit_int8((unsigned char)0xFE);
 8324   emit_operand(dst, src);
 8325 }
 8326 
 8327 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8328   assert(VM_Version::supports_evex(), "");
 8329   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8330   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8331   attributes.set_is_evex_instruction();
 8332   attributes.set_embedded_opmask_register_specifier(mask);
 8333   if (merge) {
 8334     attributes.reset_is_clear_context();
 8335   }
 8336   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8337   emit_int16((unsigned char)0xD4, (0xC0 | encode));
 8338 }
 8339 
 8340 void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8341   InstructionMark im(this);
 8342   assert(VM_Version::supports_evex(), "");
 8343   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8344   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8345   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8346   attributes.set_is_evex_instruction();
 8347   attributes.set_embedded_opmask_register_specifier(mask);
 8348   if (merge) {
 8349     attributes.reset_is_clear_context();
 8350   }
 8351   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8352   emit_int8((unsigned char)0xD4);
 8353   emit_operand(dst, src);
 8354 }
 8355 
 8356 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8357   assert(VM_Version::supports_evex(), "");
 8358   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8359   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8360   attributes.set_is_evex_instruction();
 8361   attributes.set_embedded_opmask_register_specifier(mask);
 8362   if (merge) {
 8363     attributes.reset_is_clear_context();
 8364   }
 8365   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8366   emit_int16(0x58, (0xC0 | encode));
 8367 }
 8368 
 8369 void Assembler::evaddps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8370   InstructionMark im(this);
 8371   assert(VM_Version::supports_evex(), "");
 8372   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8373   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8374   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8375   attributes.set_is_evex_instruction();
 8376   attributes.set_embedded_opmask_register_specifier(mask);
 8377   if (merge) {
 8378     attributes.reset_is_clear_context();
 8379   }
 8380   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8381   emit_int8(0x58);
 8382   emit_operand(dst, src);
 8383 }
 8384 
 8385 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8386   assert(VM_Version::supports_evex(), "");
 8387   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8388   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8389   attributes.set_is_evex_instruction();
 8390   attributes.set_embedded_opmask_register_specifier(mask);
 8391   if (merge) {
 8392     attributes.reset_is_clear_context();
 8393   }
 8394   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8395   emit_int16(0x58, (0xC0 | encode));
 8396 }
 8397 
 8398 void Assembler::evaddpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8399   InstructionMark im(this);
 8400   assert(VM_Version::supports_evex(), "");
 8401   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8402   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8403   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8404   attributes.set_is_evex_instruction();
 8405   attributes.set_embedded_opmask_register_specifier(mask);
 8406   if (merge) {
 8407     attributes.reset_is_clear_context();
 8408   }
 8409   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8410   emit_int8(0x58);
 8411   emit_operand(dst, src);
 8412 }
 8413 
 8414 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8415   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8416   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8417   attributes.set_is_evex_instruction();
 8418   attributes.set_embedded_opmask_register_specifier(mask);
 8419   if (merge) {
 8420     attributes.reset_is_clear_context();
 8421   }
 8422   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8423   emit_int16((unsigned char)0xF8, (0xC0 | encode));
 8424 }
 8425 
 8426 void Assembler::evpsubb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8427   InstructionMark im(this);
 8428   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8429   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8430   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8431   attributes.set_is_evex_instruction();
 8432   attributes.set_embedded_opmask_register_specifier(mask);
 8433   if (merge) {
 8434     attributes.reset_is_clear_context();
 8435   }
 8436   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8437   emit_int8((unsigned char)0xF8);
 8438   emit_operand(dst, src);
 8439 }
 8440 
 8441 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8442   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8443   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8444   attributes.set_is_evex_instruction();
 8445   attributes.set_embedded_opmask_register_specifier(mask);
 8446   if (merge) {
 8447     attributes.reset_is_clear_context();
 8448   }
 8449   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8450   emit_int16((unsigned char)0xF9, (0xC0 | encode));
 8451 }
 8452 
 8453 void Assembler::evpsubw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8454   InstructionMark im(this);
 8455   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8456   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8457   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8458   attributes.set_is_evex_instruction();
 8459   attributes.set_embedded_opmask_register_specifier(mask);
 8460   if (merge) {
 8461     attributes.reset_is_clear_context();
 8462   }
 8463   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8464   emit_int8((unsigned char)0xF9);
 8465   emit_operand(dst, src);
 8466 }
 8467 
 8468 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8469   assert(VM_Version::supports_evex(), "");
 8470   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8471   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8472   attributes.set_is_evex_instruction();
 8473   attributes.set_embedded_opmask_register_specifier(mask);
 8474   if (merge) {
 8475     attributes.reset_is_clear_context();
 8476   }
 8477   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8478   emit_int16((unsigned char)0xFA, (0xC0 | encode));
 8479 }
 8480 
 8481 void Assembler::evpsubd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8482   InstructionMark im(this);
 8483   assert(VM_Version::supports_evex(), "");
 8484   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8485   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8486   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8487   attributes.set_is_evex_instruction();
 8488   attributes.set_embedded_opmask_register_specifier(mask);
 8489   if (merge) {
 8490     attributes.reset_is_clear_context();
 8491   }
 8492   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8493   emit_int8((unsigned char)0xFA);
 8494   emit_operand(dst, src);
 8495 }
 8496 
 8497 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8498   assert(VM_Version::supports_evex(), "");
 8499   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8500   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8501   attributes.set_is_evex_instruction();
 8502   attributes.set_embedded_opmask_register_specifier(mask);
 8503   if (merge) {
 8504     attributes.reset_is_clear_context();
 8505   }
 8506   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8507   emit_int16((unsigned char)0xFB, (0xC0 | encode));
 8508 }
 8509 
 8510 void Assembler::evpsubq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8511   InstructionMark im(this);
 8512   assert(VM_Version::supports_evex(), "");
 8513   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8514   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8515   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8516   attributes.set_is_evex_instruction();
 8517   attributes.set_embedded_opmask_register_specifier(mask);
 8518   if (merge) {
 8519     attributes.reset_is_clear_context();
 8520   }
 8521   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8522   emit_int8((unsigned char)0xFB);
 8523   emit_operand(dst, src);
 8524 }
 8525 
 8526 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8527   assert(VM_Version::supports_evex(), "");
 8528   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8529   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8530   attributes.set_is_evex_instruction();
 8531   attributes.set_embedded_opmask_register_specifier(mask);
 8532   if (merge) {
 8533     attributes.reset_is_clear_context();
 8534   }
 8535   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8536   emit_int16(0x5C, (0xC0 | encode));
 8537 }
 8538 
 8539 void Assembler::evsubps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8540   InstructionMark im(this);
 8541   assert(VM_Version::supports_evex(), "");
 8542   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8543   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8544   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8545   attributes.set_is_evex_instruction();
 8546   attributes.set_embedded_opmask_register_specifier(mask);
 8547   if (merge) {
 8548     attributes.reset_is_clear_context();
 8549   }
 8550   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8551   emit_int8(0x5C);
 8552   emit_operand(dst, src);
 8553 }
 8554 
 8555 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8556   assert(VM_Version::supports_evex(), "");
 8557   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8558   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8559   attributes.set_is_evex_instruction();
 8560   attributes.set_embedded_opmask_register_specifier(mask);
 8561   if (merge) {
 8562     attributes.reset_is_clear_context();
 8563   }
 8564   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8565   emit_int16(0x5C, (0xC0 | encode));
 8566 }
 8567 
 8568 void Assembler::evsubpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8569   InstructionMark im(this);
 8570   assert(VM_Version::supports_evex(), "");
 8571   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8572   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8573   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8574   attributes.set_is_evex_instruction();
 8575   attributes.set_embedded_opmask_register_specifier(mask);
 8576   if (merge) {
 8577     attributes.reset_is_clear_context();
 8578   }
 8579   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8580   emit_int8(0x5C);
 8581   emit_operand(dst, src);
 8582 }
 8583 
 8584 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8585   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8586   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8587   attributes.set_is_evex_instruction();
 8588   attributes.set_embedded_opmask_register_specifier(mask);
 8589   if (merge) {
 8590     attributes.reset_is_clear_context();
 8591   }
 8592   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8593   emit_int16((unsigned char)0xD5, (0xC0 | encode));
 8594 }
 8595 
 8596 void Assembler::evpmullw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8597   InstructionMark im(this);
 8598   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8599   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8600   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8601   attributes.set_is_evex_instruction();
 8602   attributes.set_embedded_opmask_register_specifier(mask);
 8603   if (merge) {
 8604     attributes.reset_is_clear_context();
 8605   }
 8606   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8607   emit_int8((unsigned char)0xD5);
 8608   emit_operand(dst, src);
 8609 }
 8610 
 8611 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8612   assert(VM_Version::supports_evex(), "");
 8613   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8614   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8615   attributes.set_is_evex_instruction();
 8616   attributes.set_embedded_opmask_register_specifier(mask);
 8617   if (merge) {
 8618     attributes.reset_is_clear_context();
 8619   }
 8620   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8621   emit_int16(0x40, (0xC0 | encode));
 8622 }
 8623 
 8624 void Assembler::evpmulld(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8625   InstructionMark im(this);
 8626   assert(VM_Version::supports_evex(), "");
 8627   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8628   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8629   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8630   attributes.set_is_evex_instruction();
 8631   attributes.set_embedded_opmask_register_specifier(mask);
 8632   if (merge) {
 8633     attributes.reset_is_clear_context();
 8634   }
 8635   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8636   emit_int8(0x40);
 8637   emit_operand(dst, src);
 8638 }
 8639 
 8640 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8641   assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8642   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8643   attributes.set_is_evex_instruction();
 8644   attributes.set_embedded_opmask_register_specifier(mask);
 8645   if (merge) {
 8646     attributes.reset_is_clear_context();
 8647   }
 8648   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8649   emit_int16(0x40, (0xC0 | encode));
 8650 }
 8651 
 8652 void Assembler::evpmullq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8653   InstructionMark im(this);
 8654   assert(VM_Version::supports_avx512dq() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8655   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8656   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8657   attributes.set_is_evex_instruction();
 8658   attributes.set_embedded_opmask_register_specifier(mask);
 8659   if (merge) {
 8660     attributes.reset_is_clear_context();
 8661   }
 8662   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8663   emit_int8(0x40);
 8664   emit_operand(dst, src);
 8665 }
 8666 
 8667 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8668   assert(VM_Version::supports_evex(), "");
 8669   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8670   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8671   attributes.set_is_evex_instruction();
 8672   attributes.set_embedded_opmask_register_specifier(mask);
 8673   if (merge) {
 8674     attributes.reset_is_clear_context();
 8675   }
 8676   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8677   emit_int16(0x59, (0xC0 | encode));
 8678 }
 8679 
 8680 void Assembler::evmulps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8681   InstructionMark im(this);
 8682   assert(VM_Version::supports_evex(), "");
 8683   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8684   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8685   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8686   attributes.set_is_evex_instruction();
 8687   attributes.set_embedded_opmask_register_specifier(mask);
 8688   if (merge) {
 8689     attributes.reset_is_clear_context();
 8690   }
 8691   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8692   emit_int8(0x59);
 8693   emit_operand(dst, src);
 8694 }
 8695 
 8696 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8697   assert(VM_Version::supports_evex(), "");
 8698   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8699   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8700   attributes.set_is_evex_instruction();
 8701   attributes.set_embedded_opmask_register_specifier(mask);
 8702   if (merge) {
 8703     attributes.reset_is_clear_context();
 8704   }
 8705   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8706   emit_int16(0x59, (0xC0 | encode));
 8707 }
 8708 
 8709 void Assembler::evmulpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8710   InstructionMark im(this);
 8711   assert(VM_Version::supports_evex(), "");
 8712   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8713   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8714   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8715   attributes.set_is_evex_instruction();
 8716   attributes.set_embedded_opmask_register_specifier(mask);
 8717   if (merge) {
 8718     attributes.reset_is_clear_context();
 8719   }
 8720   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8721   emit_int8(0x59);
 8722   emit_operand(dst, src);
 8723 }
 8724 
 8725 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8726   assert(VM_Version::supports_evex(), "");
 8727   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8728   InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8729   attributes.set_is_evex_instruction();
 8730   attributes.set_embedded_opmask_register_specifier(mask);
 8731   if (merge) {
 8732     attributes.reset_is_clear_context();
 8733   }
 8734   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8735   emit_int16(0x51, (0xC0 | encode));
 8736 }
 8737 
 8738 void Assembler::evsqrtps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8739   InstructionMark im(this);
 8740   assert(VM_Version::supports_evex(), "");
 8741   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8742   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8743   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8744   attributes.set_is_evex_instruction();
 8745   attributes.set_embedded_opmask_register_specifier(mask);
 8746   if (merge) {
 8747     attributes.reset_is_clear_context();
 8748   }
 8749   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8750   emit_int8(0x51);
 8751   emit_operand(dst, src);
 8752 }
 8753 
 8754 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8755   assert(VM_Version::supports_evex(), "");
 8756   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8757   InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8758   attributes.set_is_evex_instruction();
 8759   attributes.set_embedded_opmask_register_specifier(mask);
 8760   if (merge) {
 8761     attributes.reset_is_clear_context();
 8762   }
 8763   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8764   emit_int16(0x51, (0xC0 | encode));
 8765 }
 8766 
 8767 void Assembler::evsqrtpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8768   InstructionMark im(this);
 8769   assert(VM_Version::supports_evex(), "");
 8770   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8771   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8772   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8773   attributes.set_is_evex_instruction();
 8774   attributes.set_embedded_opmask_register_specifier(mask);
 8775   if (merge) {
 8776     attributes.reset_is_clear_context();
 8777   }
 8778   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8779   emit_int8(0x51);
 8780   emit_operand(dst, src);
 8781 }
 8782 
 8783 
 8784 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 8785   assert(VM_Version::supports_evex(), "");
 8786   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8787   InstructionAttr attributes(vector_len,/* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8788   attributes.set_is_evex_instruction();
 8789   attributes.set_embedded_opmask_register_specifier(mask);
 8790   if (merge) {
 8791     attributes.reset_is_clear_context();
 8792   }
 8793   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8794   emit_int16(0x5E, (0xC0 | encode));
 8795 }
 8796 
 8797 void Assembler::evdivps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8798   InstructionMark im(this);
 8799   assert(VM_Version::supports_evex(), "");
 8800   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8801   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8802   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8803   attributes.set_is_evex_instruction();
 8804   attributes.set_embedded_opmask_register_specifier(mask);
 8805   if (merge) {
 8806     attributes.reset_is_clear_context();
 8807   }
 8808   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
 8809   emit_int8(0x5E);
 8810   emit_operand(dst, src);
 8811 }
 8812 
// Masked packed-double divide, register form: dst = nds / src under opmask `mask`.
// EVEX.W1 selects 64-bit elements; zero-masking unless merge is requested.
void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len,/* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  // divpd is 66 0F 5E.
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}
 8825 
 8826 void Assembler::evdivpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 8827   InstructionMark im(this);
 8828   assert(VM_Version::supports_evex(), "");
 8829   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8830   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8831   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8832   attributes.set_is_evex_instruction();
 8833   attributes.set_embedded_opmask_register_specifier(mask);
 8834   if (merge) {
 8835     attributes.reset_is_clear_context();
 8836   }
 8837   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 8838   emit_int8(0x5E);
 8839   emit_operand(dst, src);
 8840 }
 8841 
 8842 void Assembler::evpabsb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 8843   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8844   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8845   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8846   attributes.set_is_evex_instruction();
 8847   attributes.set_embedded_opmask_register_specifier(mask);
 8848   if (merge) {
 8849     attributes.reset_is_clear_context();
 8850   }
 8851   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8852   emit_int16(0x1C, (0xC0 | encode));
 8853 }
 8854 
 8855 
// Masked packed absolute value of bytes with a memory source: dst = |[src]| per byte
// lane under opmask `mask`; zero-masking unless merge is requested.
void Assembler::evpabsb(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  // NOTE(review): byte-granular instructions are tuple FVM per the SDM; EVEX_FV with no
  // broadcast yields the same disp8*N here, but EVEX_FVM/EVEX_NObit would be the
  // conventional choice — confirm against the unmasked vpabsb emitter.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1C);
  emit_operand(dst, src);
}
 8870 
 8871 void Assembler::evpabsw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 8872   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 8873   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8874   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8875   attributes.set_is_evex_instruction();
 8876   attributes.set_embedded_opmask_register_specifier(mask);
 8877   if (merge) {
 8878     attributes.reset_is_clear_context();
 8879   }
 8880   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8881   emit_int16(0x1D, (0xC0 | encode));
 8882 }
 8883 
 8884 
// Masked packed absolute value of words with a memory source: dst = |[src]| per 16-bit
// lane under opmask `mask`; zero-masking unless merge is requested.
void Assembler::evpabsw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  // NOTE(review): word-granular instructions are tuple FVM per the SDM; FV without
  // broadcast produces the same disp8*N, but verify against the unmasked vpabsw emitter.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1D);
  emit_operand(dst, src);
}
 8899 
 8900 void Assembler::evpabsd(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 8901   assert(VM_Version::supports_evex(), "");
 8902   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8903   InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8904   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8905   attributes.set_is_evex_instruction();
 8906   attributes.set_embedded_opmask_register_specifier(mask);
 8907   if (merge) {
 8908     attributes.reset_is_clear_context();
 8909   }
 8910   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8911   emit_int16(0x1E, (0xC0 | encode));
 8912 }
 8913 
 8914 
// Masked packed absolute value of dwords with a memory source: dst = |[src]| per 32-bit
// lane under opmask `mask`; zero-masking unless merge is requested.
void Assembler::evpabsd(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  // Full-vector tuple, 32-bit elements — matches the W0/dword semantics of vpabsd.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1E);
  emit_operand(dst, src);
}
 8930 
 8931 void Assembler::evpabsq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
 8932   assert(VM_Version::supports_evex(), "");
 8933   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8934   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8935   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8936   attributes.set_is_evex_instruction();
 8937   attributes.set_embedded_opmask_register_specifier(mask);
 8938   if (merge) {
 8939     attributes.reset_is_clear_context();
 8940   }
 8941   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8942   emit_int16(0x1F, (0xC0 | encode));
 8943 }
 8944 
 8945 
 8946 void Assembler::evpabsq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
 8947   InstructionMark im(this);
 8948   assert(VM_Version::supports_evex(), "");
 8949   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 8950   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 8951   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 8952   attributes.set_is_evex_instruction();
 8953   attributes.set_embedded_opmask_register_specifier(mask);
 8954   if (merge) {
 8955     attributes.reset_is_clear_context();
 8956   }
 8957   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 8958   emit_int8(0x1F);
 8959   emit_operand(dst, src);
 8960 }
 8961 
// Masked fused multiply-add (form 213), packed single, register source:
// dst = dst * nds + src per 32-bit lane under opmask `mask`.
void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  // vfmadd213ps is 66 0F 38 A8 (W0).
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xA8, (0xC0 | encode));
}
 8974 
// Masked fused multiply-add (form 213), packed single, memory source:
// dst = dst * nds + [src] per 32-bit lane under opmask `mask`.
void Assembler::evpfma213ps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  InstructionMark im(this);
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  // Full-vector tuple, 32-bit elements — matches the W0/single-precision operand size.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xA8);
  emit_operand(dst, src);
}
 8990 
// Masked fused multiply-add (form 213), packed double, register source:
// dst = dst * nds + src per 64-bit lane (EVEX.W1) under opmask `mask`.
void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  // vfmadd213pd is 66 0F 38 A8 (W1).
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xA8, (0xC0 | encode));
}
 9003 
 9004 void Assembler::evpfma213pd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9005   InstructionMark im(this);
 9006   assert(VM_Version::supports_evex(), "");
 9007   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9008   InstructionAttr attributes(vector_len, /* vex_w */ true,/* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
 9009   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,/* input_size_in_bits */ EVEX_32bit);
 9010   attributes.set_is_evex_instruction();
 9011   attributes.set_embedded_opmask_register_specifier(mask);
 9012   if (merge) {
 9013     attributes.reset_is_clear_context();
 9014   }
 9015   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9016   emit_int8((unsigned char)0xA8);
 9017   emit_operand(dst, src);
 9018 }
 9019 
// Masked byte permute (VPERMB, AVX512_VBMI): dst = permute of src bytes selected by
// indices in nds, under opmask `mask`; zero-masking unless merge is requested.
void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  // vpermb is EVEX.66.0F38.W0 8D.
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}
 9031 
// Masked byte permute (VPERMB) with a memory source.
// NOTE(review): no set_address_attributes() here, unlike the other *_mem emitters in
// this group — presumably the encoder then falls back to an uncompressed disp32;
// confirm this is intended (disp8*N compression is foregone for this form).
void Assembler::evpermb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}
 9045 
// Masked word permute (VPERMW, AVX512BW): dst = permute of src words selected by
// indices in nds, under opmask `mask`. EVEX.W1 distinguishes it from vpermb (W0).
void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  // vpermw is EVEX.66.0F38.W1 8D.
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0x8D, (0xC0 | encode));
}
 9057 
// Masked word permute (VPERMW) with a memory source.
// NOTE(review): no set_address_attributes() — disp8*N compression is not enabled for
// this form (likely emits disp32); confirm this is intentional.
void Assembler::evpermw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}
 9071 
// Masked dword permute (VPERMD): dst = permute of src dwords selected by indices in
// nds, under opmask `mask`. vpermd has no 128-bit form, hence vector_len > AVX_128bit.
void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  // vpermd is EVEX.66.0F38.W0 36.
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}
 9083 
// Masked dword permute (VPERMD) with a memory source.
// NOTE(review): no set_address_attributes() — disp8*N compression is not enabled for
// this form (likely emits disp32); confirm this is intentional.
void Assembler::evpermd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src);
}
 9097 
// Masked qword permute, variable-index form (VPERMQ zmm, zmm, zmm): EVEX.W1 of the
// 0F38 36 encoding shared with vpermd. No 128-bit form, hence vector_len > AVX_128bit.
void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}
 9109 
// Masked qword permute, variable-index form, with a memory source.
// NOTE(review): no set_address_attributes() — disp8*N compression is not enabled for
// this form (likely emits disp32); confirm this is intentional.
void Assembler::evpermq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex() && vector_len > AVX_128bit, "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src);
}
 9123 
// Masked word shift-left by immediate (VPSLLW zmm{k}, zmm, imm8 = 0F 71 /6).
// Shift-by-imm encoding: the opcode extension /6 rides in ModRM.reg (via xmm6),
// dst goes in EVEX.vvvv (the nds slot), and src in ModRM.rm.
void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}
 9135 
// Masked dword shift-left by immediate (VPSLLD zmm{k}, zmm, imm8 = 0F 72 /6).
// xmm6 supplies the /6 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
 9148 
// Masked qword shift-left by immediate (VPSLLQ zmm{k}, zmm, imm8 = EVEX.W1 0F 73 /6).
// xmm6 supplies the /6 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}
 9161 
// Masked word logical shift-right by immediate (VPSRLW zmm{k}, zmm, imm8 = 0F 71 /2).
// xmm2 supplies the /2 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}
 9173 
// Masked dword logical shift-right by immediate (VPSRLD zmm{k}, zmm, imm8 = 0F 72 /2).
// xmm2 supplies the /2 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
 9186 
// Masked qword logical shift-right by immediate (VPSRLQ = EVEX.W1 0F 73 /2).
// xmm2 supplies the /2 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}
 9199 
// Masked word arithmetic shift-right by immediate (VPSRAW zmm{k}, zmm, imm8 = 0F 71 /4).
// xmm4 supplies the /4 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}
 9211 
// Masked dword arithmetic shift-right by immediate (VPSRAD zmm{k}, zmm, imm8 = 0F 72 /4).
// xmm4 supplies the /4 opcode extension; dst rides in the vvvv slot, src in rm.
void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
 9224 
 9225 void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
 9226   assert(VM_Version::supports_evex(), "");
 9227   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9228   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9229   attributes.set_is_evex_instruction();
 9230   attributes.set_embedded_opmask_register_specifier(mask);
 9231   if (merge) {
 9232     attributes.reset_is_clear_context();
 9233   }
 9234   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9235   emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
 9236 }
 9237 
// Masked word shift-left by xmm count (VPSLLW = 66 0F F1): dst = nds << src[63:0],
// per 16-bit lane under opmask `mask`; zero-masking unless merge is requested.
void Assembler::evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}
 9249 
// Masked dword shift-left by xmm count (VPSLLD = 66 0F F2) under opmask `mask`.
void Assembler::evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}
 9262 
// Masked qword shift-left by xmm count (VPSLLQ = EVEX.W1 66 0F F3) under opmask `mask`.
void Assembler::evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}
 9275 
// Masked word logical shift-right by xmm count (VPSRLW = 66 0F D1) under opmask `mask`.
void Assembler::evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}
 9287 
// Masked dword logical shift-right by xmm count (VPSRLD = 66 0F D2) under opmask `mask`.
void Assembler::evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}
 9300 
// Masked qword logical shift-right by xmm count (VPSRLQ = EVEX.W1 66 0F D3) under opmask `mask`.
void Assembler::evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}
 9313 
// Masked word arithmetic shift-right by xmm count (VPSRAW = 66 0F E1) under opmask `mask`.
void Assembler::evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}
 9325 
// Masked dword arithmetic shift-right by xmm count (VPSRAD = 66 0F E2, W0) under opmask `mask`.
void Assembler::evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}
 9338 
// Masked qword arithmetic shift-right by xmm count. VPSRAQ shares opcode 0xE2 with
// vpsrad; EVEX.W1 selects the 64-bit element form.
void Assembler::evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}
 9351 
// Masked per-element variable word shift-left (VPSLLVW = EVEX.66.0F38.W1 12, AVX512BW):
// each word of nds is shifted by the corresponding word of src, under opmask `mask`.
void Assembler::evpsllvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x12, (0xC0 | encode));
}
 9363 
 9364 void Assembler::evpsllvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9365   assert(VM_Version::supports_evex(), "");
 9366   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9367   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9368   attributes.set_is_evex_instruction();
 9369   attributes.set_embedded_opmask_register_specifier(mask);
 9370   if (merge) {
 9371     attributes.reset_is_clear_context();
 9372   }
 9373   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9374   emit_int16(0x47, (0xC0 | encode));
 9375 }
 9376 
 9377 void Assembler::evpsllvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9378   assert(VM_Version::supports_evex(), "");
 9379   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9380   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9381   attributes.set_is_evex_instruction();
 9382   attributes.set_embedded_opmask_register_specifier(mask);
 9383   if (merge) {
 9384     attributes.reset_is_clear_context();
 9385   }
 9386   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9387   emit_int16(0x47, (0xC0 | encode));
 9388 }
 9389 
 9390 void Assembler::evpsrlvw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9391   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9392   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9393   attributes.set_is_evex_instruction();
 9394   attributes.set_embedded_opmask_register_specifier(mask);
 9395   if (merge) {
 9396     attributes.reset_is_clear_context();
 9397   }
 9398   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9399   emit_int16(0x10, (0xC0 | encode));
 9400 }
 9401 
 9402 void Assembler::evpsrlvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9403   assert(VM_Version::supports_evex(), "");
 9404   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9405   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9406   attributes.set_is_evex_instruction();
 9407   attributes.set_embedded_opmask_register_specifier(mask);
 9408   if (merge) {
 9409     attributes.reset_is_clear_context();
 9410   }
 9411   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9412   emit_int16(0x45, (0xC0 | encode));
 9413 }
 9414 
 9415 void Assembler::evpsrlvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9416   assert(VM_Version::supports_evex(), "");
 9417   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9418   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9419   attributes.set_is_evex_instruction();
 9420   attributes.set_embedded_opmask_register_specifier(mask);
 9421   if (merge) {
 9422     attributes.reset_is_clear_context();
 9423   }
 9424   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9425   emit_int16(0x45, (0xC0 | encode));
 9426 }
 9427 
 9428 void Assembler::evpsravw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9429   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9430   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9431   attributes.set_is_evex_instruction();
 9432   attributes.set_embedded_opmask_register_specifier(mask);
 9433   if (merge) {
 9434     attributes.reset_is_clear_context();
 9435   }
 9436   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9437   emit_int16(0x11, (0xC0 | encode));
 9438 }
 9439 
 9440 void Assembler::evpsravd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9441   assert(VM_Version::supports_evex(), "");
 9442   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9443   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9444   attributes.set_is_evex_instruction();
 9445   attributes.set_embedded_opmask_register_specifier(mask);
 9446   if (merge) {
 9447     attributes.reset_is_clear_context();
 9448   }
 9449   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9450   emit_int16(0x46, (0xC0 | encode));
 9451 }
 9452 
 9453 void Assembler::evpsravq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9454   assert(VM_Version::supports_evex(), "");
 9455   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9456   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9457   attributes.set_is_evex_instruction();
 9458   attributes.set_embedded_opmask_register_specifier(mask);
 9459   if (merge) {
 9460     attributes.reset_is_clear_context();
 9461   }
 9462   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9463   emit_int16(0x46, (0xC0 | encode));
 9464 }
 9465 
 9466 void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9467   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9468   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9469   attributes.set_is_evex_instruction();
 9470   attributes.set_embedded_opmask_register_specifier(mask);
 9471   if (merge) {
 9472     attributes.reset_is_clear_context();
 9473   }
 9474   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9475   emit_int16(0x38, (0xC0 | encode));
 9476 }
 9477 
 9478 void Assembler::evpminsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9479   assert(VM_Version::supports_avx512bw(), "");
 9480   InstructionMark im(this);
 9481   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9482   attributes.set_is_evex_instruction();
 9483   attributes.set_embedded_opmask_register_specifier(mask);
 9484   if (merge) {
 9485     attributes.reset_is_clear_context();
 9486   }
 9487   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9488   emit_int8(0x38);
 9489   emit_operand(dst, src);
 9490 }
 9491 
 9492 void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9493   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9494   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9495   attributes.set_is_evex_instruction();
 9496   attributes.set_embedded_opmask_register_specifier(mask);
 9497   if (merge) {
 9498     attributes.reset_is_clear_context();
 9499   }
 9500   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9501   emit_int16((unsigned char)0xEA, (0xC0 | encode));
 9502 }
 9503 
 9504 void Assembler::evpminsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9505   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9506   InstructionMark im(this);
 9507   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9508   attributes.set_is_evex_instruction();
 9509   attributes.set_embedded_opmask_register_specifier(mask);
 9510   if (merge) {
 9511     attributes.reset_is_clear_context();
 9512   }
 9513   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9514   emit_int8((unsigned char)0xEA);
 9515   emit_operand(dst, src);
 9516 }
 9517 
 9518 void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9519   assert(VM_Version::supports_evex(), "");
 9520   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9521   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9522   attributes.set_is_evex_instruction();
 9523   attributes.set_embedded_opmask_register_specifier(mask);
 9524   if (merge) {
 9525     attributes.reset_is_clear_context();
 9526   }
 9527   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9528   emit_int16(0x39, (0xC0 | encode));
 9529 }
 9530 
 9531 void Assembler::evpminsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9532   assert(VM_Version::supports_evex(), "");
 9533   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9534   InstructionMark im(this);
 9535   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9536   attributes.set_is_evex_instruction();
 9537   attributes.set_embedded_opmask_register_specifier(mask);
 9538   if (merge) {
 9539     attributes.reset_is_clear_context();
 9540   }
 9541   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9542   emit_int8(0x39);
 9543   emit_operand(dst, src);
 9544 }
 9545 
 9546 void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9547   assert(VM_Version::supports_evex(), "");
 9548   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9549   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9550   attributes.set_is_evex_instruction();
 9551   attributes.set_embedded_opmask_register_specifier(mask);
 9552   if (merge) {
 9553     attributes.reset_is_clear_context();
 9554   }
 9555   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9556   emit_int16(0x39, (0xC0 | encode));
 9557 }
 9558 
 9559 void Assembler::evpminsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9560   assert(VM_Version::supports_evex(), "");
 9561   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9562   InstructionMark im(this);
 9563   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9564   attributes.set_is_evex_instruction();
 9565   attributes.set_embedded_opmask_register_specifier(mask);
 9566   if (merge) {
 9567     attributes.reset_is_clear_context();
 9568   }
 9569   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9570   emit_int8(0x39);
 9571   emit_operand(dst, src);
 9572 }
 9573 
 9574 
 9575 void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9576   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9577   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9578   attributes.set_is_evex_instruction();
 9579   attributes.set_embedded_opmask_register_specifier(mask);
 9580   if (merge) {
 9581     attributes.reset_is_clear_context();
 9582   }
 9583   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9584   emit_int16(0x3C, (0xC0 | encode));
 9585 }
 9586 
 9587 void Assembler::evpmaxsb(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9588   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9589   InstructionMark im(this);
 9590   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9591   attributes.set_is_evex_instruction();
 9592   attributes.set_embedded_opmask_register_specifier(mask);
 9593   if (merge) {
 9594     attributes.reset_is_clear_context();
 9595   }
 9596   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9597   emit_int8(0x3C);
 9598   emit_operand(dst, src);
 9599 }
 9600 
 9601 void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9602   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9603   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9604   attributes.set_is_evex_instruction();
 9605   attributes.set_embedded_opmask_register_specifier(mask);
 9606   if (merge) {
 9607     attributes.reset_is_clear_context();
 9608   }
 9609   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9610   emit_int16((unsigned char)0xEE, (0xC0 | encode));
 9611 }
 9612 
 9613 void Assembler::evpmaxsw(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9614   assert(VM_Version::supports_avx512bw() && (vector_len == AVX_512bit || VM_Version::supports_avx512vl()), "");
 9615   InstructionMark im(this);
 9616   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9617   attributes.set_is_evex_instruction();
 9618   attributes.set_embedded_opmask_register_specifier(mask);
 9619   if (merge) {
 9620     attributes.reset_is_clear_context();
 9621   }
 9622   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
 9623   emit_int8((unsigned char)0xEE);
 9624   emit_operand(dst, src);
 9625 }
 9626 
 9627 void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9628   assert(VM_Version::supports_evex(), "");
 9629   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9630   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9631   attributes.set_is_evex_instruction();
 9632   attributes.set_embedded_opmask_register_specifier(mask);
 9633   if (merge) {
 9634     attributes.reset_is_clear_context();
 9635   }
 9636   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9637   emit_int16(0x3D, (0xC0 | encode));
 9638 }
 9639 
 9640 void Assembler::evpmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9641   assert(VM_Version::supports_evex(), "");
 9642   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9643   InstructionMark im(this);
 9644   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9645   attributes.set_is_evex_instruction();
 9646   attributes.set_embedded_opmask_register_specifier(mask);
 9647   if (merge) {
 9648     attributes.reset_is_clear_context();
 9649   }
 9650   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9651   emit_int8(0x3D);
 9652   emit_operand(dst, src);
 9653 }
 9654 
 9655 void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9656   assert(VM_Version::supports_evex(), "");
 9657   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9658   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9659   attributes.set_is_evex_instruction();
 9660   attributes.set_embedded_opmask_register_specifier(mask);
 9661   if (merge) {
 9662     attributes.reset_is_clear_context();
 9663   }
 9664   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9665   emit_int16(0x3D, (0xC0 | encode));
 9666 }
 9667 
 9668 void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9669   assert(VM_Version::supports_evex(), "");
 9670   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
 9671   InstructionMark im(this);
 9672   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
 9673   attributes.set_is_evex_instruction();
 9674   attributes.set_embedded_opmask_register_specifier(mask);
 9675   if (merge) {
 9676     attributes.reset_is_clear_context();
 9677   }
 9678   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9679   emit_int8(0x3D);
 9680   emit_operand(dst, src);
 9681 }
 9682 
 9683 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
 9684 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
 9685   assert(UseAVX >= 2, "");
 9686   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9687   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9688   emit_int16(0x58, (0xC0 | encode));
 9689 }
 9690 
 9691 void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
 9692   assert(VM_Version::supports_avx2(), "");
 9693   assert(dst != xnoreg, "sanity");
 9694   InstructionMark im(this);
 9695   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9696   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
 9697   // swap src<->dst for encoding
 9698   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9699   emit_int8(0x58);
 9700   emit_operand(dst, src);
 9701 }
 9702 
 9703 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
 9704 void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
 9705   assert(VM_Version::supports_avx2(), "");
 9706   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9707   attributes.set_rex_vex_w_reverted();
 9708   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9709   emit_int16(0x59, (0xC0 | encode));
 9710 }
 9711 
 9712 void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
 9713   assert(VM_Version::supports_avx2(), "");
 9714   assert(dst != xnoreg, "sanity");
 9715   InstructionMark im(this);
 9716   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
 9717   attributes.set_rex_vex_w_reverted();
 9718   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
 9719   // swap src<->dst for encoding
 9720   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
 9721   emit_int8(0x59);
 9722   emit_operand(dst, src);
 9723 }
 9724 
 9725 void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) {

// Masked word compare into an opmask register, memory-operand form.
// Emits VPCMPW (signed, imm opcode 0x3F) or VPCMPUW (unsigned, 0x3E) with the
// compare predicate appended as a trailing imm8; kdst receives the per-lane
// result, predicated on 'mask'.
void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
                        int comparison, bool is_signed, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // Compares always zero the untouched opmask bits; never use merge semantics.
  attributes.reset_is_clear_context();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int opcode = is_signed ? 0x3F : 0x3E;
  emit_int8((unsigned char)opcode);
  // emit_operand wants a Register; reuse the k-register number for the reg field.
  emit_operand(as_Register(dst_enc), src);
  // The predicate immediate must follow the full operand (and any disp) bytes.
  emit_int8((unsigned char)comparison);
}
11074 
11075 void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11076   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11077   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11078   attributes.set_is_evex_instruction();
11079   attributes.set_embedded_opmask_register_specifier(mask);
11080   if (merge) {
11081     attributes.reset_is_clear_context();
11082   }
11083   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11084   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11085 }
11086 
11087 void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11088   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11089   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11090   attributes.set_is_evex_instruction();
11091   attributes.set_embedded_opmask_register_specifier(mask);
11092   if (merge) {
11093     attributes.reset_is_clear_context();
11094   }
11095   int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11096   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11097 }
11098 
11099 void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11100   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11101   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11102   attributes.set_is_evex_instruction();
11103   attributes.set_embedded_opmask_register_specifier(mask);
11104   if (merge) {
11105     attributes.reset_is_clear_context();
11106   }
11107   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11108   emit_int16(0x14, (0xC0 | encode));
11109 }
11110 
11111 void Assembler::evprorvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11112   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11113   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11114   attributes.set_is_evex_instruction();
11115   attributes.set_embedded_opmask_register_specifier(mask);
11116   if (merge) {
11117     attributes.reset_is_clear_context();
11118   }
11119   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11120   emit_int16(0x14, (0xC0 | encode));
11121 }
11122 
11123 void Assembler::evprold(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11124   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11125   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11126   attributes.set_is_evex_instruction();
11127   attributes.set_embedded_opmask_register_specifier(mask);
11128   if (merge) {
11129     attributes.reset_is_clear_context();
11130   }
11131   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11132   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11133 }
11134 
11135 void Assembler::evprolq(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
11136   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11137   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11138   attributes.set_is_evex_instruction();
11139   attributes.set_embedded_opmask_register_specifier(mask);
11140   if (merge) {
11141     attributes.reset_is_clear_context();
11142   }
11143   int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
11144   emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
11145 }
11146 
11147 void Assembler::evprolvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11148   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11149   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11150   attributes.set_is_evex_instruction();
11151   attributes.set_embedded_opmask_register_specifier(mask);
11152   if (merge) {
11153     attributes.reset_is_clear_context();
11154   }
11155   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11156   emit_int16(0x15, (0xC0 | encode));
11157 }
11158 
11159 void Assembler::evprolvq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11160   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
11161   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11162   attributes.set_is_evex_instruction();
11163   attributes.set_embedded_opmask_register_specifier(mask);
11164   if (merge) {
11165     attributes.reset_is_clear_context();
11166   }
11167   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11168   emit_int16(0x15, (0xC0 | encode));
11169 }
11170 
11171 void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
11172   assert(VM_Version::supports_avx(), "");
11173   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
11174   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
11175   int mask_enc = mask->encoding();
11176   emit_int24(0x4C, (0xC0 | encode), 0xF0 & mask_enc << 4);
11177 }
11178 
11179 void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
11180   assert(VM_Version::supports_evex(), "");
11181   // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
11182   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
11183   attributes.set_is_evex_instruction();
11184   attributes.set_embedded_opmask_register_specifier(mask);
11185   if (merge) {
11186     attributes.reset_is_clear_context();
11187   }
11188   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11189   emit_int16(0x65, (0xC0 | encode));
11190 }

11274   assert(VM_Version::supports_bmi2(), "");
11275   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
11276   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
11277   emit_int16((unsigned char)0xF7, (0xC0 | encode));
11278 }
11279 
11280 void Assembler::shrxl(Register dst, Register src1, Register src2) {
11281   assert(VM_Version::supports_bmi2(), "");
11282   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
11283   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
11284   emit_int16((unsigned char)0xF7, (0xC0 | encode));
11285 }
11286 
11287 void Assembler::shrxq(Register dst, Register src1, Register src2) {
11288   assert(VM_Version::supports_bmi2(), "");
11289   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
11290   int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
11291   emit_int16((unsigned char)0xF7, (0xC0 | encode));
11292 }
11293 
11294 void Assembler::evpmovq2m(KRegister dst, XMMRegister src, int vector_len) {
11295   assert(VM_Version::supports_avx512vldq(), "");
11296   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11297   attributes.set_is_evex_instruction();
11298   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11299   emit_int16(0x39, (0xC0 | encode));
11300 }
11301 
11302 void Assembler::evpmovd2m(KRegister dst, XMMRegister src, int vector_len) {
11303   assert(VM_Version::supports_avx512vldq(), "");
11304   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11305   attributes.set_is_evex_instruction();
11306   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11307   emit_int16(0x39, (0xC0 | encode));
11308 }
11309 
11310 void Assembler::evpmovw2m(KRegister dst, XMMRegister src, int vector_len) {
11311   assert(VM_Version::supports_avx512vlbw(), "");
11312   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11313   attributes.set_is_evex_instruction();
11314   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11315   emit_int16(0x29, (0xC0 | encode));
11316 }
11317 
11318 void Assembler::evpmovb2m(KRegister dst, XMMRegister src, int vector_len) {
11319   assert(VM_Version::supports_avx512vlbw(), "");
11320   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11321   attributes.set_is_evex_instruction();
11322   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11323   emit_int16(0x29, (0xC0 | encode));
11324 }
11325 
11326 void Assembler::evpmovm2q(XMMRegister dst, KRegister src, int vector_len) {
11327   assert(VM_Version::supports_avx512vldq(), "");
11328   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11329   attributes.set_is_evex_instruction();
11330   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11331   emit_int16(0x38, (0xC0 | encode));
11332 }
11333 
11334 void Assembler::evpmovm2d(XMMRegister dst, KRegister src, int vector_len) {
11335   assert(VM_Version::supports_avx512vldq(), "");
11336   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11337   attributes.set_is_evex_instruction();
11338   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11339   emit_int16(0x38, (0xC0 | encode));
11340 }
11341 
11342 void Assembler::evpmovm2w(XMMRegister dst, KRegister src, int vector_len) {
11343   assert(VM_Version::supports_avx512vlbw(), "");
11344   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11345   attributes.set_is_evex_instruction();
11346   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11347   emit_int16(0x28, (0xC0 | encode));
11348 }
11349 
11350 void Assembler::evpmovm2b(XMMRegister dst, KRegister src, int vector_len) {
11351   assert(VM_Version::supports_avx512vlbw(), "");
11352   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
11353   attributes.set_is_evex_instruction();
11354   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
11355   emit_int16(0x28, (0xC0 | encode));
11356 }
11357 #ifndef _LP64
11358 
// 32-bit-only INC r32: the compact one-byte encoding 0x40+reg. These opcodes
// were repurposed as REX prefixes on x86-64, hence this lives under
// #ifndef _LP64. Note: unlike ADD, INC leaves CF unchanged.
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}
11363 
// 32-bit-only alias: on IA-32 the pointer-sized LEA is just the 32-bit form,
// so forward to leal().
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}
11367 
// 32-bit-only: MOV m32, imm32 (opcode C7 /0) where the 32-bit immediate is
// emitted via emit_data() so its relocation info (rspec) is recorded —
// used for literals (e.g. addresses/oops) that may be patched later.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);  // rax here supplies the /0 opcode extension in ModRM.reg
  emit_data((int)imm32, rspec, 0);
}
11374 
11375 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
11376   InstructionMark im(this);
< prev index next >