1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/vm_version.hpp"
  34 #include "utilities/checkedCast.hpp"
  35 
  36 // MacroAssembler extends Assembler by frequently used macros.
  37 //
  38 // Instructions for which a 'better' code sequence exists depending
  39 // on arguments should also go in here.
  40 
  41 class MacroAssembler: public Assembler {
  42   friend class LIR_Assembler;
  43   friend class Runtime1;      // as_Address()
  44 
  45  public:
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore
  50   // additional registers when doing a VM call).
  51 
  52   virtual void call_VM_leaf_base(
  53     address entry_point,               // the entry point
  54     int     number_of_arguments        // the number of arguments to pop after the call
  55   );
  56 
  57  protected:
  58   // This is the base routine called by the different versions of call_VM. The interpreter
  59   // may customize this version by overriding it for its purposes (e.g., to save/restore
  60   // additional registers when doing a VM call).
  61   //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  66   virtual void call_VM_base(           // returns the register containing the thread upon return
  67     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  68     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  69     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  70     address  entry_point,              // the entry point
  71     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  72     bool     check_exceptions          // whether to check for pending exceptions after return
  73   );
  74 
  75   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  76 
  77   // helpers for FPU flag access
  78   // tmp is a temporary register, if none is available use noreg
  79   void save_rax   (Register tmp);
  80   void restore_rax(Register tmp);
  81 
  82  public:
  83   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  84 
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);
  90 
  91   Address as_Address(AddressLiteral adr);
  92   Address as_Address(ArrayAddress adr, Register rscratch);
  93 
  94   // Support for null-checks
  95   //
  96   // Generates code that causes a null OS exception if the content of reg is null.
  97   // If the accessed location is M[reg + offset] and the offset is known, provide the
  98   // offset. No explicit code generation is needed if the offset is within a certain
  99   // range (0 <= offset <= page_size).
 100 
 101   void null_check(Register reg, int offset = -1);
 102   static bool needs_explicit_null_check(intptr_t offset);
 103   static bool uses_implicit_null_check(void* address);
 104 
 105   // Required platform-specific helpers for Label::patch_instructions.
 106   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 107   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 108     unsigned char op = branch[0];
 109     assert(op == 0xE8 /* call */ ||
 110         op == 0xE9 /* jmp */ ||
 111         op == 0xEB /* short jmp */ ||
 112         (op & 0xF0) == 0x70 /* short jcc */ ||
 113         (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
 114         (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
 115         "Invalid opcode at patch point");
 116 
 117     if (op == 0xEB || (op & 0xF0) == 0x70) {
 118       // short offset operators (jmp and jcc)
 119       char* disp = (char*) &branch[1];
 120       int imm8 = checked_cast<int>(target - (address) &disp[1]);
 121       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 122                 file == nullptr ? "<null>" : file, line);
 123       *disp = (char)imm8;
 124     } else {
 125       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
 126       int imm32 = checked_cast<int>(target - (address) &disp[1]);
 127       *disp = imm32;
 128     }
 129   }
 130 
 131   // The following 4 methods return the offset of the appropriate move instruction
 132 
 133   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 134   int load_unsigned_byte(Register dst, Address src);
 135   int load_unsigned_short(Register dst, Address src);
 136 
 137   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 138   int load_signed_byte(Register dst, Address src);
 139   int load_signed_short(Register dst, Address src);
 140 
 141   // Support for sign-extension (hi:lo = extend_sign(lo))
 142   void extend_sign(Register hi, Register lo);
 143 
 144   // Load and store values by size and signed-ness
 145   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 146   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 147 
 148   // Support for inc/dec with optimal instruction selection depending on value
 149 
 150   void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
 151   void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
 152   void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
 153   void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }
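  // For example (not prescriptive): increment(rax) typically emits a single
  // incq/incl (or an addq/addl when UseIncDec is off), while increment(rax, 8)
  // emits an addq/addl with an immediate.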
 154 
 155   void decrementl(Address dst, int value = 1);
 156   void decrementl(Register reg, int value = 1);
 157 
 158   void decrementq(Register reg, int value = 1);
 159   void decrementq(Address dst, int value = 1);
 160 
 161   void incrementl(Address dst, int value = 1);
 162   void incrementl(Register reg, int value = 1);
 163 
 164   void incrementq(Register reg, int value = 1);
 165   void incrementq(Address dst, int value = 1);
 166 
 167   void incrementl(AddressLiteral dst, Register rscratch = noreg);
 168   void incrementl(ArrayAddress   dst, Register rscratch);
 169 
 170   void incrementq(AddressLiteral dst, Register rscratch = noreg);
 171 
 172   // Support optimal SSE move instructions.
 173   void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
 175     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 176     else                       { movss (dst, src); return; }
 177   }
 178   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 179   void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
 180   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 181 
 182   // Move with zero extension
 183   void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
 184 
 185   void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
 187     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 188     else                       { movsd (dst, src); return; }
 189   }
 190 
 191   void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
 192 
 193   void movdbl(XMMRegister dst, Address src) {
 194     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 195     else                         { movlpd(dst, src); return; }
 196   }
 197   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 198 
 199   void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
 202     vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
 203     movdl(dst, tmp);
 204     movswl(dst, dst);
 205   }
 206 
 207   void flt16_to_flt(XMMRegister dst, Register src) {
 208     movdl(dst, src);
 209     vcvtph2ps(dst, dst, Assembler::AVX_128bit);
 210   }
 211 
 212   // Alignment
 213   void align32();
 214   void align64();
 215   void align(uint modulus);
 216   void align(uint modulus, uint target);
 217 
 218   void post_call_nop();
 219   // A 5 byte nop that is safe for patching (see patch_verified_entry)
 220   void fat_nop();
 221 
 222   // Stack frame creation/removal
 223   void enter();
 224   void leave();
 225 
  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
 227   // The pointer will be loaded into the thread register.
 228   void get_thread(Register thread);
 229 
 230 #ifdef _LP64
 231   // Support for argument shuffling
 232 
  // The in_stk_bias and out_stk_bias arguments are stack-offset biases, in bytes
 234   void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 235   void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 236   void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 237   void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 238   void move_ptr(VMRegPair src, VMRegPair dst);
 239   void object_move(OopMap* map,
 240                    int oop_handle_offset,
 241                    int framesize_in_slots,
 242                    VMRegPair src,
 243                    VMRegPair dst,
 244                    bool is_receiver,
 245                    int* receiver_offset);
 246 #endif // _LP64
 247 
 248   // Support for VM calls
 249   //
 250   // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
 252   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
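  //
  // A typical call into the VM (a sketch; the registers and the runtime entry below
  // are illustrative only, SomeRuntime::entry stands for a hypothetical JRT_ENTRY routine):
  //   __ call_VM(rax,                                            // oop result, if any
  //              CAST_FROM_FN_PTR(address, SomeRuntime::entry),
  //              c_rarg1);                                       // argument (the thread is passed implicitly)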
 253 
 254 
 255   void call_VM(Register oop_result,
 256                address entry_point,
 257                bool check_exceptions = true);
 258   void call_VM(Register oop_result,
 259                address entry_point,
 260                Register arg_1,
 261                bool check_exceptions = true);
 262   void call_VM(Register oop_result,
 263                address entry_point,
 264                Register arg_1, Register arg_2,
 265                bool check_exceptions = true);
 266   void call_VM(Register oop_result,
 267                address entry_point,
 268                Register arg_1, Register arg_2, Register arg_3,
 269                bool check_exceptions = true);
 270 
 271   // Overloadings with last_Java_sp
 272   void call_VM(Register oop_result,
 273                Register last_java_sp,
 274                address entry_point,
 275                int number_of_arguments = 0,
 276                bool check_exceptions = true);
 277   void call_VM(Register oop_result,
 278                Register last_java_sp,
 279                address entry_point,
               Register arg_1,
               bool check_exceptions = true);
 282   void call_VM(Register oop_result,
 283                Register last_java_sp,
 284                address entry_point,
 285                Register arg_1, Register arg_2,
 286                bool check_exceptions = true);
 287   void call_VM(Register oop_result,
 288                Register last_java_sp,
 289                address entry_point,
 290                Register arg_1, Register arg_2, Register arg_3,
 291                bool check_exceptions = true);
 292 
 293   void get_vm_result  (Register oop_result, Register thread);
 294   void get_vm_result_2(Register metadata_result, Register thread);
 295 
 296   // These always tightly bind to MacroAssembler::call_VM_base
 297   // bypassing the virtual implementation
 298   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 299   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 300   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 301   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 302   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 303 
 304   void call_VM_leaf0(address entry_point);
 305   void call_VM_leaf(address entry_point,
 306                     int number_of_arguments = 0);
 307   void call_VM_leaf(address entry_point,
 308                     Register arg_1);
 309   void call_VM_leaf(address entry_point,
 310                     Register arg_1, Register arg_2);
 311   void call_VM_leaf(address entry_point,
 312                     Register arg_1, Register arg_2, Register arg_3);
 313 
 314   void call_VM_leaf(address entry_point,
 315                     Register arg_1, Register arg_2, Register arg_3, Register arg_4);
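
  // Illustrative leaf call (a sketch): no oop result, no last_Java_frame and no
  // pending-exception check; SomeRuntime::leaf_entry is hypothetical.
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SomeRuntime::leaf_entry), c_rarg0, c_rarg1);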
 316 
 317   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 318   // bypassing the virtual implementation
 319   void super_call_VM_leaf(address entry_point);
 320   void super_call_VM_leaf(address entry_point, Register arg_1);
 321   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 322   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 323   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 324 
 325   // last Java Frame (fills frame anchor)
 326   void set_last_Java_frame(Register thread,
 327                            Register last_java_sp,
 328                            Register last_java_fp,
 329                            address  last_java_pc,
 330                            Register rscratch);
 331 
 332   // thread in the default location (r15_thread on 64bit)
 333   void set_last_Java_frame(Register last_java_sp,
 334                            Register last_java_fp,
 335                            address  last_java_pc,
 336                            Register rscratch);
 337 
 338   void reset_last_Java_frame(Register thread, bool clear_fp);
 339 
 340   // thread in the default location (r15_thread on 64bit)
 341   void reset_last_Java_frame(bool clear_fp);
 342 
 343   // jobjects
 344   void clear_jobject_tag(Register possibly_non_local);
 345   void resolve_jobject(Register value, Register thread, Register tmp);
 346   void resolve_global_jobject(Register value, Register thread, Register tmp);
 347 
 348   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 349   void c2bool(Register x);
 350 
 351   // C++ bool manipulation
 352 
 353   void movbool(Register dst, Address src);
 354   void movbool(Address dst, bool boolconst);
 355   void movbool(Address dst, Register src);
 356   void testbool(Register dst);
 357 
 358   void resolve_oop_handle(Register result, Register tmp);
 359   void resolve_weak_handle(Register result, Register tmp);
 360   void load_mirror(Register mirror, Register method, Register tmp);
 361   void load_method_holder_cld(Register rresult, Register rmethod);
 362 
 363   void load_method_holder(Register holder, Register method);
 364 
 365   // oop manipulations
 366 #ifdef _LP64
 367   void load_nklass_compact(Register dst, Register src);
 368 #endif
 369   void load_klass(Register dst, Register src, Register tmp);
 370   void store_klass(Register dst, Register src, Register tmp);
 371 
 372   // Compares the Klass pointer of an object to a given Klass (which might be narrow,
 373   // depending on UseCompressedClassPointers).
 374   void cmp_klass(Register klass, Register dst, Register tmp);
 375 
 376   // Compares the Klass pointer of two objects o1 and o2. Result is in the condition flags.
 377   // Uses tmp1 and tmp2 as temporary registers.
 378   void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2);
 379 
 380   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 381                       Register tmp1, Register thread_tmp);
 382   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 383                        Register tmp1, Register tmp2, Register tmp3);
 384 
 385   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 386                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 387   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 388                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 389   void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
 390                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 391 
 392   // Used for storing null. All other oop constants should be
 393   // stored using routines that take a jobject.
 394   void store_heap_oop_null(Address dst);
 395 
 396 #ifdef _LP64
 397   void store_klass_gap(Register dst, Register src);
 398 
 399   // This dummy is to prevent a call to store_heap_oop from
 400   // converting a zero (like null) into a Register by giving
 401   // the compiler two choices it can't resolve
 402 
 403   void store_heap_oop(Address dst, void* dummy);
 404 
 405   void encode_heap_oop(Register r);
 406   void decode_heap_oop(Register r);
 407   void encode_heap_oop_not_null(Register r);
 408   void decode_heap_oop_not_null(Register r);
 409   void encode_heap_oop_not_null(Register dst, Register src);
 410   void decode_heap_oop_not_null(Register dst, Register src);
 411 
 412   void set_narrow_oop(Register dst, jobject obj);
 413   void set_narrow_oop(Address dst, jobject obj);
 414   void cmp_narrow_oop(Register dst, jobject obj);
 415   void cmp_narrow_oop(Address dst, jobject obj);
 416 
 417   void encode_klass_not_null(Register r, Register tmp);
 418   void decode_klass_not_null(Register r, Register tmp);
 419   void encode_and_move_klass_not_null(Register dst, Register src);
 420   void decode_and_move_klass_not_null(Register dst, Register src);
 421   void set_narrow_klass(Register dst, Klass* k);
 422   void set_narrow_klass(Address dst, Klass* k);
 423   void cmp_narrow_klass(Register dst, Klass* k);
 424   void cmp_narrow_klass(Address dst, Klass* k);
 425 
 426   // if heap base register is used - reinit it with the correct value
 427   void reinit_heapbase();
 428 
 429   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 430 
 431 #endif // _LP64
 432 
 433   // Int division/remainder for Java
  // (as idivl, but checks for the special case as described in the JVM spec.)
 435   // returns idivl instruction offset for implicit exception handling
 436   int corrected_idivl(Register reg);
 437 
 438   // Long division/remainder for Java
  // (as idivq, but checks for the special case as described in the JVM spec.)
 440   // returns idivq instruction offset for implicit exception handling
 441   int corrected_idivq(Register reg);
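
  // The special case is the only dividend/divisor pair whose quotient overflows
  // (idivl/idivq would fault on it):
  //   min_jint  / -1 == min_jint     min_jint  % -1 == 0
  //   min_jlong / -1 == min_jlong    min_jlong % -1 == 0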
 442 
 443   void int3();
 444 
 445   // Long operation macros for a 32bit cpu
 446   // Long negation for Java
 447   void lneg(Register hi, Register lo);
 448 
 449   // Long multiplication for Java
 450   // (destroys contents of eax, ebx, ecx and edx)
 451   void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
 452 
 453   // Long shifts for Java
 454   // (semantics as described in JVM spec.)
 455   void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
 456   void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)
 457 
 458   // Long compare for Java
 459   // (semantics as described in JVM spec.)
 460   void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
 461 
 462 
 463   // misc
 464 
 465   // Sign extension
 466   void sign_extend_short(Register reg);
 467   void sign_extend_byte(Register reg);
 468 
 469   // Division by power of 2, rounding towards 0
 470   void division_with_shift(Register reg, int shift_value);
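  // For example, -7 / 8 must yield 0 (round toward zero), not -1 as a plain
  // arithmetic shift would, so negative values need a bias of (2^shift_value - 1)
  // before the arithmetic right shift.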
 471 
 472 #ifndef _LP64
 473   // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
 474   //
 475   // CF (corresponds to C0) if x < y
 476   // PF (corresponds to C2) if unordered
 477   // ZF (corresponds to C3) if x = y
 478   //
 479   // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
 480   // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
 481   void fcmp(Register tmp);
 482   // Variant of the above which allows y to be further down the stack
 483   // and which only pops x and y if specified. If pop_right is
 484   // specified then pop_left must also be specified.
 485   void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
 486 
 487   // Floating-point comparison for Java
 488   // Compares the top-most stack entries on the FPU stack and stores the result in dst.
 489   // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
 490   // (semantics as described in JVM spec.)
 491   void fcmp2int(Register dst, bool unordered_is_less);
 492   // Variant of the above which allows y to be further down the stack
 493   // and which only pops x and y if specified. If pop_right is
 494   // specified then pop_left must also be specified.
 495   void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
 496 
 497   // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
 498   // tmp is a temporary register, if none is available use noreg
 499   void fremr(Register tmp);
 500 
 501   // only if +VerifyFPU
 502   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64
 504 
 505   // dst = c = a * b + c
 506   void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 507   void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 508 
 509   void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 510   void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 511   void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 512   void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 513 
 514 
 515   // same as fcmp2int, but using SSE2
 516   void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 517   void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 518 
 519   // branch to L if FPU flag C2 is set/not set
 520   // tmp is a temporary register, if none is available use noreg
 521   void jC2 (Register tmp, Label& L);
 522   void jnC2(Register tmp, Label& L);
 523 
 524   // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
 525   // register xmm0. Otherwise, the value is loaded onto the FPU stack.
 526   void load_float(Address src);
 527 
 528   // Store float value to 'address'. If UseSSE >= 1, the value is stored
 529   // from register xmm0. Otherwise, the value is stored from the FPU stack.
 530   void store_float(Address dst);
 531 
 532   // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
 533   // register xmm0. Otherwise, the value is loaded onto the FPU stack.
 534   void load_double(Address src);
 535 
 536   // Store double value to 'address'. If UseSSE >= 2, the value is stored
 537   // from register xmm0. Otherwise, the value is stored from the FPU stack.
 538   void store_double(Address dst);
 539 
 540 #ifndef _LP64
 541   // Pop ST (ffree & fincstp combined)
 542   void fpop();
 543 
 544   void empty_FPU_stack();
 545 #endif // !_LP64
 546 
 547   void push_IU_state();
 548   void pop_IU_state();
 549 
 550   void push_FPU_state();
 551   void pop_FPU_state();
 552 
 553   void push_CPU_state();
 554   void pop_CPU_state();
 555 
 556   void push_cont_fastpath();
 557   void pop_cont_fastpath();
 558 
 559   void inc_held_monitor_count();
 560   void dec_held_monitor_count();
 561 
 562   DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
 563 
  // Round reg up to a multiple of modulus (which must be a power of two),
  // e.g. round_to(rax, wordSize) rounds rax up to the next word boundary
 565   void round_to(Register reg, int modulus);
 566 
 567 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - always save/restore the FPU state as a whole, and
  // - do not care about the AVX-512 opmask registers.
 572   static RegSet call_clobbered_gp_registers();
 573   static XMMRegSet call_clobbered_xmm_registers();
 574 
 575   void push_set(XMMRegSet set, int offset);
 576   void pop_set(XMMRegSet set, int offset);
 577 
 578 public:
 579   void push_set(RegSet set, int offset = -1);
 580   void pop_set(RegSet set, int offset = -1);
 581 
 582   // Push and pop everything that might be clobbered by a native
 583   // runtime call.
 584   // Only save the lower 64 bits of each vector register.
 585   // Additional registers can be excluded in a passed RegSet.
 586   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 587   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 588 
 589   void push_call_clobbered_registers(bool save_fpu = true) {
 590     push_call_clobbered_registers_except(RegSet(), save_fpu);
 591   }
 592   void pop_call_clobbered_registers(bool restore_fpu = true) {
 593     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 594   }
 595 
 596   // allocation
 597   void tlab_allocate(
 598     Register thread,                   // Current thread
 599     Register obj,                      // result: pointer to object after successful allocation
 600     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 601     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 602     Register t1,                       // temp register
 603     Register t2,                       // temp register
 604     Label&   slow_case                 // continuation point if fast allocation fails
 605   );
 606   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 607 
 608   void population_count(Register dst, Register src, Register scratch1, Register scratch2);
 609 
 610   // interface method calling
 611   void lookup_interface_method(Register recv_klass,
 612                                Register intf_klass,
 613                                RegisterOrConstant itable_index,
 614                                Register method_result,
 615                                Register scan_temp,
 616                                Label& no_such_interface,
 617                                bool return_method = true);
 618 
 619   void lookup_interface_method_stub(Register recv_klass,
 620                                     Register holder_klass,
 621                                     Register resolved_klass,
 622                                     Register method_result,
 623                                     Register scan_temp,
 624                                     Register temp_reg2,
 625                                     Register receiver,
 626                                     int itable_index,
 627                                     Label& L_no_such_interface);
 628 
 629   // virtual method calling
 630   void lookup_virtual_method(Register recv_klass,
 631                              RegisterOrConstant vtable_index,
 632                              Register method_result);
 633 
 634   // Test sub_klass against super_klass, with fast and slow paths.
 635 
 636   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 637   // One of the three labels can be null, meaning take the fall-through.
 638   // If super_check_offset is -1, the value is loaded up from super_klass.
 639   // No registers are killed, except temp_reg.
 640   void check_klass_subtype_fast_path(Register sub_klass,
 641                                      Register super_klass,
 642                                      Register temp_reg,
 643                                      Label* L_success,
 644                                      Label* L_failure,
 645                                      Label* L_slow_path,
 646                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 647 
 648   // The rest of the type check; must be wired to a corresponding fast path.
 649   // It does not repeat the fast path logic, so don't use it standalone.
 650   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 651   // Updates the sub's secondary super cache as necessary.
 652   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 653   void check_klass_subtype_slow_path(Register sub_klass,
 654                                      Register super_klass,
 655                                      Register temp_reg,
 656                                      Register temp2_reg,
 657                                      Label* L_success,
 658                                      Label* L_failure,
 659                                      bool set_cond_codes = false);
 660   void hashed_check_klass_subtype_slow_path(Register sub_klass,
 661                                      Register super_klass,
 662                                      Register temp_reg,
 663                                      Register temp2_reg,
 664                                      Label* L_success,
 665                                      Label* L_failure,
 666                                      bool set_cond_codes = false);
 667 
 668   // As above, but with a constant super_klass.
 669   // The result is in Register result, not the condition codes.
 670   void lookup_secondary_supers_table(Register sub_klass,
 671                                      Register super_klass,
 672                                      Register temp1,
 673                                      Register temp2,
 674                                      Register temp3,
 675                                      Register temp4,
 676                                      Register result,
 677                                      u1 super_klass_slot);
 678 
 679   void lookup_secondary_supers_table_slow_path(Register r_super_klass,
 680                                                Register r_array_base,
 681                                                Register r_array_index,
 682                                                Register r_bitmap,
 683                                                Register temp1,
 684                                                Register temp2,
 685                                                Label* L_success,
 686                                                Label* L_failure = nullptr);
 687 
 688   void verify_secondary_supers_table(Register r_sub_klass,
 689                                      Register r_super_klass,
 690                                      Register expected,
 691                                      Register temp1,
 692                                      Register temp2,
 693                                      Register temp3);
 694 
 695   void repne_scanq(Register addr, Register value, Register count, Register limit,
 696                    Label* L_success,
 697                    Label* L_failure = nullptr);
 698 
  // Simplified, combined version, good for typical uses.
 700   // Falls through on failure.
 701   void check_klass_subtype(Register sub_klass,
 702                            Register super_klass,
 703                            Register temp_reg,
 704                            Label& L_success);
 705 
 706   void clinit_barrier(Register klass,
 707                       Register thread,
 708                       Label* L_fast_path = nullptr,
 709                       Label* L_slow_path = nullptr);
 710 
 711   // method handles (JSR 292)
 712   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 713 
 714   // Debugging
 715 
 716   // only if +VerifyOops
 717   void _verify_oop(Register reg, const char* s, const char* file, int line);
 718   void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
 719 
 720   void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
 721     if (VerifyOops) {
 722       _verify_oop(reg, s, file, line);
 723     }
 724   }
  void _verify_oop_addr_checked(Address addr, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(addr, s, file, line);
    }
  }
 730 
 731   // TODO: verify method and klass metadata (compare against vptr?)
 732   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 733   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 734 
 735 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
 736 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
 737 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
 738 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 739 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 740 
 741   // Verify or restore cpu control state after JNI call
 742   void restore_cpu_control_state_after_jni(Register rscratch);
 743 
 744   // prints msg, dumps registers and stops execution
 745   void stop(const char* msg);
 746 
 747   // prints msg and continues
 748   void warn(const char* msg);
 749 
 750   // dumps registers and other state
 751   void print_state();
 752 
 753   static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
 754   static void debug64(char* msg, int64_t pc, int64_t regs[]);
 755   static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
 756   static void print_state64(int64_t pc, int64_t regs[]);
 757 
 758   void os_breakpoint();
 759 
 760   void untested()                                { stop("untested"); }
 761 
 762   void unimplemented(const char* what = "");
 763 
 764   void should_not_reach_here()                   { stop("should not reach here"); }
 765 
 766   void print_CPU_state();
 767 
 768   // Stack overflow checking
 769   void bang_stack_with_offset(int offset) {
    // stack grows down; the bang is at a negative displacement from rsp,
    // so the caller must pass a positive offset
    assert(offset > 0, "must bang with a positive offset");
 772     movl(Address(rsp, (-offset)), rax);
 773   }
 774 
 775   // Writes to stack successive pages until offset reached to check for
 776   // stack overflow + shadow pages.  Also, clobbers tmp
 777   void bang_stack_size(Register size, Register tmp);
 778 
 779   // Check for reserved stack access in method being exited (for JIT)
 780   void reserved_stack_check();
 781 
 782   void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
 783 
 784   void verify_tlab();
 785 
 786   static Condition negate_condition(Condition cond);
 787 
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
 792 
 793   // Arithmetics
 794 
 795 
 796   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 797   void addptr(Address dst, Register src);
 798 
 799   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 800   void addptr(Register dst, int32_t src);
 801   void addptr(Register dst, Register src);
 802   void addptr(Register dst, RegisterOrConstant src) {
 803     if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
 804     else                   addptr(dst, src.as_register());
 805   }
 806 
 807   void andptr(Register dst, int32_t src);
 808   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
 809 
 810 #ifdef _LP64
 811   using Assembler::andq;
 812   void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
 813 #endif
 814 
 815   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 816 
 817   // renamed to drag out the casting of address to int32_t/intptr_t
 818   void cmp32(Register src1, int32_t imm);
 819 
 820   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 821   // compare reg - mem, or reg - &mem
 822   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 823 
 824   void cmp32(Register src1, Address src2);
 825 
 826 #ifndef _LP64
 827   void cmpklass(Address dst, Metadata* obj);
 828   void cmpklass(Register dst, Metadata* obj);
 829   void cmpoop(Address dst, jobject obj);
#endif // !_LP64
 831 
 832   void cmpoop(Register src1, Register src2);
 833   void cmpoop(Register src1, Address src2);
 834   void cmpoop(Register dst, jobject obj, Register rscratch);
 835 
  // NOTE: src2 must be the lval. This is NOT a mem-mem compare
 837   void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
 838 
 839   void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
 840 
 841   void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 842   void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 843   // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 844 
 845   void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 846   void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 847 
  // cmp64 to avoid hiding cmpq
 849   void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
 850 
 851   void cmpxchgptr(Register reg, Address adr);
 852 
 853   void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
 854 
 855   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
 856   void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 857 
 858 
 859   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 860 
 861   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
 862 
 863   void shlptr(Register dst, int32_t shift);
 864   void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
 865 
 866   void shrptr(Register dst, int32_t shift);
 867   void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
 868 
 869   void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
 870   void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
 871 
 872   void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 873 
 874   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 875   void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8 bits
 877   void subptr_imm32(Register dst, int32_t src);
 878   void subptr(Register dst, Register src);
 879   void subptr(Register dst, RegisterOrConstant src) {
 880     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 881     else                   subptr(dst,       src.as_register());
 882   }
 883 
 884   void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 885   void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 886 
 887   void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 888   void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 889 
 890   void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
 891 
 892 
 893 
 894   // Helper functions for statistics gathering.
 895   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 896   void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
 897   // Unconditional atomic increment.
 898   void atomic_incl(Address counter_addr);
 899   void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
 900 #ifdef _LP64
 901   void atomic_incq(Address counter_addr);
 902   void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
 903 #endif
 904   void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
 905   void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 906 
 907   void lea(Register dst, Address        adr) { Assembler::lea(dst, adr); }
 908   void lea(Register dst, AddressLiteral adr);
 909   void lea(Address  dst, AddressLiteral adr, Register rscratch);
 910 
 911   void leal32(Register dst, Address src) { leal(dst, src); }
 912 
 913   // Import other testl() methods from the parent class or else
 914   // they will be hidden by the following overriding declaration.
 915   using Assembler::testl;
 916   void testl(Address dst, int32_t imm32);
 917   void testl(Register dst, int32_t imm32);
 918   void testl(Register dst, AddressLiteral src); // requires reachable address
 919   using Assembler::testq;
 920   void testq(Address dst, int32_t imm32);
 921   void testq(Register dst, int32_t imm32);
 922 
 923   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 924   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 925   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 926   void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 927 
 928   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
 929   void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
 930   void testptr(Address src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
 931   void testptr(Register src1, Register src2);
 932 
 933   void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
 934   void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
 935 
 936   // Calls
 937 
 938   void call(Label& L, relocInfo::relocType rtype);
 939   void call(Register entry);
 940   void call(Address addr) { Assembler::call(addr); }
 941 
  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because it is more natural
  // for jumps/calls.
 945   void call(AddressLiteral entry, Register rscratch = rax);
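  // Illustrative direct call to a known code address (a sketch): RuntimeAddress
  // wraps a raw address as an AddressLiteral, so the call targets 'entry' itself.
  //   __ call(RuntimeAddress(entry));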
 946 
 947   // Emit the CompiledIC call idiom
 948   void ic_call(address entry, jint method_index = 0);
 949   static int ic_check_size();
 950   int ic_check(int end_alignment);
 951 
 952   void emit_static_call_stub();
 953 
 954   // Jumps
 955 
  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because it is more natural
  // for jumps/calls.
 959   void jump(AddressLiteral dst, Register rscratch = noreg);
 960 
 961   void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
 962 
 963   // 32bit can do a case table jump in one instruction but we no longer allow the base
 964   // to be installed in the Address class. This jump will transfer to the address
 965   // contained in the location described by entry (not the address of entry)
 966   void jump(ArrayAddress entry, Register rscratch);
 967 
 968   // Adding more natural conditional jump instructions
 969   void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
 970   void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
 971   void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
 972   void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
 973   void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
 974   void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
 975   void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
 976   void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
 977   void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
 978   void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
 979   void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
 980   void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
 981   void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
 982   void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
 983   void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
 984   void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
 985   void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
 986   void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
 987   void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
 988   void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
 989   void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
 990   void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
 991   void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
 992   void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
 993   void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
 994   void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
 995   void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
 996   void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
 997   void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
 998   void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
 999   // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1000   // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1001 
1002   // Short versions of the above
1003   void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
1004   void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
1005   void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
1006   void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
1007   void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
1008   void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
1009   void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
1010   void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
1011   void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
1012   void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
1013   void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
1014   void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1015   void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1016   void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
1017   void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
1018   void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
1019   void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
1020   void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
1021   void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
1022   void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
1023   void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1024   void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1025   void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
1026   void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
1027   void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
1028   void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
1029   void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
1030   void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
1031   void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
1032   void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
1033   // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
1034   // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
1035 
1036   // Floating
1037 
1038   void push_f(XMMRegister r);
1039   void pop_f(XMMRegister r);
1040   void push_d(XMMRegister r);
1041   void pop_d(XMMRegister r);
1042 
1043   void andpd(XMMRegister dst, XMMRegister    src) { Assembler::andpd(dst, src); }
1044   void andpd(XMMRegister dst, Address        src) { Assembler::andpd(dst, src); }
1045   void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1046 
1047   void andps(XMMRegister dst, XMMRegister    src) { Assembler::andps(dst, src); }
1048   void andps(XMMRegister dst, Address        src) { Assembler::andps(dst, src); }
1049   void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1050 
1051   void comiss(XMMRegister dst, XMMRegister    src) { Assembler::comiss(dst, src); }
1052   void comiss(XMMRegister dst, Address        src) { Assembler::comiss(dst, src); }
1053   void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1054 
1055   void comisd(XMMRegister dst, XMMRegister    src) { Assembler::comisd(dst, src); }
1056   void comisd(XMMRegister dst, Address        src) { Assembler::comisd(dst, src); }
1057   void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1058 
1059 #ifndef _LP64
1060   void fadd_s(Address        src) { Assembler::fadd_s(src); }
1061   void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
1062 
1063   void fldcw(Address        src) { Assembler::fldcw(src); }
1064   void fldcw(AddressLiteral src);
1065 
1066   void fld_s(int index)          { Assembler::fld_s(index); }
1067   void fld_s(Address        src) { Assembler::fld_s(src); }
1068   void fld_s(AddressLiteral src);
1069 
1070   void fld_d(Address        src) { Assembler::fld_d(src); }
1071   void fld_d(AddressLiteral src);
1072 
1073   void fld_x(Address        src) { Assembler::fld_x(src); }
1074   void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }
1075 
1076   void fmul_s(Address        src) { Assembler::fmul_s(src); }
1077   void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
1078 #endif // !_LP64
1079 
1080   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1081   void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1082 
1083 #ifdef _LP64
1084  private:
1085   void sha256_AVX2_one_round_compute(
1086     Register  reg_old_h,
1087     Register  reg_a,
1088     Register  reg_b,
1089     Register  reg_c,
1090     Register  reg_d,
1091     Register  reg_e,
1092     Register  reg_f,
1093     Register  reg_g,
1094     Register  reg_h,
1095     int iter);
1096   void sha256_AVX2_four_rounds_compute_first(int start);
1097   void sha256_AVX2_four_rounds_compute_last(int start);
1098   void sha256_AVX2_one_round_and_sched(
1099         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1100         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
1101         XMMRegister xmm_2,     /* ymm6 */
1102         XMMRegister xmm_3,     /* ymm7 */
        Register    reg_a,      /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
1104         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
1105         Register    reg_c,      /* edi */
1106         Register    reg_d,      /* esi */
1107         Register    reg_e,      /* r8d */
1108         Register    reg_f,      /* r9d */
1109         Register    reg_g,      /* r10d */
1110         Register    reg_h,      /* r11d */
1111         int iter);
1112 
1113   void addm(int disp, Register r1, Register r2);
1114 
1115   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1116                                      Register e, Register f, Register g, Register h, int iteration);
1117 
1118   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1119                                           Register a, Register b, Register c, Register d, Register e, Register f,
1120                                           Register g, Register h, int iteration);
1121 
1122   void addmq(int disp, Register r1, Register r2);
1123  public:
1124   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1125                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1126                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1127                    bool multi_block, XMMRegister shuf_mask);
1128   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1129                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1130                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1131                    XMMRegister shuf_mask);
1132 #endif // _LP64
1133 
1134   void fast_md5(Register buf, Address state, Address ofs, Address limit,
1135                 bool multi_block);
1136 
1137   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1138                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1139                  Register buf, Register state, Register ofs, Register limit, Register rsp,
1140                  bool multi_block);
1141 
1142 #ifdef _LP64
1143   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1144                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1145                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1146                    bool multi_block, XMMRegister shuf_mask);
1147 #else
1148   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1149                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1150                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1151                    bool multi_block);
1152 #endif
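
       // For the message-digest stubs above (MD5, SHA-1, SHA-256, SHA-512), 'multi_block'
       // selects the multi-block variant: the generated code keeps consuming consecutive
       // input blocks from 'buf', advancing 'ofs' until it reaches 'limit', rather than
       // processing a single block.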
1153 
1154   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1155                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1156                 Register rax, Register rcx, Register rdx, Register tmp);
1157 
1158 #ifndef _LP64
1159  private:
1160   // Initialized in macroAssembler_x86_constants.cpp
1161   static address ONES;
1162   static address L_2IL0FLOATPACKET_0;
1163   static address PI4_INV;
1164   static address PI4X3;
1165   static address PI4X4;
1166 
1167  public:
1168   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1169                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1170                 Register rax, Register rcx, Register rdx, Register tmp1);
1171 
1172   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1173                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1174                 Register rax, Register rcx, Register rdx, Register tmp);
1175 
1176   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1177                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1178                 Register rdx, Register tmp);
1179 
1180   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1181                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1182                 Register rax, Register rbx, Register rdx);
1183 
1184   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1185                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1186                 Register rax, Register rcx, Register rdx, Register tmp);
1187 
1188   void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1189                         Register edx, Register ebx, Register esi, Register edi,
1190                         Register ebp, Register esp);
1191 
1192   void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1193                          Register esi, Register edi, Register ebp, Register esp);
1194 
1195   void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1196                         Register edx, Register ebx, Register esi, Register edi,
1197                         Register ebp, Register esp);
1198 
1199   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1200                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1201                 Register rax, Register rcx, Register rdx, Register tmp);
1202 #endif // !_LP64
1203 
1204 private:
1205 
1206   // These are private because users should use movflt/movdbl instead.
1207 
1208   void movss(Address     dst, XMMRegister    src) { Assembler::movss(dst, src); }
1209   void movss(XMMRegister dst, XMMRegister    src) { Assembler::movss(dst, src); }
1210   void movss(XMMRegister dst, Address        src) { Assembler::movss(dst, src); }
1211   void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1212 
1213   void movlpd(XMMRegister dst, Address        src) {Assembler::movlpd(dst, src); }
1214   void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1215 
1216 public:
1217 
1218   void addsd(XMMRegister dst, XMMRegister    src) { Assembler::addsd(dst, src); }
1219   void addsd(XMMRegister dst, Address        src) { Assembler::addsd(dst, src); }
1220   void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1221 
1222   void addss(XMMRegister dst, XMMRegister    src) { Assembler::addss(dst, src); }
1223   void addss(XMMRegister dst, Address        src) { Assembler::addss(dst, src); }
1224   void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1225 
1226   void addpd(XMMRegister dst, XMMRegister    src) { Assembler::addpd(dst, src); }
1227   void addpd(XMMRegister dst, Address        src) { Assembler::addpd(dst, src); }
1228   void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1229 
1230   using Assembler::vbroadcastsd;
1231   void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1232 
1233   using Assembler::vbroadcastss;
1234   void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1235 
1236   // Vector float blend
1237   void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1238   void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1239 
1240   void divsd(XMMRegister dst, XMMRegister    src) { Assembler::divsd(dst, src); }
1241   void divsd(XMMRegister dst, Address        src) { Assembler::divsd(dst, src); }
1242   void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1243 
1244   void divss(XMMRegister dst, XMMRegister    src) { Assembler::divss(dst, src); }
1245   void divss(XMMRegister dst, Address        src) { Assembler::divss(dst, src); }
1246   void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1247 
1248   // Move Unaligned Double Quadword
1249   void movdqu(Address     dst, XMMRegister    src);
1250   void movdqu(XMMRegister dst, XMMRegister    src);
1251   void movdqu(XMMRegister dst, Address        src);
1252   void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1253 
1254   void kmovwl(Register  dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1255   void kmovwl(Address   dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1256   void kmovwl(KRegister dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1257   void kmovwl(KRegister dst, Register       src) { Assembler::kmovwl(dst, src); }
1258   void kmovwl(KRegister dst, Address        src) { Assembler::kmovwl(dst, src); }
1259   void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1260 
1261   void kmovql(KRegister dst, KRegister      src) { Assembler::kmovql(dst, src); }
1262   void kmovql(KRegister dst, Register       src) { Assembler::kmovql(dst, src); }
1263   void kmovql(Register  dst, KRegister      src) { Assembler::kmovql(dst, src); }
1264   void kmovql(KRegister dst, Address        src) { Assembler::kmovql(dst, src); }
1265   void kmovql(Address   dst, KRegister      src) { Assembler::kmovql(dst, src); }
1266   void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1267 
1268   // Safe mask-register move: lowers to 16-bit moves on targets that support only the
1269   // AVX512F feature, and to 64-bit moves on targets that support the AVX512BW feature.
1270   void kmov(Address  dst, KRegister src);
1271   void kmov(KRegister dst, Address src);
1272   void kmov(KRegister dst, KRegister src);
1273   void kmov(Register dst, KRegister src);
1274   void kmov(KRegister dst, Register src);
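       // Illustrative use (register names assumed): after a vector compare that left its
       // result in k2, 'kmov(rbx, k2)' copies the mask bits into a general-purpose
       // register so they can be tested with ordinary integer instructions.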
1275 
1276   using Assembler::movddup;
1277   void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1278 
1279   using Assembler::vmovddup;
1280   void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1281 
1282   // AVX Unaligned forms
1283   void vmovdqu(Address     dst, XMMRegister    src);
1284   void vmovdqu(XMMRegister dst, Address        src);
1285   void vmovdqu(XMMRegister dst, XMMRegister    src);
1286   void vmovdqu(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1287   void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1288 
1289   // AVX512 Unaligned
1290   void evmovdqu(BasicType type, KRegister kmask, Address     dst, XMMRegister src, bool merge, int vector_len);
1291   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address     src, bool merge, int vector_len);
1292 
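       // For the masked register-to-register forms below, the move is elided when it would
       // be a no-op, i.e. when dst and src are the same register and no mask (k0) is applied.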
1293   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1294   void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1295 
1296   void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1297     if (dst->encoding() != src->encoding() || mask != k0)  {
1298       Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1299     }
1300   }
1301   void evmovdqub(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1302   void evmovdqub(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1303   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1304 
1305   void evmovdquw(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1306   void evmovdquw(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1307 
1308   void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1309     if (dst->encoding() != src->encoding() || mask != k0) {
1310       Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1311     }
1312   }
1313   void evmovdquw(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1314   void evmovdquw(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1315   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1316 
1317   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1318      if (dst->encoding() != src->encoding()) {
1319        Assembler::evmovdqul(dst, src, vector_len);
1320      }
1321   }
1322   void evmovdqul(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1323   void evmovdqul(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1324 
1325   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1326     if (dst->encoding() != src->encoding() || mask != k0)  {
1327       Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1328     }
1329   }
1330   void evmovdqul(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1331   void evmovdqul(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1332   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1333 
1334   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1335     if (dst->encoding() != src->encoding()) {
1336       Assembler::evmovdquq(dst, src, vector_len);
1337     }
1338   }
1339   void evmovdquq(XMMRegister dst, Address        src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1340   void evmovdquq(Address     dst, XMMRegister    src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1341   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1342 
1343   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1344     if (dst->encoding() != src->encoding() || mask != k0) {
1345       Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1346     }
1347   }
1348   void evmovdquq(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1349   void evmovdquq(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1350   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1351 
1352   // Move Aligned Double Quadword
1353   void movdqa(XMMRegister dst, XMMRegister    src) { Assembler::movdqa(dst, src); }
1354   void movdqa(XMMRegister dst, Address        src) { Assembler::movdqa(dst, src); }
1355   void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1356 
1357   void movsd(Address     dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1358   void movsd(XMMRegister dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1359   void movsd(XMMRegister dst, Address        src) { Assembler::movsd(dst, src); }
1360   void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1361 
1362   void mulpd(XMMRegister dst, XMMRegister    src) { Assembler::mulpd(dst, src); }
1363   void mulpd(XMMRegister dst, Address        src) { Assembler::mulpd(dst, src); }
1364   void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1365 
1366   void mulsd(XMMRegister dst, XMMRegister    src) { Assembler::mulsd(dst, src); }
1367   void mulsd(XMMRegister dst, Address        src) { Assembler::mulsd(dst, src); }
1368   void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1369 
1370   void mulss(XMMRegister dst, XMMRegister    src) { Assembler::mulss(dst, src); }
1371   void mulss(XMMRegister dst, Address        src) { Assembler::mulss(dst, src); }
1372   void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1373 
1374   // Carry-Less Multiplication Quadword
1375   void pclmulldq(XMMRegister dst, XMMRegister src) {
1376     // 0x00 - multiply lower 64 bits [0:63]
1377     Assembler::pclmulqdq(dst, src, 0x00);
1378   }
1379   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1380     // 0x11 - multiply upper 64 bits [64:127]
1381     Assembler::pclmulqdq(dst, src, 0x11);
1382   }
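       // In pclmulqdq's imm8, bit 0 selects which 64-bit half of dst participates and bit 4
       // selects the half of src, so 0x00 pairs the two low halves and 0x11 the two high halves.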
1383 
1384   void pcmpeqb(XMMRegister dst, XMMRegister src);
1385   void pcmpeqw(XMMRegister dst, XMMRegister src);
1386 
1387   void pcmpestri(XMMRegister dst, Address src, int imm8);
1388   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1389 
1390   void pmovzxbw(XMMRegister dst, XMMRegister src);
1391   void pmovzxbw(XMMRegister dst, Address src);
1392 
1393   void pmovmskb(Register dst, XMMRegister src);
1394 
1395   void ptest(XMMRegister dst, XMMRegister src);
1396 
1397   void roundsd(XMMRegister dst, XMMRegister    src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1398   void roundsd(XMMRegister dst, Address        src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1399   void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1400 
1401   void sqrtss(XMMRegister dst, XMMRegister     src) { Assembler::sqrtss(dst, src); }
1402   void sqrtss(XMMRegister dst, Address         src) { Assembler::sqrtss(dst, src); }
1403   void sqrtss(XMMRegister dst, AddressLiteral  src, Register rscratch = noreg);
1404 
1405   void subsd(XMMRegister dst, XMMRegister    src) { Assembler::subsd(dst, src); }
1406   void subsd(XMMRegister dst, Address        src) { Assembler::subsd(dst, src); }
1407   void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1408 
1409   void subss(XMMRegister dst, XMMRegister    src) { Assembler::subss(dst, src); }
1410   void subss(XMMRegister dst, Address        src) { Assembler::subss(dst, src); }
1411   void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1412 
1413   void ucomiss(XMMRegister dst, XMMRegister    src) { Assembler::ucomiss(dst, src); }
1414   void ucomiss(XMMRegister dst, Address        src) { Assembler::ucomiss(dst, src); }
1415   void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1416 
1417   void ucomisd(XMMRegister dst, XMMRegister    src) { Assembler::ucomisd(dst, src); }
1418   void ucomisd(XMMRegister dst, Address        src) { Assembler::ucomisd(dst, src); }
1419   void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1420 
1421   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1422   void xorpd(XMMRegister dst, XMMRegister    src);
1423   void xorpd(XMMRegister dst, Address        src) { Assembler::xorpd(dst, src); }
1424   void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1425 
1426   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1427   void xorps(XMMRegister dst, XMMRegister    src);
1428   void xorps(XMMRegister dst, Address        src) { Assembler::xorps(dst, src); }
1429   void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1430 
1431   // Shuffle Bytes
1432   void pshufb(XMMRegister dst, XMMRegister    src) { Assembler::pshufb(dst, src); }
1433   void pshufb(XMMRegister dst, Address        src) { Assembler::pshufb(dst, src); }
1434   void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1435   // AVX 3-operand instructions
1436 
1437   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddsd(dst, nds, src); }
1438   void vaddsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddsd(dst, nds, src); }
1439   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1440 
1441   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddss(dst, nds, src); }
1442   void vaddss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddss(dst, nds, src); }
1443   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1444 
1445   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1446   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1447 
1448   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len);
1449   void vpaddb(XMMRegister dst, XMMRegister nds, Address        src, int vector_len);
1450   void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1451 
1452   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1453   void vpaddw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1454 
1455   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1456   void vpaddd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1457   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1458 
1459   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1460   void vpand(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1461   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1462 
1463   using Assembler::vpbroadcastd;
1464   void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1465 
1466   using Assembler::vpbroadcastq;
1467   void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1468 
1469   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1470   void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1471 
1472   void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1473   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1474   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1475 
1476   // Vector compares
1477   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1478     Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1479   }
1480   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1481 
1482   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1483     Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1484   }
1485   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1486 
1487   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1488     Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1489   }
1490   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1491 
1492   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1493     Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1494   }
1495   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1496 
1497   void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1498 
1499   // Emit comparison instruction for the specified comparison predicate.
1500   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1501   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1502 
1503   void vpmovzxbw(XMMRegister dst, Address     src, int vector_len);
1504   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1505 
1506   void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1507 
1508   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1509   void vpmullw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1510 
1511   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1512   void vpmulld(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1513   void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1514 
1515   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1516   void vpsubb(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1517 
1518   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1519   void vpsubw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1520 
1521   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1522   void vpsraw(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1523 
1524   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1525   void evpsraq(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1526 
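       // For the masked shift helpers below, 'is_varshift' picks between the uniform-count
       // form (every element shifted by the count held in the low 64 bits of 'src') and the
       // per-element variable-shift form (evpsllv*/evpsrlv*/evpsrav*).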
1527   void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1528     if (!is_varshift) {
1529       Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1530     } else {
1531       Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1532     }
1533   }
1534   void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1535     if (!is_varshift) {
1536       Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1537     } else {
1538       Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1539     }
1540   }
1541   void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1542     if (!is_varshift) {
1543       Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1544     } else {
1545       Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1546     }
1547   }
1548   void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1549     if (!is_varshift) {
1550       Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1551     } else {
1552       Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1553     }
1554   }
1555   void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1556     if (!is_varshift) {
1557       Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1558     } else {
1559       Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1560     }
1561   }
1562 
1563   using Assembler::evpsrlq;
1564   void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1565     if (!is_varshift) {
1566       Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1567     } else {
1568       Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1569     }
1570   }
1571   void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1572     if (!is_varshift) {
1573       Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1574     } else {
1575       Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1576     }
1577   }
1578   void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1579     if (!is_varshift) {
1580       Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1581     } else {
1582       Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1583     }
1584   }
1585   using Assembler::evpsraq;
1586   void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1587     if (!is_varshift) {
1588       Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1589     } else {
1590       Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1591     }
1592   }
1593 
1594   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1595   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1596   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1597   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1598 
1599   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1600   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1601 
1602   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1603   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1604 
1605   void vptest(XMMRegister dst, XMMRegister src);
1606   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1607 
1608   void punpcklbw(XMMRegister dst, XMMRegister src);
1609   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1610 
1611   void pshufd(XMMRegister dst, Address src, int mode);
1612   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1613 
1614   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1615   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1616 
1617   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1618   void vandpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1619   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1620 
1621   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1622   void vandps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1623   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1624 
1625   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1626 
1627   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivsd(dst, nds, src); }
1628   void vdivsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivsd(dst, nds, src); }
1629   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1630 
1631   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivss(dst, nds, src); }
1632   void vdivss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivss(dst, nds, src); }
1633   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1634 
1635   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulsd(dst, nds, src); }
1636   void vmulsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulsd(dst, nds, src); }
1637   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1638 
1639   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulss(dst, nds, src); }
1640   void vmulss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulss(dst, nds, src); }
1641   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1642 
1643   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubsd(dst, nds, src); }
1644   void vsubsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubsd(dst, nds, src); }
1645   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1646 
1647   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubss(dst, nds, src); }
1648   void vsubss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubss(dst, nds, src); }
1649   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1650 
1651   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1652   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1653 
1654   // AVX Vector instructions
1655 
1656   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1657   void vxorpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1658   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1659 
1660   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1661   void vxorps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1662   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1663 
1664   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1665     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor requires AVX2; the 128-bit form only needs AVX
1666       Assembler::vpxor(dst, nds, src, vector_len);
1667     else
1668       Assembler::vxorpd(dst, nds, src, vector_len);
1669   }
1670   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1671     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor requires AVX2; the 128-bit form only needs AVX
1672       Assembler::vpxor(dst, nds, src, vector_len);
1673     else
1674       Assembler::vxorpd(dst, nds, src, vector_len);
1675   }
1676   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1677 
1678   // Simple version for AVX2 256bit vectors
1679   void vpxor(XMMRegister dst, XMMRegister src) {
1680     assert(UseAVX >= 2, "Should be at least AVX2");
1681     Assembler::vpxor(dst, dst, src, AVX_256bit);
1682   }
1683   void vpxor(XMMRegister dst, Address src) {
1684     assert(UseAVX >= 2, "Should be at least AVX2");
1685     Assembler::vpxor(dst, dst, src, AVX_256bit);
1686   }
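       // Illustrative use: 'vpxor(xmm0, xmm0)' emits the 256-bit vpxor ymm0, ymm0, ymm0,
       // a common idiom for zeroing a vector register.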
1687 
1688   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1689   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1690 
1691   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1692     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1693       Assembler::vinserti32x4(dst, nds, src, imm8);
1694     } else if (UseAVX > 1) {
1695       // vinserti128 is available only in AVX2
1696       Assembler::vinserti128(dst, nds, src, imm8);
1697     } else {
1698       Assembler::vinsertf128(dst, nds, src, imm8);
1699     }
1700   }
1701 
1702   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1703     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1704       Assembler::vinserti32x4(dst, nds, src, imm8);
1705     } else if (UseAVX > 1) {
1706       // vinserti128 is available only in AVX2
1707       Assembler::vinserti128(dst, nds, src, imm8);
1708     } else {
1709       Assembler::vinsertf128(dst, nds, src, imm8);
1710     }
1711   }
1712 
1713   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1714     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1715       Assembler::vextracti32x4(dst, src, imm8);
1716     } else if (UseAVX > 1) {
1717       // vextracti128 is available only in AVX2
1718       Assembler::vextracti128(dst, src, imm8);
1719     } else {
1720       Assembler::vextractf128(dst, src, imm8);
1721     }
1722   }
1723 
1724   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1725     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1726       Assembler::vextracti32x4(dst, src, imm8);
1727     } else if (UseAVX > 1) {
1728       // vextracti128 is available only in AVX2
1729       Assembler::vextracti128(dst, src, imm8);
1730     } else {
1731       Assembler::vextractf128(dst, src, imm8);
1732     }
1733   }
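
       // The vinserti128/vextracti128 wrappers above pick an encoding the current CPU can
       // execute (the EVEX 32x4 form, the AVX2 integer form, or the AVX1 floating-point
       // form); the integer and floating-point variants move the same 128 bits, so the
       // result is identical.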
1734 
1735   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1736   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1737     vinserti128(dst, dst, src, 1);
1738   }
1739   void vinserti128_high(XMMRegister dst, Address src) {
1740     vinserti128(dst, dst, src, 1);
1741   }
1742   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1743     vextracti128(dst, src, 1);
1744   }
1745   void vextracti128_high(Address dst, XMMRegister src) {
1746     vextracti128(dst, src, 1);
1747   }
1748 
1749   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1750     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1751       Assembler::vinsertf32x4(dst, dst, src, 1);
1752     } else {
1753       Assembler::vinsertf128(dst, dst, src, 1);
1754     }
1755   }
1756 
1757   void vinsertf128_high(XMMRegister dst, Address src) {
1758     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1759       Assembler::vinsertf32x4(dst, dst, src, 1);
1760     } else {
1761       Assembler::vinsertf128(dst, dst, src, 1);
1762     }
1763   }
1764 
1765   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1766     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1767       Assembler::vextractf32x4(dst, src, 1);
1768     } else {
1769       Assembler::vextractf128(dst, src, 1);
1770     }
1771   }
1772 
1773   void vextractf128_high(Address dst, XMMRegister src) {
1774     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1775       Assembler::vextractf32x4(dst, src, 1);
1776     } else {
1777       Assembler::vextractf128(dst, src, 1);
1778     }
1779   }
1780 
1781   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1782   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1783     Assembler::vinserti64x4(dst, dst, src, 1);
1784   }
1785   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1786     Assembler::vinsertf64x4(dst, dst, src, 1);
1787   }
1788   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1789     Assembler::vextracti64x4(dst, src, 1);
1790   }
1791   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1792     Assembler::vextractf64x4(dst, src, 1);
1793   }
1794   void vextractf64x4_high(Address dst, XMMRegister src) {
1795     Assembler::vextractf64x4(dst, src, 1);
1796   }
1797   void vinsertf64x4_high(XMMRegister dst, Address src) {
1798     Assembler::vinsertf64x4(dst, dst, src, 1);
1799   }
1800 
1801   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1802   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1803     vinserti128(dst, dst, src, 0);
1804   }
1805   void vinserti128_low(XMMRegister dst, Address src) {
1806     vinserti128(dst, dst, src, 0);
1807   }
1808   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1809     vextracti128(dst, src, 0);
1810   }
1811   void vextracti128_low(Address dst, XMMRegister src) {
1812     vextracti128(dst, src, 0);
1813   }
1814 
1815   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1816     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1817       Assembler::vinsertf32x4(dst, dst, src, 0);
1818     } else {
1819       Assembler::vinsertf128(dst, dst, src, 0);
1820     }
1821   }
1822 
1823   void vinsertf128_low(XMMRegister dst, Address src) {
1824     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1825       Assembler::vinsertf32x4(dst, dst, src, 0);
1826     } else {
1827       Assembler::vinsertf128(dst, dst, src, 0);
1828     }
1829   }
1830 
1831   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1832     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1833       Assembler::vextractf32x4(dst, src, 0);
1834     } else {
1835       Assembler::vextractf128(dst, src, 0);
1836     }
1837   }
1838 
1839   void vextractf128_low(Address dst, XMMRegister src) {
1840     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1841       Assembler::vextractf32x4(dst, src, 0);
1842     } else {
1843       Assembler::vextractf128(dst, src, 0);
1844     }
1845   }
1846 
1847   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1848   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1849     Assembler::vinserti64x4(dst, dst, src, 0);
1850   }
1851   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1852     Assembler::vinsertf64x4(dst, dst, src, 0);
1853   }
1854   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1855     Assembler::vextracti64x4(dst, src, 0);
1856   }
1857   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1858     Assembler::vextractf64x4(dst, src, 0);
1859   }
1860   void vextractf64x4_low(Address dst, XMMRegister src) {
1861     Assembler::vextractf64x4(dst, src, 0);
1862   }
1863   void vinsertf64x4_low(XMMRegister dst, Address src) {
1864     Assembler::vinsertf64x4(dst, dst, src, 0);
1865   }
1866 
1867   // Carry-Less Multiplication Quadword
1868   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1869     // 0x00 - multiply lower 64 bits [0:63]
1870     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1871   }
1872   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1873     // 0x11 - multiply upper 64 bits [64:127]
1874     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1875   }
1876   void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1877     // 0x10 - multiply nds[0:63] and src[64:127]
1878     Assembler::vpclmulqdq(dst, nds, src, 0x10);
1879   }
1880   void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1881     //0x01 - multiply nds[64:127] and src[0:63]
1882     Assembler::vpclmulqdq(dst, nds, src, 0x01);
1883   }
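       // These carry-less products are the building blocks of GF(2) polynomial multiplication,
       // as used, for example, by the CRC32 folding routines declared further below.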
1884 
1885   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1886     // 0x00 - multiply lower 64 bits [0:63]
1887     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1888   }
1889   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1890     // 0x11 - multiply upper 64 bits [64:127]
1891     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1892   }
1893 
1894   // AVX-512 mask operations.
1895   void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1896   void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1897   void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1898   void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1899   void kortest(uint masklen, KRegister src1, KRegister src2);
1900   void ktest(uint masklen, KRegister src1, KRegister src2);
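       // Illustrative use: 'kortest(masklen, k1, k1)' sets ZF exactly when the tested bits of
       // k1 are all zero, the usual "no lanes matched" check after a vector compare.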
1901 
1902   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1903   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1904 
1905   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1906   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1907 
1908   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1909   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1910 
1911   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1912   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1913 
1914   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1915   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1916   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1917   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1918 
1919   using Assembler::evpandq;
1920   void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1921 
1922   using Assembler::evpaddq;
1923   void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1924 
1925   using Assembler::evporq;
1926   void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1927 
1928   using Assembler::vpshufb;
1929   void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1930 
1931   using Assembler::vpor;
1932   void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1933 
1934   using Assembler::vpternlogq;
1935   void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1936 
1937   void cmov32( Condition cc, Register dst, Address  src);
1938   void cmov32( Condition cc, Register dst, Register src);
1939 
1940   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1941 
1942   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1943   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1944 
1945   void movoop(Register dst, jobject obj);
1946   void movoop(Address  dst, jobject obj, Register rscratch);
1947 
1948   void mov_metadata(Register dst, Metadata* obj);
1949   void mov_metadata(Address  dst, Metadata* obj, Register rscratch);
1950 
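       // movptr moves a pointer-sized value: it lowers to 64-bit moves on LP64 targets and
       // to 32-bit moves otherwise (compare cmovptr and pushptr elsewhere in this file).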
1951   void movptr(Register     dst, Register       src);
1952   void movptr(Register     dst, Address        src);
1953   void movptr(Register     dst, AddressLiteral src);
1954   void movptr(Register     dst, ArrayAddress   src);
1955   void movptr(Register     dst, intptr_t       src);
1956   void movptr(Address      dst, Register       src);
1957   void movptr(Address      dst, int32_t        imm);
1958   void movptr(Address      dst, intptr_t       src, Register rscratch);
1959   void movptr(ArrayAddress dst, Register       src, Register rscratch);
1960 
1961   void movptr(Register dst, RegisterOrConstant src) {
1962     if (src.is_constant()) movptr(dst, src.as_constant());
1963     else                   movptr(dst, src.as_register());
1964   }
1965 
1966 
1967   // to avoid hiding movl
1968   void mov32(Register       dst, AddressLiteral src);
1969   void mov32(AddressLiteral dst, Register        src, Register rscratch = noreg);
1970 
1971   // Import other mov() methods from the parent class or else
1972   // they will be hidden by the following overriding declaration.
1973   using Assembler::movdl;
1974   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1975 
1976   using Assembler::movq;
1977   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1978 
1979   // Can push value or effective address
1980   void pushptr(AddressLiteral src, Register rscratch);
1981 
1982   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1983   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1984 
1985   void pushoop(jobject obj, Register rscratch);
1986   void pushklass(Metadata* obj, Register rscratch);
1987 
1988   // sign-extend, as needed, a 32-bit value (l) to a pointer-sized element
1989   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1990   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1991 
1992 
1993  public:
1994   // clear memory of size 'cnt' qwords, starting at 'base';
1995   // if 'is_large' is set, do not try to produce a short loop
1996   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1997 
1998   // clear memory of constant size 'cnt' qwords, starting at 'base'
1999   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2000 
2001   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2002   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2003 
2004   // Fill primitive arrays
2005   void generate_fill(BasicType t, bool aligned,
2006                      Register to, Register value, Register count,
2007                      Register rtmp, XMMRegister xtmp);
2008 
2009   void encode_iso_array(Register src, Register dst, Register len,
2010                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2011                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2012 
2013 #ifdef _LP64
2014   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2015   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2016                              Register y, Register y_idx, Register z,
2017                              Register carry, Register product,
2018                              Register idx, Register kdx);
2019   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
2020                               Register yz_idx, Register idx,
2021                               Register carry, Register product, int offset);
2022   void multiply_128_x_128_bmi2_loop(Register y, Register z,
2023                                     Register carry, Register carry2,
2024                                     Register idx, Register jdx,
2025                                     Register yz_idx1, Register yz_idx2,
2026                                     Register tmp, Register tmp3, Register tmp4);
2027   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
2028                                Register yz_idx, Register idx, Register jdx,
2029                                Register carry, Register product,
2030                                Register carry2);
2031   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
2032                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
2033   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
2034                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2035   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
2036                             Register tmp2);
2037   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
2038                        Register rdxReg, Register raxReg);
2039   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
2040   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2041                        Register tmp3, Register tmp4);
2042   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2043                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2044 
2045   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
2046                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2047                Register raxReg);
2048   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
2049                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2050                Register raxReg);
2051   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
2052                            Register result, Register tmp1, Register tmp2,
2053                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
2054 #endif
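
       // The 64-bit-only helpers above back the BigInteger intrinsics (multiplyToLen,
       // squareToLen, mulAdd) and ArraysSupport.vectorizedMismatch.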
2055 
2056   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
2057   void update_byte_crc32(Register crc, Register val, Register table);
2058   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
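       // update_byte_crc32 folds a single byte into 'crc' via the lookup table;
       // kernel_crc32 is the bulk loop over 'buf'/'len'.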
2059 
2060 
2061 #ifdef _LP64
2062   void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
2063   void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
2064                                 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
2065                                 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
2066 #endif // _LP64
2067 
2068   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
2069   // Note on a naming convention:
2070   // Prefix w = register only used on a Westmere+ architecture
2071   // Prefix n = register only used on a Nehalem architecture
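       // (Westmere introduced the pclmulqdq instruction, which is what the
       // 'is_pclmulqdq_supported' paths below key on.)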
2072 #ifdef _LP64
2073   void crc32c_ipl_alg4(Register in_out, uint32_t n,
2074                        Register tmp1, Register tmp2, Register tmp3);
2075 #else
2076   void crc32c_ipl_alg4(Register in_out, uint32_t n,
2077                        Register tmp1, Register tmp2, Register tmp3,
2078                        XMMRegister xtmp1, XMMRegister xtmp2);
2079 #endif
2080   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2081                         Register in_out,
2082                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2083                         XMMRegister w_xtmp2,
2084                         Register tmp1,
2085                         Register n_tmp2, Register n_tmp3);
2086   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2087                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2088                        Register tmp1, Register tmp2,
2089                        Register n_tmp3);
2090   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
2091                          Register in_out1, Register in_out2, Register in_out3,
2092                          Register tmp1, Register tmp2, Register tmp3,
2093                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2094                          Register tmp4, Register tmp5,
2095                          Register n_tmp6);
2096   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
2097                             Register tmp1, Register tmp2, Register tmp3,
2098                             Register tmp4, Register tmp5, Register tmp6,
2099                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2100                             bool is_pclmulqdq_supported);
2101   // Fold 128-bit data chunk
2102   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2103   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
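       // The folding step follows the usual carry-less-multiply CRC scheme
       // (sketch; lane assignment and constants are simplified): the 128-bit CRC
       // state is multiplied by two precomputed constants held in xK and XORed
       // with the next 128 bits of input, keeping the state width constant while
       // the buffer is consumed:
       //
       //   xcrc' = clmul(xcrc.lo64, xK.lo64) ^ clmul(xcrc.hi64, xK.hi64) ^ next_128_bits
       //
       // A final Barrett reduction then collapses the folded state to a 32-bit CRC.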
2104 #ifdef _LP64
2105   // Fold 512-bit data chunk
2106   void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2107 #endif // _LP64
2108   // Fold 8-bit data
2109   void fold_8bit_crc32(Register crc, Register table, Register tmp);
2110   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
2111 
2112   // Compress char[] array to byte[].
2113   void char_array_compress(Register src, Register dst, Register len,
2114                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2115                            XMMRegister tmp4, Register tmp5, Register result,
2116                            KRegister mask1 = knoreg, KRegister mask2 = knoreg);
2117 
2118   // Inflate byte[] array to char[].
2119   void byte_array_inflate(Register src, Register dst, Register len,
2120                           XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
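       // Rough scalar equivalents of the two operations above (illustrative only):
       //
       //   // compress: copy chars that fit in one byte; bail out to the caller's
       //   // UTF-16 fallback if any char needs more than a byte.
       //   for (int i = 0; i < len; i++) {
       //     jchar c = src[i];
       //     if (c > 0xFF) break;             // not Latin-1 representable
       //     dst[i] = (jbyte)c;
       //   }
       //
       //   // inflate: zero-extend each byte back to a char.
       //   for (int i = 0; i < len; i++) {
       //     dst[i] = (jchar)(src[i] & 0xFF);
       //   }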
2121 
2122   void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2123                    Register length, Register temp, int vec_enc);
2124 
2125   void fill64_masked(uint shift, Register dst, int disp,
2126                      XMMRegister xmm, KRegister mask, Register length,
2127                      Register temp, bool use64byteVector = false);
2128 
2129   void fill32_masked(uint shift, Register dst, int disp,
2130                      XMMRegister xmm, KRegister mask, Register length,
2131                      Register temp);
2132 
2133   void fill32(Address dst, XMMRegister xmm);
2134 
2135   void fill32(Register dst, int disp, XMMRegister xmm);
2136 
2137   void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2138 
2139   void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
2140 
2141 #ifdef _LP64
2142   void convert_f2i(Register dst, XMMRegister src);
2143   void convert_d2i(Register dst, XMMRegister src);
2144   void convert_f2l(Register dst, XMMRegister src);
2145   void convert_d2l(Register dst, XMMRegister src);
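       // These implement the Java conversion rules for float/double -> int/long:
       // NaN becomes 0 and out-of-range values saturate to MIN_VALUE/MAX_VALUE.
       // Scalar sketch of the required semantics (not the emitted sequence, which
       // is assumed to use cvttss2si/cvttsd2si plus a fix-up of the special
       // "integer indefinite" result):
       //
       //   int64_t f2l(float f) {
       //     if (f != f)                 return 0;          // NaN
       //     if (f >= (float)INT64_MAX)  return INT64_MAX;  // saturate high
       //     if (f <= (float)INT64_MIN)  return INT64_MIN;  // saturate low
       //     return (int64_t)f;                             // in range: truncate
       //   }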
2146   void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2147   void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2148 
2149   void cache_wb(Address line);
2150   void cache_wbsync(bool is_pre);
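       // cache_wb() writes a dirty cache line back to memory and cache_wbsync()
       // emits the ordering fence around a sequence of such write-backs; this is
       // expected to map to CLWB/CLFLUSHOPT/CLFLUSH and SFENCE depending on CPU
       // support, backing the persistent-memory write-back support in the JDK.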
2151 
2152 #ifdef COMPILER2_OR_JVMCI
2153   void generate_fill_avx3(BasicType type, Register to, Register value,
2154                           Register count, Register rtmp, XMMRegister xtmp);
2155 #endif // COMPILER2_OR_JVMCI
2156 #endif // _LP64
2157 
2158   void vallones(XMMRegister dst, int vector_len);
2159 
2160   void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2161 
2162   void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
2163   void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
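       // Fast-path sketch of lightweight locking (illustrative pseudo-code; the
       // real sequence also deals with recursion, lock-stack capacity and the
       // exact mark-word encoding):
       //
       //   markWord m = obj->mark();
       //   if (m.is_unlocked() && cas(&obj->mark, m, m.set_locked())) {
       //     thread->lock_stack().push(obj);   // record ownership without a monitor
       //   } else {
       //     goto slow;                        // contended or inflated: runtime path
       //   }
       //
       // lightweight_unlock() reverses this: pop the lock stack and CAS the mark
       // word back to its unlocked state, branching to 'slow' on any mismatch.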
2164 };
2165 
2166 /**
2167  * class SkipIfEqual:
2168  *
2169  * Instantiating this class emits assembly code that jumps around any code
2170  * generated between the creation of the instance and its automatic
2171  * destruction at the end of the enclosing scope, depending on the value of
2172  * the flag passed to the constructor, which is checked at run-time.
2173  */
2174 class SkipIfEqual {
2175  private:
2176   MacroAssembler* _masm;
2177   Label _label;
2178 
2179  public:
2180   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
2181   ~SkipIfEqual();
2182 };
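     // A usage sketch (the flag below is illustrative, not taken from this file):
     // code emitted inside the scope is jumped over at run-time whenever the
     // watched flag equals the constructor's 'value' argument.
     //
     //   {
     //     SkipIfEqual skip(masm, &SomeBoolFlag, false, rscratch1);
     //     // ... instructions emitted here execute only when SomeBoolFlag is true
     //   }   // ~SkipIfEqual() binds the branch target here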
2183 
2184 #endif // CPU_X86_MACROASSEMBLER_X86_HPP