/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:

  enum KlassDecodeMode {
    KlassDecodeNone,
    KlassDecodeZero,
    KlassDecodeXor,
    KlassDecodeAdd
  };
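
  // A rough sketch (not the exact emitted code) of how a narrow Klass value n is
  // decoded under each mode, with base/shift taken from CompressedKlassPointers:
  //   KlassDecodeZero: klass =  n << shift
  //   KlassDecodeXor:  klass = (n << shift) ^ base
  //   KlassDecodeAdd:  klass = (n << shift) + base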

  // Return the current narrow Klass pointer decode mode. Initialized on first call.
  static KlassDecodeMode klass_decode_mode();

  // Given an arbitrary base address, return the KlassDecodeMode that would be used. Return KlassDecodeNone
  // if base address is not valid for encoding.
  static KlassDecodeMode klass_decode_mode_for_base(address base);

  // Returns a static string
  static const char* describe_klass_decode_mode(KlassDecodeMode mode);

 private:

  static KlassDecodeMode _klass_decode_mode;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);
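
  // Example (illustrative only; "__" is the usual masm-> shorthand):
  //   __ null_check(rax, oopDesc::klass_offset_in_bytes()); // small known offset: no code, the access itself traps
  //   __ null_check(rax);                                   // offset unknown: an explicit access of [rax] is emitted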

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
        (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);
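
  // Example (illustrative): increment(rbx) picks an inc/add form sized for the
  // platform (64-bit: incq/addq; 32-bit: incl/addl), and decrement(rcx, 8)
  // likewise picks the best matching dec/sub form for the given value.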

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
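
  // Example (illustrative):
  //   __ movflt(xmm0, xmm1);              // movaps or movss, per UseXmmRegToRegMoveAll
  //   __ movdbl(xmm2, Address(rsp, 8));   // movsd or movlpd, per UseXmmLoadAndClearUpper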

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);

  // A 5-byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  void move32_64(VMRegPair src, VMRegPair dst);
  void long_move(VMRegPair src, VMRegPair dst);
  void float_move(VMRegPair src, VMRegPair dst);
  void double_move(VMRegPair src, VMRegPair dst);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. The call_VM variants correspond
  // to ENTRY/ENTRY_X entry points while the call_VM_leaf variants correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);
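
  // Example (illustrative; the runtime entry and argument register are placeholders):
  //   __ call_VM(rax,
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),
  //              rbx);
  // This sets up the last Java frame, passes the current thread as the implicit
  // first argument, calls the entry and, by default, checks for pending exceptions
  // on return; any oop result is fetched into the given oop_result register (rax here).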

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp, bool null_check_src = false);
#ifdef _LP64
  void load_nklass(Register dst, Register src);
#else
  void store_klass(Register dst, Register src);
#endif

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);
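
  // Example (illustrative; the field offset is a placeholder, and some GC barrier
  // sets may require the optional temp registers to be supplied):
  //   __ load_heap_oop(rax, Address(rdx, field_offset));
  //   __ store_heap_oop(Address(rdx, field_offset), rax);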

#ifdef _LP64
  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve.

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);
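
  // A sketch of the compressed-oop arithmetic the encodings above implement:
  //   narrow_oop = (oop - CompressedOops::base()) >> CompressedOops::shift()
  // The "_not_null" variants assume the oop is non-null and skip the null handling.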

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // If the heap base register is used, reinitialize it with the correct value.
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
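
  // The "special case" is min_jint / -1 (respectively min_jlong / -1): hardware
  // idiv raises #DE there, while the JVM spec requires the result min_jint
  // (min_jlong) with remainder 0, so the corrected forms test for it up front.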

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
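
  // Rounding toward zero matters for negative inputs: e.g. -7 / 4 must be -1,
  // whereas a plain arithmetic shift of -7 by 2 would give -2, so the emitted
  // code adjusts negative values before shifting.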

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round reg up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);
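
  // e.g. round_to(reg, 16) turns 1..16 into 16 and 17..32 into 32
  // (a sketch of the effect: reg = (reg + modulus - 1) & -modulus).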

private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because the C1/interpreter callers
  // - always save/restore the FPU state as a whole
  // - do not care about the AVX-512 opmask registers
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }
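
  // Example (illustrative; the leaf entry and argument register are placeholders):
  //   __ push_call_clobbered_registers();
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_runtime_leaf), rbx);
  //   __ pop_call_clobbered_registers();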

  // allocation
  void eden_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
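
  // Example (illustrative, 64-bit; the size constant and temp registers are
  // placeholders, and on 32-bit the thread register must be materialized first):
  //   Label slow;
  //   __ tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes, rbx, rcx, slow);
  //   // rax now points at the new, still uninitialized object; "slow" takes the runtime path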

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
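
  // Example (illustrative; registers are placeholders):
  //   Label L_ok;
  //   __ check_klass_subtype(rbx /* sub_klass */, rax /* super_klass */, rcx /* temp */, L_ok);
  //   __ stop("unexpected subtype check failure");   // failure falls through to here
  //   __ bind(L_ok);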

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "caller must pass a positive offset");
    movl(Address(rsp, (-offset)), rax);
  }
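
  // e.g. bang_stack_with_offset(os::vm_page_size()) stores to the word one page
  // below the current rsp, provoking a fault if that stack page is not usable.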

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.  Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst,       src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4-byte immediate value even if it fits into 8 bits
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst,       src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry, because that is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst, because that is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register scratchReg = rscratch1);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register  reg_old_h,
    Register  reg_a,
    Register  reg_b,
    Register  reg_c,
    Register  reg_d,
    Register  reg_e,
    Register  reg_f,
    Register  reg_g,
    Register  reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
        XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
        XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
        XMMRegister xmm_2,     /* ymm6 */
        XMMRegister xmm_3,     /* ymm7 */
        Register    reg_a,      /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
        Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
        Register    reg_c,      /* edi */
        Register    reg_d,      /* esi */
        Register    reg_e,      /* r8d */
        Register    reg_f,      /* r9d */
        Register    reg_g,      /* r10d */
        Register    reg_h,      /* r11d */
        int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
  void gfmul_avx512(XMMRegister ghash, XMMRegister hkey);
  void generateHtbl_48_block_zmm(Register htbl, Register avx512_subkeyHtbl);
  void ghash16_encrypt16_parallel(Register key, Register subkeyHtbl, XMMRegister ctr_blockx,
                                  XMMRegister aad_hashx, Register in, Register out, Register data, Register pos, bool reduction,
                                  XMMRegister addmask, bool no_ghash_input, Register rounds, Register ghash_pos,
                                  bool final_reduction, int index, XMMRegister counter_inc_mask);
public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);
  void aesgcm_encrypt(Register in, Register len, Register ct, Register out, Register key,
                      Register state, Register subkeyHtbl, Register avx512_subkeyHtbl, Register counter);

#endif

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif
1103 
1104 private:
1105 
1106   // these are private because users should be doing movflt/movdbl
1107 
1108   void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1109   void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
1110   void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
1111   void movss(XMMRegister dst, AddressLiteral src);
1112 
1113   void movlpd(XMMRegister dst, Address src)    {Assembler::movlpd(dst, src); }
1114   void movlpd(XMMRegister dst, AddressLiteral src);
1115 
1116 public:
1117 
1118   void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
1119   void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
1120   void addsd(XMMRegister dst, AddressLiteral src);
1121 
1122   void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
1123   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1124   void addss(XMMRegister dst, AddressLiteral src);
1125 
1126   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1127   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1128   void addpd(XMMRegister dst, AddressLiteral src);
1129 
1130   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1131   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1132   void divsd(XMMRegister dst, AddressLiteral src);
1133 
1134   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1135   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1136   void divss(XMMRegister dst, AddressLiteral src);
1137 
1138   // Move Unaligned Double Quadword
1139   void movdqu(Address     dst, XMMRegister src);
1140   void movdqu(XMMRegister dst, Address src);
1141   void movdqu(XMMRegister dst, XMMRegister src);
1142   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1143 
1144   void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1145   void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1146   void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1147   void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1148   void kmovwl(Address dst,  KRegister src) { Assembler::kmovwl(dst, src); }
1149   void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1150 
1151   void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1152   void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1153   void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1154   void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1155   void kmovql(Address  dst, KRegister src) { Assembler::kmovql(dst, src); }
1156   void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1157 
1158   // Safe move operation: lowers to 16-bit mask moves on targets supporting only the
1159   // AVX512F feature, and to 64-bit mask moves on targets that also support AVX512BW.
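       // Illustrative dispatch for the kmov() wrappers below (a sketch; the real
       // implementation lives in macroAssembler_x86.cpp):
       //   if (VM_Version::supports_avx512bw()) { kmovql(dst, src); } else { kmovwl(dst, src); }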
1160   void kmov(Address  dst, KRegister src);
1161   void kmov(KRegister dst, Address src);
1162   void kmov(KRegister dst, KRegister src);
1163   void kmov(Register dst, KRegister src);
1164   void kmov(KRegister dst, Register src);
1165 
1166   // AVX Unaligned forms
1167   void vmovdqu(Address     dst, XMMRegister src);
1168   void vmovdqu(XMMRegister dst, Address src);
1169   void vmovdqu(XMMRegister dst, XMMRegister src);
1170   void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1171   void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg, int vector_len);
1172 
1173 
1174   // AVX512 Unaligned
1175   void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);
1176   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);
1177 
1178   void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1179   void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1180   void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1181   void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1182   void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1183   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1184 
1185   void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1186   void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1187   void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1188   void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1189   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1190 
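       // The register-to-register forms of evmovdqul/evmovdquq below elide the move when
       // source and destination are the same register (and, for the masked forms, when no
       // mask (k0) is supplied).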
1191   void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1192   void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1193   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1194      if (dst->encoding() == src->encoding()) return;
1195      Assembler::evmovdqul(dst, src, vector_len);
1196   }
1197   void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1198   void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1199   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1200     if (dst->encoding() == src->encoding() && mask == k0) return;
1201     Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1202   }
1203   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1204 
1205   void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1206   void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1207   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
1208   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1209     if (dst->encoding() == src->encoding()) return;
1210     Assembler::evmovdquq(dst, src, vector_len);
1211   }
1212   void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1213   void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1214   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1215     if (dst->encoding() == src->encoding() && mask == k0) return;
1216     Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1217   }
1218   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1219 
1220   // Move Aligned Double Quadword
1221   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1222   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1223   void movdqa(XMMRegister dst, AddressLiteral src);
1224 
1225   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1226   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1227   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1228   void movsd(XMMRegister dst, AddressLiteral src);
1229 
1230   using Assembler::vmovddup;
1231   void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
1232 
1233   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1234   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1235   void mulpd(XMMRegister dst, AddressLiteral src);
1236 
1237   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1238   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1239   void mulsd(XMMRegister dst, AddressLiteral src);
1240 
1241   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
1242   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
1243   void mulss(XMMRegister dst, AddressLiteral src);
1244 
1245   // Carry-Less Multiplication Quadword
1246   void pclmulldq(XMMRegister dst, XMMRegister src) {
1247     // 0x00 - multiply lower 64 bits [0:63]
1248     Assembler::pclmulqdq(dst, src, 0x00);
1249   }
1250   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1251     // 0x11 - multiply upper 64 bits [64:127]
1252     Assembler::pclmulqdq(dst, src, 0x11);
1253   }
1254 
1255   void pcmpeqb(XMMRegister dst, XMMRegister src);
1256   void pcmpeqw(XMMRegister dst, XMMRegister src);
1257 
1258   void pcmpestri(XMMRegister dst, Address src, int imm8);
1259   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1260 
1261   void pmovzxbw(XMMRegister dst, XMMRegister src);
1262   void pmovzxbw(XMMRegister dst, Address src);
1263 
1264   void pmovmskb(Register dst, XMMRegister src);
1265 
1266   void ptest(XMMRegister dst, XMMRegister src);
1267 
1268   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
1269   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
1270   void sqrtsd(XMMRegister dst, AddressLiteral src);
1271 
1272   void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode)    { Assembler::roundsd(dst, src, rmode); }
1273   void roundsd(XMMRegister dst, Address src, int32_t rmode)        { Assembler::roundsd(dst, src, rmode); }
1274   void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);
1275 
1276   void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
1277   void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
1278   void sqrtss(XMMRegister dst, AddressLiteral src);
1279 
1280   void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
1281   void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
1282   void subsd(XMMRegister dst, AddressLiteral src);
1283 
1284   void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
1285   void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
1286   void subss(XMMRegister dst, AddressLiteral src);
1287 
1288   void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1289   void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
1290   void ucomiss(XMMRegister dst, AddressLiteral src);
1291 
1292   void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1293   void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
1294   void ucomisd(XMMRegister dst, AddressLiteral src);
1295 
1296   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1297   void xorpd(XMMRegister dst, XMMRegister src);
1298   void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
1299   void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1300 
1301   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1302   void xorps(XMMRegister dst, XMMRegister src);
1303   void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
1304   void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1305 
1306   // Shuffle Bytes
1307   void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1308   void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
1309   void pshufb(XMMRegister dst, AddressLiteral src);
1310   // AVX 3-operand instructions
1311 
1312   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1313   void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
1314   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1315 
1316   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1317   void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
1318   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1319 
1320   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1321   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1322 
1323   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1324   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1325   void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1326 
1327   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1328   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1329 
1330   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1331   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1332   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1333 
1334   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1335   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1336   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1337 
1338   void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1339   void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1340 
1341   using Assembler::vbroadcastsd;
1342   void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
1343 
1344   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1345 
1346   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1347   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1348 
1349   // Vector compares
1350   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1351                int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1352   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1353                int comparison, bool is_signed, int vector_len, Register scratch_reg);
1354   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1355                int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1356   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1357                int comparison, bool is_signed, int vector_len, Register scratch_reg);
1358   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1359                int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1360   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1361                int comparison, bool is_signed, int vector_len, Register scratch_reg);
1362   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1363                int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1364   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1365                int comparison, bool is_signed, int vector_len, Register scratch_reg);
1366 
1367   void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1368 
1369   // Emit comparison instruction for the specified comparison predicate.
1370   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1371   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1372 
1373   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1374   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1375 
1376   void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1377 
1378   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1379   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1380   void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1381     Assembler::vpmulld(dst, nds, src, vector_len);
1382   }
1383   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1384     Assembler::vpmulld(dst, nds, src, vector_len);
1385   }
1386   void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1387 
1388   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1389   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1390 
1391   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1392   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1393 
1394   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1395   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1396 
1397   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1398   void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1399 
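       // Masked vector shifts. When 'is_varshift' is true the per-element variable-shift
       // forms (evpsllv*/evpsrlv*/evpsrav*) are emitted instead of the uniform-count forms.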
1400   void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1401     if (!is_varshift) {
1402       Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1403     } else {
1404       Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1405     }
1406   }
1407   void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1408     if (!is_varshift) {
1409       Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1410     } else {
1411       Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1412     }
1413   }
1414   void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1415     if (!is_varshift) {
1416       Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1417     } else {
1418       Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1419     }
1420   }
1421   void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1422     if (!is_varshift) {
1423       Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1424     } else {
1425       Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1426     }
1427   }
1428   void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1429     if (!is_varshift) {
1430       Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1431     } else {
1432       Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1433     }
1434   }
1435   void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1436     if (!is_varshift) {
1437       Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1438     } else {
1439       Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1440     }
1441   }
1442   void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1443     if (!is_varshift) {
1444       Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1445     } else {
1446       Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1447     }
1448   }
1449   void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1450     if (!is_varshift) {
1451       Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1452     } else {
1453       Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1454     }
1455   }
1456   void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1457     if (!is_varshift) {
1458       Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1459     } else {
1460       Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1461     }
1462   }
1463 
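       // Masked vector signed min/max; the element type is selected by 'type'.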
1464   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1465   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1466   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1467   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1468 
1469   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1470   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1471 
1472   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1473   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1474 
1475   void vptest(XMMRegister dst, XMMRegister src);
1476   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1477 
1478   void punpcklbw(XMMRegister dst, XMMRegister src);
1479   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1480 
1481   void pshufd(XMMRegister dst, Address src, int mode);
1482   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1483 
1484   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1485   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1486 
1487   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1488   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1489   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1490 
1491   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1492   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1493   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1494 
1495   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1496 
1497   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1498   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1499   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1500 
1501   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1502   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1503   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1504 
1505   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1506   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1507   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1508 
1509   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1510   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1511   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1512 
1513   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1514   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1515   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1516 
1517   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1518   void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
1519   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1520 
1521   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1522   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1523 
1524   // AVX Vector instructions
1525 
1526   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1527   void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1528   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1529 
1530   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1531   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1532   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1533 
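       // Integer vector XOR with an AVX1 fallback: 256-bit vpxor requires AVX2, so on
       // AVX1-only targets the bitwise-identical vxorpd is emitted instead.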
1534   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1535     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1536       Assembler::vpxor(dst, nds, src, vector_len);
1537     else
1538       Assembler::vxorpd(dst, nds, src, vector_len);
1539   }
1540   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1541     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1542       Assembler::vpxor(dst, nds, src, vector_len);
1543     else
1544       Assembler::vxorpd(dst, nds, src, vector_len);
1545   }
1546   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1547 
1548   // Simple version for AVX2 256bit vectors
1549   void vpxor(XMMRegister dst, XMMRegister src) {
1550     assert(UseAVX >= 2, "Should be at least AVX2");
1551     Assembler::vpxor(dst, dst, src, AVX_256bit);
1552   }
1553   void vpxor(XMMRegister dst, Address src) {
1554     assert(UseAVX >= 2, "Should be at least AVX2");
1555     Assembler::vpxor(dst, dst, src, AVX_256bit);
1556   }
1557 
1558   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1559   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1560 
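       // 128-bit lane insert/extract helpers. On AVX-512 targets without VL the 32x4 forms
       // are used, on AVX2 the integer 128-bit forms, and otherwise the AVX1 FP forms,
       // which move the same bits.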
1561   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1562     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1563       Assembler::vinserti32x4(dst, nds, src, imm8);
1564     } else if (UseAVX > 1) {
1565       // vinserti128 is available only in AVX2
1566       Assembler::vinserti128(dst, nds, src, imm8);
1567     } else {
1568       Assembler::vinsertf128(dst, nds, src, imm8);
1569     }
1570   }
1571 
1572   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1573     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1574       Assembler::vinserti32x4(dst, nds, src, imm8);
1575     } else if (UseAVX > 1) {
1576       // vinserti128 is available only in AVX2
1577       Assembler::vinserti128(dst, nds, src, imm8);
1578     } else {
1579       Assembler::vinsertf128(dst, nds, src, imm8);
1580     }
1581   }
1582 
1583   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1584     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1585       Assembler::vextracti32x4(dst, src, imm8);
1586     } else if (UseAVX > 1) {
1587       // vextracti128 is available only in AVX2
1588       Assembler::vextracti128(dst, src, imm8);
1589     } else {
1590       Assembler::vextractf128(dst, src, imm8);
1591     }
1592   }
1593 
1594   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1595     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1596       Assembler::vextracti32x4(dst, src, imm8);
1597     } else if (UseAVX > 1) {
1598       // vextracti128 is available only in AVX2
1599       Assembler::vextracti128(dst, src, imm8);
1600     } else {
1601       Assembler::vextractf128(dst, src, imm8);
1602     }
1603   }
1604 
1605   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1606   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1607     vinserti128(dst, dst, src, 1);
1608   }
1609   void vinserti128_high(XMMRegister dst, Address src) {
1610     vinserti128(dst, dst, src, 1);
1611   }
1612   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1613     vextracti128(dst, src, 1);
1614   }
1615   void vextracti128_high(Address dst, XMMRegister src) {
1616     vextracti128(dst, src, 1);
1617   }
1618 
1619   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1620     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1621       Assembler::vinsertf32x4(dst, dst, src, 1);
1622     } else {
1623       Assembler::vinsertf128(dst, dst, src, 1);
1624     }
1625   }
1626 
1627   void vinsertf128_high(XMMRegister dst, Address src) {
1628     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1629       Assembler::vinsertf32x4(dst, dst, src, 1);
1630     } else {
1631       Assembler::vinsertf128(dst, dst, src, 1);
1632     }
1633   }
1634 
1635   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1636     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1637       Assembler::vextractf32x4(dst, src, 1);
1638     } else {
1639       Assembler::vextractf128(dst, src, 1);
1640     }
1641   }
1642 
1643   void vextractf128_high(Address dst, XMMRegister src) {
1644     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1645       Assembler::vextractf32x4(dst, src, 1);
1646     } else {
1647       Assembler::vextractf128(dst, src, 1);
1648     }
1649   }
1650 
1651   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1652   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1653     Assembler::vinserti64x4(dst, dst, src, 1);
1654   }
1655   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1656     Assembler::vinsertf64x4(dst, dst, src, 1);
1657   }
1658   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1659     Assembler::vextracti64x4(dst, src, 1);
1660   }
1661   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1662     Assembler::vextractf64x4(dst, src, 1);
1663   }
1664   void vextractf64x4_high(Address dst, XMMRegister src) {
1665     Assembler::vextractf64x4(dst, src, 1);
1666   }
1667   void vinsertf64x4_high(XMMRegister dst, Address src) {
1668     Assembler::vinsertf64x4(dst, dst, src, 1);
1669   }
1670 
1671   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1672   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1673     vinserti128(dst, dst, src, 0);
1674   }
1675   void vinserti128_low(XMMRegister dst, Address src) {
1676     vinserti128(dst, dst, src, 0);
1677   }
1678   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1679     vextracti128(dst, src, 0);
1680   }
1681   void vextracti128_low(Address dst, XMMRegister src) {
1682     vextracti128(dst, src, 0);
1683   }
1684 
1685   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1686     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1687       Assembler::vinsertf32x4(dst, dst, src, 0);
1688     } else {
1689       Assembler::vinsertf128(dst, dst, src, 0);
1690     }
1691   }
1692 
1693   void vinsertf128_low(XMMRegister dst, Address src) {
1694     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1695       Assembler::vinsertf32x4(dst, dst, src, 0);
1696     } else {
1697       Assembler::vinsertf128(dst, dst, src, 0);
1698     }
1699   }
1700 
1701   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1702     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1703       Assembler::vextractf32x4(dst, src, 0);
1704     } else {
1705       Assembler::vextractf128(dst, src, 0);
1706     }
1707   }
1708 
1709   void vextractf128_low(Address dst, XMMRegister src) {
1710     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1711       Assembler::vextractf32x4(dst, src, 0);
1712     } else {
1713       Assembler::vextractf128(dst, src, 0);
1714     }
1715   }
1716 
1717   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1718   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1719     Assembler::vinserti64x4(dst, dst, src, 0);
1720   }
1721   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1722     Assembler::vinsertf64x4(dst, dst, src, 0);
1723   }
1724   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1725     Assembler::vextracti64x4(dst, src, 0);
1726   }
1727   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1728     Assembler::vextractf64x4(dst, src, 0);
1729   }
1730   void vextractf64x4_low(Address dst, XMMRegister src) {
1731     Assembler::vextractf64x4(dst, src, 0);
1732   }
1733   void vinsertf64x4_low(XMMRegister dst, Address src) {
1734     Assembler::vinsertf64x4(dst, dst, src, 0);
1735   }
1736 
1737   // Carry-Less Multiplication Quadword
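       // In the pclmulqdq immediate, bit 0 selects the low/high qword of the first source
       // (nds) and bit 4 selects the low/high qword of the second source (src).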
1738   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1739     // 0x00 - multiply lower 64 bits [0:63]
1740     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1741   }
1742   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1743     // 0x11 - multiply upper 64 bits [64:127]
1744     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1745   }
1746   void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1747     // 0x10 - multiply nds[0:63] and src[64:127]
1748     Assembler::vpclmulqdq(dst, nds, src, 0x10);
1749   }
1750   void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1751     //0x01 - multiply nds[64:127] and src[0:63]
1752     Assembler::vpclmulqdq(dst, nds, src, 0x01);
1753   }
1754 
1755   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1756     // 0x00 - multiply lower 64 bits [0:63]
1757     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1758   }
1759   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1760     // 0x11 - multiply upper 64 bits [64:127]
1761     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1762   }
1763 
1764   // AVX-512 mask operations.
1765   void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1766   void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1767   void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1768   void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1769   void kortest(uint masklen, KRegister src1, KRegister src2);
1770   void ktest(uint masklen, KRegister src1, KRegister src2);
1771 
1772   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1773   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1774 
1775   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1776   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1777 
1778   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1779   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1780 
1781   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1782   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1783 
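       // Masked vector rotates: evrold rotates left, evrord rotates right; the element
       // size is selected by 'type', and the count is either the immediate 'shift' or
       // taken per element from 'src2'.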
1784   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1785   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1786   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1787   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1788 
1789   void alltrue(Register dst, uint masklen, KRegister src1, KRegister src2, KRegister kscratch);
1790   void anytrue(Register dst, uint masklen, KRegister src, KRegister kscratch);
1791 
1792   void cmov32( Condition cc, Register dst, Address  src);
1793   void cmov32( Condition cc, Register dst, Register src);
1794 
1795   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1796 
1797   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1798   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1799 
1800   void movoop(Register dst, jobject obj);
1801   void movoop(Address dst, jobject obj);
1802 
1803   void mov_metadata(Register dst, Metadata* obj);
1804   void mov_metadata(Address dst, Metadata* obj);
1805 
1806   void movptr(ArrayAddress dst, Register src);
1807   // can this do an lea?
1808   void movptr(Register dst, ArrayAddress src);
1809 
1810   void movptr(Register dst, Address src);
1811 
1812 #ifdef _LP64
1813   void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
1814 #else
1815   void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
1816 #endif
1817 
1818   void movptr(Register dst, intptr_t src);
1819   void movptr(Register dst, Register src);
1820   void movptr(Address dst, intptr_t src);
1821 
1822   void movptr(Address dst, Register src);
1823 
1824   void movptr(Register dst, RegisterOrConstant src) {
1825     if (src.is_constant()) movptr(dst, src.as_constant());
1826     else                   movptr(dst, src.as_register());
1827   }
1828 
1829 #ifdef _LP64
1830   // Generally the next two are only used for moving NULL, although there are
1831   // situations when initializing the mark word where they could be used.
1832   // They are dangerous.
1833 
1834   // They exist only on LP64, where int32_t and intptr_t are distinct types;
1835   // on 32-bit they would be the same and the declarations would be ambiguous.
1836 
1837   void movptr(Address dst, int32_t imm32);
1838   void movptr(Register dst, int32_t imm32);
1839 #endif // _LP64
1840 
1841   // to avoid hiding movl
1842   void mov32(AddressLiteral dst, Register src);
1843   void mov32(Register dst, AddressLiteral src);
1844 
1845   // to avoid hiding movb
1846   void movbyte(ArrayAddress dst, int src);
1847 
1848   // Import other mov() methods from the parent class, or else
1849   // they will be hidden by the following overriding declarations.
1850   using Assembler::movdl;
1851   using Assembler::movq;
1852   void movdl(XMMRegister dst, AddressLiteral src);
1853   void movq(XMMRegister dst, AddressLiteral src);
1854 
1855   // Can push value or effective address
1856   void pushptr(AddressLiteral src);
1857 
1858   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1859   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1860 
1861   void pushoop(jobject obj);
1862   void pushklass(Metadata* obj);
1863 
1864   // Sign-extend an 'l' (32-bit) value to a ptr-sized element as needed.
1865   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1866   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1867 
1868 
1869  public:
1870   // C2 compiled method's prolog code.
1871   void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);
1872 
1873   // clear memory of size 'cnt' qwords, starting at 'base';
1874   // if 'is_large' is set, do not try to produce a short loop
1875   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1876 
1877   // clear memory of a constant size of 'cnt' qwords, starting at 'base';
1878   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1879 
1880   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1881   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1882 
1883   // Fill primitive arrays
1884   void generate_fill(BasicType t, bool aligned,
1885                      Register to, Register value, Register count,
1886                      Register rtmp, XMMRegister xtmp);
1887 
1888   void encode_iso_array(Register src, Register dst, Register len,
1889                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1890                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1891 
1892 #ifdef _LP64
1893   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1894   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1895                              Register y, Register y_idx, Register z,
1896                              Register carry, Register product,
1897                              Register idx, Register kdx);
1898   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1899                               Register yz_idx, Register idx,
1900                               Register carry, Register product, int offset);
1901   void multiply_128_x_128_bmi2_loop(Register y, Register z,
1902                                     Register carry, Register carry2,
1903                                     Register idx, Register jdx,
1904                                     Register yz_idx1, Register yz_idx2,
1905                                     Register tmp, Register tmp3, Register tmp4);
1906   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1907                                Register yz_idx, Register idx, Register jdx,
1908                                Register carry, Register product,
1909                                Register carry2);
1910   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1911                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1912   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1913                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1914   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1915                             Register tmp2);
1916   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1917                        Register rdxReg, Register raxReg);
1918   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1919   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1920                        Register tmp3, Register tmp4);
1921   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1922                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1923 
1924   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1925                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1926                Register raxReg);
1927   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1928                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1929                Register raxReg);
1930   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1931                            Register result, Register tmp1, Register tmp2,
1932                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1933 #endif
1934 
1935   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1936   void update_byte_crc32(Register crc, Register val, Register table);
1937   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1938 
1939 
1940 #ifdef _LP64
1941   void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1942   void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1943                                 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1944                                 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1945   void updateBytesAdler32(Register adler32, Register buf, Register length, XMMRegister shuf0, XMMRegister shuf1, ExternalAddress scale);
1946 #endif // _LP64
1947 
1948   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1949   // Note on the naming convention:
1950   // Prefix w = register only used on a Westmere+ architecture
1951   // Prefix n = register only used on a Nehalem architecture
1952 #ifdef _LP64
1953   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1954                        Register tmp1, Register tmp2, Register tmp3);
1955 #else
1956   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1957                        Register tmp1, Register tmp2, Register tmp3,
1958                        XMMRegister xtmp1, XMMRegister xtmp2);
1959 #endif
1960   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1961                         Register in_out,
1962                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1963                         XMMRegister w_xtmp2,
1964                         Register tmp1,
1965                         Register n_tmp2, Register n_tmp3);
1966   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1967                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1968                        Register tmp1, Register tmp2,
1969                        Register n_tmp3);
1970   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1971                          Register in_out1, Register in_out2, Register in_out3,
1972                          Register tmp1, Register tmp2, Register tmp3,
1973                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1974                          Register tmp4, Register tmp5,
1975                          Register n_tmp6);
1976   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1977                             Register tmp1, Register tmp2, Register tmp3,
1978                             Register tmp4, Register tmp5, Register tmp6,
1979                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1980                             bool is_pclmulqdq_supported);
1981   // Fold 128-bit data chunk
1982   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1983   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1984 #ifdef _LP64
1985   // Fold 512-bit data chunk
1986   void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
1987 #endif // _LP64
1988   // Fold 8-bit data
1989   void fold_8bit_crc32(Register crc, Register table, Register tmp);
1990   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1991 
1992   // Compress char[] array to byte[].
1993   void char_array_compress(Register src, Register dst, Register len,
1994                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1995                            XMMRegister tmp4, Register tmp5, Register result,
1996                            KRegister mask1 = knoreg, KRegister mask2 = knoreg);
1997 
1998   // Inflate byte[] array to char[].
1999   void byte_array_inflate(Register src, Register dst, Register len,
2000                           XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
2001 
2002   void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2003                    Register length, Register temp, int vec_enc);
2004 
2005   void fill64_masked(uint shift, Register dst, int disp,
2006                          XMMRegister xmm, KRegister mask, Register length,
2007                          Register temp, bool use64byteVector = false);
2008 
2009   void fill32_masked(uint shift, Register dst, int disp,
2010                          XMMRegister xmm, KRegister mask, Register length,
2011                          Register temp);
2012 
2013   void fill32(Register dst, int disp, XMMRegister xmm);
2014 
2015   void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
2016 
2017 #ifdef _LP64
2018   void convert_f2i(Register dst, XMMRegister src);
2019   void convert_d2i(Register dst, XMMRegister src);
2020   void convert_f2l(Register dst, XMMRegister src);
2021   void convert_d2l(Register dst, XMMRegister src);
2022   void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2023   void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2024 
2025   void cache_wb(Address line);
2026   void cache_wbsync(bool is_pre);
2027 
2028 #if COMPILER2_OR_JVMCI
2029   void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
2030                                     Register to, Register count, int shift,
2031                                     Register index, Register temp,
2032                                     bool use64byteVector, Label& L_entry, Label& L_exit);
2033 
2034   void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from,
2035                                              Register to, Register start_index, Register end_index,
2036                                              Register count, int shift, Register temp,
2037                                              bool use64byteVector, Label& L_entry, Label& L_exit);
2038 
2039   void copy64_masked_avx(Register dst, Register src, XMMRegister xmm,
2040                          KRegister mask, Register length, Register index,
2041                          Register temp, int shift = Address::times_1, int offset = 0,
2042                          bool use64byteVector = false);
2043 
2044   void copy32_masked_avx(Register dst, Register src, XMMRegister xmm,
2045                          KRegister mask, Register length, Register index,
2046                          Register temp, int shift = Address::times_1, int offset = 0);
2047 
2048   void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm,
2049                   int shift = Address::times_1, int offset = 0);
2050 
2051   void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm,
2052                   bool conjoint, int shift = Address::times_1, int offset = 0,
2053                   bool use64byteVector = false);
2054 
2055   void generate_fill_avx3(BasicType type, Register to, Register value,
2056                           Register count, Register rtmp, XMMRegister xtmp);
2057 
2058 #endif // COMPILER2_OR_JVMCI
2059 
2060 #endif // _LP64
2061 
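       // Fill every bit of 'dst' with ones, e.g. to materialize an all-true vector mask.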
2062   void vallones(XMMRegister dst, int vector_len);
2063 };
2064 
2065 /**
2066  * class SkipIfEqual:
2067  *
2068  * Instantiating this class emits assembly code that jumps around any code
2069  * generated between the creation of the instance and its automatic destruction
2070  * at the end of the enclosing scope block, depending on the value of the flag
2071  * passed to the constructor, which is checked at run-time.
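      *
      * Illustrative usage (the flag name here is hypothetical):
      *
      *   {
      *     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false);
      *     // ... code emitted here is skipped at run-time when the flag is false ...
      *   }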
2072  */
2073 class SkipIfEqual {
2074  private:
2075   MacroAssembler* _masm;
2076   Label _label;
2077 
2078  public:
2079    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
2080    ~SkipIfEqual();
2081 };
2082 
2083 #endif // CPU_X86_MACROASSEMBLER_X86_HPP