/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

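  // Illustrative only (a sketch, not part of this interface): a call_VM
  // variant might bottom out in call_VM_base roughly like this; the real
  // expansions live in macroAssembler_x86.cpp.
  //
  //   call_VM_base(rax,          // oop result ends up in rax
  //                noreg,        // let call_VM_base compute the thread
  //                noreg,        // default last_java_sp (rsp)
  //                entry_point,  // VM entry to call
  //                1,            // one argument (besides the thread) to pop
  //                true);        // check for pending exceptions on return
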
  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);
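
  // A minimal usage sketch (illustrative; obj_reg and field_offset are assumed
  // names). For a small known offset the faulting access itself acts as the
  // check, so null_check emits no extra code:
  //
  //   null_check(obj_reg, field_offset);             // free if offset is in-page
  //   movptr(rax, Address(obj_reg, field_offset));   // this access may fault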

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

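  // A minimal usage sketch (illustrative; InterpreterRuntime::some_entry is an
  // assumed name, not a real VM entry point):
  //
  //   call_VM(rax,   // oop result, if any, ends up in rax
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),
  //           rbx);  // one register argument
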
  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);
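
  // A minimal bracketing sketch (illustrative; real call sites handle the
  // saved pc and frame pointer more carefully):
  //
  //   set_last_Java_frame(rsp, noreg, NULL, rscratch1);  // publish the anchor
  //   ... call into the VM or native code ...
  //   reset_last_Java_frame(true);                       // clear anchor and fp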

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);
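
  // One plausible expansion, shown only as a sketch (the actual emission
  // lives in macroAssembler_x86.cpp):
  //
  //   andl(x, 0xFF);                  // keep only the low byte
  //   setb(Assembler::notZero, x);    // x = (x != 0) ? 1 : 0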

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp);
  void load_klass_check_null(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for the special case of min_jint / -1, which would
  // overflow idivl, as described in the JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for the special case of min_jlong / -1, which would
  // overflow idivq, as described in the JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
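
  // A sketch of the guard these emit (illustrative; the real sequence is in
  // macroAssembler_x86.cpp and also sets up the remainder):
  //
  //   cmpl(rax, min_jint);                  // only min_jint / -1 overflows idivl
  //   jcc(Assembler::notEqual, normal_case);
  //   cmpl(reg, -1);                        // divisor == -1?
  //   jcc(Assembler::equal, special_case);  // quotient = min_jint, remainder = 0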

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
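  // (Conceptually, for signed x and shift s this computes
  //    if (x < 0) x += (1 << s) - 1;   // bias negative values
  //    x >>= s;                        // then arithmetic shift
  //  so the result rounds toward 0 rather than toward negative infinity.)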

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);
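  // (conceptually: reg = (reg + modulus - 1) & -modulus)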

private:
  // General purpose and XMM registers potentially clobbered by native code.
  // There is no need for FPU- or AVX opmask-related methods here, because
  // C1 and the interpreter always save/restore the FPU state as a whole
  // and do not care about the AVX-512 opmask registers.
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
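
  // A minimal usage sketch (illustrative; instance_size_in_bytes and the temp
  // register choices are assumptions, not a fixed convention):
  //
  //   Label slow_case;
  //   tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes,
  //                 rbx, rcx, slow_case);
  //   // rax now points at the new, still-uninitialized object
  //   bind(slow_case);   // fall back to a runtime allocation path here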

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
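
  // A sketch of how the fast and slow paths are usually wired together
  // (illustrative; this mirrors what the combined version above does):
  //
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(sub, super, temp,
  //                                 &L_success, &L_failure, NULL);
  //   // the "maybe" answer falls through into the slow path:
  //   check_klass_subtype_slow_path(sub, super, temp, noreg,
  //                                 &L_success, NULL);
  //   bind(L_failure);   // the slow path falls through here on failure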

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4-byte immediate value even if it fits into 8 bits
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst,       src.as_register());
  }

  void sbbptr(Address dst, int32_t src)  { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }


  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
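
  // A minimal usage sketch (illustrative; _some_counter is an assumed name):
  //
  //   atomic_incl(ExternalAddress((address)&_some_counter), rscratch1);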

  void lea(Register dst, Address        adr) { Assembler::lea(dst, adr); }
  void lea(Register dst, AddressLiteral adr);
  void lea(Address  dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32)  { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2)  { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src)  { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because it is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because it is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);

  // 32bit can do a case table jump in one instruction, but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not to the address of entry).
  void jump(ArrayAddress entry, Register rscratch);

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister    src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address        src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andps(XMMRegister dst, XMMRegister    src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address        src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister    src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address        src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister    src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address        src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

#ifndef _LP64
  void fadd_s(Address        src) { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address        src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)          { Assembler::fld_s(index); }
  void fld_s(Address        src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address        src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address        src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }

  void fmul_s(Address        src) { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register  reg_old_h,
    Register  reg_a,
    Register  reg_b,
    Register  reg_c,
    Register  reg_d,
    Register  reg_e,
    Register  reg_f,
    Register  reg_g,
    Register  reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
        XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
        XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
        XMMRegister xmm_2,     /* ymm6 */
        XMMRegister xmm_3,     /* ymm7 */
        Register    reg_a,      /* == eax on iteration 0, then rotate 8 registers right on each next iteration */
        Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
        Register    reg_c,      /* edi */
        Register    reg_d,      /* esi */
        Register    reg_e,      /* r8d */
        Register    reg_f,      /* r9d */
        Register    reg_g,      /* r10d */
        Register    reg_h,      /* r11d */
        int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
#endif // _LP64

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifndef _LP64
 private:
  // Initialized in macroAssembler_x86_constants.cpp
  static address ONES;
  static address L_2IL0FLOATPACKET_0;
  static address PI4_INV;
  static address PI4X3;
  static address PI4X4;

 public:
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif // !_LP64

private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address     dst, XMMRegister    src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister    src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address        src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address        src) { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

public:

  void addsd(XMMRegister dst, XMMRegister    src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address        src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister    src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address        src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister    src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address        src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void divsd(XMMRegister dst, XMMRegister    src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address        src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister    src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address        src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address     dst, XMMRegister    src);
  void movdqu(XMMRegister dst, XMMRegister    src);
  void movdqu(XMMRegister dst, Address        src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register  dst, KRegister      src) { Assembler::kmovwl(dst, src); }
  void kmovwl(Address   dst, KRegister      src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister      src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register       src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address        src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister      src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register       src) { Assembler::kmovql(dst, src); }
  void kmovql(Register  dst, KRegister      src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address        src) { Assembler::kmovql(dst, src); }
  void kmovql(Address   dst, KRegister      src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation: lowers to 16-bit moves on targets supporting the
  // AVX512F feature and to 64-bit moves on targets supporting the AVX512BW feature.
  void kmov(Address   dst, KRegister src);
  void kmov(KRegister dst, Address   src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register  dst, KRegister src);
  void kmov(KRegister dst, Register  src);
1125 
1126   using Assembler::movddup;
1127   void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1128 
1129   using Assembler::vmovddup;
1130   void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1131 
1132   // AVX Unaligned forms
1133   void vmovdqu(Address     dst, XMMRegister    src);
1134   void vmovdqu(XMMRegister dst, Address        src);
1135   void vmovdqu(XMMRegister dst, XMMRegister    src);
1136   void vmovdqu(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1137   void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1138 
1139   // AVX512 Unaligned forms, dispatched on element type
1140   void evmovdqu(BasicType type, KRegister kmask, Address     dst, XMMRegister src, bool merge, int vector_len);
1141   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address     src, bool merge, int vector_len);
1142 
1143   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1144   void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1145 
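  // The masked register-to-register forms below elide the move when it would be
  // an architectural no-op (identical registers and no merge mask).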
1146   void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1147     if (dst->encoding() != src->encoding() || mask != k0) {
1148       Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1149     }
1150   }
1151   void evmovdqub(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1152   void evmovdqub(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1153   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1154 
1155   void evmovdquw(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1156   void evmovdquw(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1157 
1158   void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1159     if (dst->encoding() != src->encoding() || mask != k0) {
1160       Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1161     }
1162   }
1163   void evmovdquw(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1164   void evmovdquw(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1165   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1166 
1167   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1168     if (dst->encoding() != src->encoding()) {
1169       Assembler::evmovdqul(dst, src, vector_len);
1170     }
1171   }
1172   void evmovdqul(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1173   void evmovdqul(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1174 
1175   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1176     if (dst->encoding() != src->encoding() || mask != k0) {
1177       Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1178     }
1179   }
1180   void evmovdqul(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1181   void evmovdqul(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1182   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1183 
1184   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1185     if (dst->encoding() != src->encoding()) {
1186       Assembler::evmovdquq(dst, src, vector_len);
1187     }
1188   }
1189   void evmovdquq(XMMRegister dst, Address        src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1190   void evmovdquq(Address     dst, XMMRegister    src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1191   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1192 
1193   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1194     if (dst->encoding() != src->encoding() || mask != k0) {
1195       Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1196     }
1197   }
1198   void evmovdquq(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1199   void evmovdquq(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1200   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1201 
1202   // Move Aligned Double Quadword
1203   void movdqa(XMMRegister dst, XMMRegister    src) { Assembler::movdqa(dst, src); }
1204   void movdqa(XMMRegister dst, Address        src) { Assembler::movdqa(dst, src); }
1205   void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1206 
1207   void movsd(Address     dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1208   void movsd(XMMRegister dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1209   void movsd(XMMRegister dst, Address        src) { Assembler::movsd(dst, src); }
1210   void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1211 
1212   void mulpd(XMMRegister dst, XMMRegister    src) { Assembler::mulpd(dst, src); }
1213   void mulpd(XMMRegister dst, Address        src) { Assembler::mulpd(dst, src); }
1214   void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1215 
1216   void mulsd(XMMRegister dst, XMMRegister    src) { Assembler::mulsd(dst, src); }
1217   void mulsd(XMMRegister dst, Address        src) { Assembler::mulsd(dst, src); }
1218   void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1219 
1220   void mulss(XMMRegister dst, XMMRegister    src) { Assembler::mulss(dst, src); }
1221   void mulss(XMMRegister dst, Address        src) { Assembler::mulss(dst, src); }
1222   void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1223 
1224   // Carry-Less Multiplication Quadword
1225   void pclmulldq(XMMRegister dst, XMMRegister src) {
1226     // 0x00 - multiply lower 64 bits [0:63]
1227     Assembler::pclmulqdq(dst, src, 0x00);
1228   }
1229   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1230     // 0x11 - multiply upper 64 bits [64:127]
1231     Assembler::pclmulqdq(dst, src, 0x11);
1232   }
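
  // Hypothetical usage sketch: the two-operand form overwrites dst, so the first
  // multiplicand is copied beforehand (register choices are illustrative only):
  //   __ movdqa(xmm1, xmm0);
  //   __ pclmulldq(xmm1, xmm7);  // xmm1 = clmul(xmm0[0:63], xmm7[0:63])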
1233 
1234   void pcmpeqb(XMMRegister dst, XMMRegister src);
1235   void pcmpeqw(XMMRegister dst, XMMRegister src);
1236 
1237   void pcmpestri(XMMRegister dst, Address src, int imm8);
1238   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1239 
1240   void pmovzxbw(XMMRegister dst, XMMRegister src);
1241   void pmovzxbw(XMMRegister dst, Address src);
1242 
1243   void pmovmskb(Register dst, XMMRegister src);
1244 
1245   void ptest(XMMRegister dst, XMMRegister src);
1246 
1247   void roundsd(XMMRegister dst, XMMRegister    src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1248   void roundsd(XMMRegister dst, Address        src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1249   void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1250 
1251   void sqrtss(XMMRegister dst, XMMRegister     src) { Assembler::sqrtss(dst, src); }
1252   void sqrtss(XMMRegister dst, Address         src) { Assembler::sqrtss(dst, src); }
1253   void sqrtss(XMMRegister dst, AddressLiteral  src, Register rscratch = noreg);
1254 
1255   void subsd(XMMRegister dst, XMMRegister    src) { Assembler::subsd(dst, src); }
1256   void subsd(XMMRegister dst, Address        src) { Assembler::subsd(dst, src); }
1257   void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1258 
1259   void subss(XMMRegister dst, XMMRegister    src) { Assembler::subss(dst, src); }
1260   void subss(XMMRegister dst, Address        src) { Assembler::subss(dst, src); }
1261   void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1262 
1263   void ucomiss(XMMRegister dst, XMMRegister    src) { Assembler::ucomiss(dst, src); }
1264   void ucomiss(XMMRegister dst, Address        src) { Assembler::ucomiss(dst, src); }
1265   void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1266 
1267   void ucomisd(XMMRegister dst, XMMRegister    src) { Assembler::ucomisd(dst, src); }
1268   void ucomisd(XMMRegister dst, Address        src) { Assembler::ucomisd(dst, src); }
1269   void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1270 
1271   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1272   void xorpd(XMMRegister dst, XMMRegister    src);
1273   void xorpd(XMMRegister dst, Address        src) { Assembler::xorpd(dst, src); }
1274   void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1275 
1276   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1277   void xorps(XMMRegister dst, XMMRegister    src);
1278   void xorps(XMMRegister dst, Address        src) { Assembler::xorps(dst, src); }
1279   void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1280 
1281   // Shuffle Bytes
1282   void pshufb(XMMRegister dst, XMMRegister    src) { Assembler::pshufb(dst, src); }
1283   void pshufb(XMMRegister dst, Address        src) { Assembler::pshufb(dst, src); }
1284   void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1285   // AVX 3-operand instructions
1286 
1287   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddsd(dst, nds, src); }
1288   void vaddsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddsd(dst, nds, src); }
1289   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1290 
1291   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddss(dst, nds, src); }
1292   void vaddss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddss(dst, nds, src); }
1293   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1294 
1295   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1296   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1297 
1298   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len);
1299   void vpaddb(XMMRegister dst, XMMRegister nds, Address        src, int vector_len);
1300   void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1301 
1302   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1303   void vpaddw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1304 
1305   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1306   void vpaddd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1307   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1308 
1309   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1310   void vpand(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1311   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1312 
1313   using Assembler::vpbroadcastd;
1314   void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1315 
1316   using Assembler::vpbroadcastq;
1317   void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1318 
1319   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1320 
1321   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1322   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1323 
1324   // Vector compares
1325   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1326     Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1327   }
1328   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1329 
1330   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1331     Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1332   }
1333   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1334 
1335   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1336     Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1337   }
1338   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1339 
1340   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1341     Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1342   }
1343   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1344 
1345   void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1346 
1347   // Emit comparison instruction for the specified comparison predicate.
1348   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1349   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1350 
1351   void vpmovzxbw(XMMRegister dst, Address     src, int vector_len);
1352   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1353 
1354   void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1355 
1356   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1357   void vpmullw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1358 
1359   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1360   void vpmulld(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1361   void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1362 
1363   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1364   void vpsubb(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1365 
1366   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1367   void vpsubw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1368 
1369   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1370   void vpsraw(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1371 
1372   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1373   void evpsraq(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1374 
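  // Masked vector shifts: when 'is_varshift' is true the per-element
  // variable-shift form (e.g. evpsllvw) is emitted, otherwise the
  // uniform-count form (e.g. evpsllw) is used.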
1375   void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1376     if (!is_varshift) {
1377       Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1378     } else {
1379       Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1380     }
1381   }
1382   void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1383     if (!is_varshift) {
1384       Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1385     } else {
1386       Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1387     }
1388   }
1389   void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1390     if (!is_varshift) {
1391       Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1392     } else {
1393       Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1394     }
1395   }
1396   void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1397     if (!is_varshift) {
1398       Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1399     } else {
1400       Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1401     }
1402   }
1403   void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1404     if (!is_varshift) {
1405       Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1406     } else {
1407       Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1408     }
1409   }
1410   void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1411     if (!is_varshift) {
1412       Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1413     } else {
1414       Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1415     }
1416   }
1417   void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1418     if (!is_varshift) {
1419       Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1420     } else {
1421       Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1422     }
1423   }
1424   void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1425     if (!is_varshift) {
1426       Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1427     } else {
1428       Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1429     }
1430   }
1431   void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1432     if (!is_varshift) {
1433       Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1434     } else {
1435       Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1436     }
1437   }
1438 
1439   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1440   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1441   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1442   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1443 
1444   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1445   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1446 
1447   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1448   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1449 
1450   void vptest(XMMRegister dst, XMMRegister src);
1451   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1452 
1453   void punpcklbw(XMMRegister dst, XMMRegister src);
1454   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1455 
1456   void pshufd(XMMRegister dst, Address src, int mode);
1457   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1458 
1459   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1460   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1461 
1462   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1463   void vandpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1464   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1465 
1466   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1467   void vandps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1468   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1469 
1470   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1471 
1472   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivsd(dst, nds, src); }
1473   void vdivsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivsd(dst, nds, src); }
1474   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1475 
1476   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivss(dst, nds, src); }
1477   void vdivss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivss(dst, nds, src); }
1478   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1479 
1480   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulsd(dst, nds, src); }
1481   void vmulsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulsd(dst, nds, src); }
1482   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1483 
1484   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulss(dst, nds, src); }
1485   void vmulss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulss(dst, nds, src); }
1486   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1487 
1488   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubsd(dst, nds, src); }
1489   void vsubsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubsd(dst, nds, src); }
1490   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1491 
1492   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubss(dst, nds, src); }
1493   void vsubss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubss(dst, nds, src); }
1494   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1495 
1496   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1497   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1498 
1499   // AVX Vector instructions
1500 
1501   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1502   void vxorpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1503   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1504 
1505   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1506   void vxorps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1507   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1508 
1509   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1510     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only in AVX2
1511       Assembler::vpxor(dst, nds, src, vector_len);
1512     else
1513       Assembler::vxorpd(dst, nds, src, vector_len);
1514   }
1515   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1516     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only in AVX2
1517       Assembler::vpxor(dst, nds, src, vector_len);
1518     else
1519       Assembler::vxorpd(dst, nds, src, vector_len);
1520   }
1521   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1522 
1523   // Simple version for AVX2 256bit vectors
1524   void vpxor(XMMRegister dst, XMMRegister src) {
1525     assert(UseAVX >= 2, "Should be at least AVX2");
1526     Assembler::vpxor(dst, dst, src, AVX_256bit);
1527   }
1528   void vpxor(XMMRegister dst, Address src) {
1529     assert(UseAVX >= 2, "Should be at least AVX2");
1530     Assembler::vpxor(dst, dst, src, AVX_256bit);
1531   }
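
  // Common zeroing idiom, shown as an illustrative sketch (requires AVX2):
  //   __ vpxor(xmm0, xmm0);  // zero all 256 bits of ymm0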
1532 
1533   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1534   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1535 
1536   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1537     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1538       Assembler::vinserti32x4(dst, nds, src, imm8);
1539     } else if (UseAVX > 1) {
1540       // vinserti128 is available only in AVX2
1541       Assembler::vinserti128(dst, nds, src, imm8);
1542     } else {
1543       Assembler::vinsertf128(dst, nds, src, imm8);
1544     }
1545   }
1546 
1547   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1548     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1549       Assembler::vinserti32x4(dst, nds, src, imm8);
1550     } else if (UseAVX > 1) {
1551       // vinserti128 is available only in AVX2
1552       Assembler::vinserti128(dst, nds, src, imm8);
1553     } else {
1554       Assembler::vinsertf128(dst, nds, src, imm8);
1555     }
1556   }
1557 
1558   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1559     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1560       Assembler::vextracti32x4(dst, src, imm8);
1561     } else if (UseAVX > 1) {
1562       // vextracti128 is available only in AVX2
1563       Assembler::vextracti128(dst, src, imm8);
1564     } else {
1565       Assembler::vextractf128(dst, src, imm8);
1566     }
1567   }
1568 
1569   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1570     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1571       Assembler::vextracti32x4(dst, src, imm8);
1572     } else if (UseAVX > 1) {
1573       // vextracti128 is available only in AVX2
1574       Assembler::vextracti128(dst, src, imm8);
1575     } else {
1576       Assembler::vextractf128(dst, src, imm8);
1577     }
1578   }
1579 
1580   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1581   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1582     vinserti128(dst, dst, src, 1);
1583   }
1584   void vinserti128_high(XMMRegister dst, Address src) {
1585     vinserti128(dst, dst, src, 1);
1586   }
1587   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1588     vextracti128(dst, src, 1);
1589   }
1590   void vextracti128_high(Address dst, XMMRegister src) {
1591     vextracti128(dst, src, 1);
1592   }
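
  // Illustrative lane-copy sketch (register choices are hypothetical):
  //   __ vextracti128_high(xmm1, xmm0);  // xmm1 = upper 128 bits of ymm0
  //   __ vinserti128_high(xmm0, xmm2);   // upper 128 bits of ymm0 = xmm2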
1593 
1594   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1595     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1596       Assembler::vinsertf32x4(dst, dst, src, 1);
1597     } else {
1598       Assembler::vinsertf128(dst, dst, src, 1);
1599     }
1600   }
1601 
1602   void vinsertf128_high(XMMRegister dst, Address src) {
1603     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1604       Assembler::vinsertf32x4(dst, dst, src, 1);
1605     } else {
1606       Assembler::vinsertf128(dst, dst, src, 1);
1607     }
1608   }
1609 
1610   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1611     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1612       Assembler::vextractf32x4(dst, src, 1);
1613     } else {
1614       Assembler::vextractf128(dst, src, 1);
1615     }
1616   }
1617 
1618   void vextractf128_high(Address dst, XMMRegister src) {
1619     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1620       Assembler::vextractf32x4(dst, src, 1);
1621     } else {
1622       Assembler::vextractf128(dst, src, 1);
1623     }
1624   }
1625 
1626   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1627   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1628     Assembler::vinserti64x4(dst, dst, src, 1);
1629   }
1630   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1631     Assembler::vinsertf64x4(dst, dst, src, 1);
1632   }
1633   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1634     Assembler::vextracti64x4(dst, src, 1);
1635   }
1636   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1637     Assembler::vextractf64x4(dst, src, 1);
1638   }
1639   void vextractf64x4_high(Address dst, XMMRegister src) {
1640     Assembler::vextractf64x4(dst, src, 1);
1641   }
1642   void vinsertf64x4_high(XMMRegister dst, Address src) {
1643     Assembler::vinsertf64x4(dst, dst, src, 1);
1644   }
1645 
1646   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1647   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1648     vinserti128(dst, dst, src, 0);
1649   }
1650   void vinserti128_low(XMMRegister dst, Address src) {
1651     vinserti128(dst, dst, src, 0);
1652   }
1653   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1654     vextracti128(dst, src, 0);
1655   }
1656   void vextracti128_low(Address dst, XMMRegister src) {
1657     vextracti128(dst, src, 0);
1658   }
1659 
1660   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1661     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1662       Assembler::vinsertf32x4(dst, dst, src, 0);
1663     } else {
1664       Assembler::vinsertf128(dst, dst, src, 0);
1665     }
1666   }
1667 
1668   void vinsertf128_low(XMMRegister dst, Address src) {
1669     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1670       Assembler::vinsertf32x4(dst, dst, src, 0);
1671     } else {
1672       Assembler::vinsertf128(dst, dst, src, 0);
1673     }
1674   }
1675 
1676   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1677     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1678       Assembler::vextractf32x4(dst, src, 0);
1679     } else {
1680       Assembler::vextractf128(dst, src, 0);
1681     }
1682   }
1683 
1684   void vextractf128_low(Address dst, XMMRegister src) {
1685     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1686       Assembler::vextractf32x4(dst, src, 0);
1687     } else {
1688       Assembler::vextractf128(dst, src, 0);
1689     }
1690   }
1691 
1692   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1693   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1694     Assembler::vinserti64x4(dst, dst, src, 0);
1695   }
1696   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1697     Assembler::vinsertf64x4(dst, dst, src, 0);
1698   }
1699   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1700     Assembler::vextracti64x4(dst, src, 0);
1701   }
1702   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1703     Assembler::vextractf64x4(dst, src, 0);
1704   }
1705   void vextractf64x4_low(Address dst, XMMRegister src) {
1706     Assembler::vextractf64x4(dst, src, 0);
1707   }
1708   void vinsertf64x4_low(XMMRegister dst, Address src) {
1709     Assembler::vinsertf64x4(dst, dst, src, 0);
1710   }
1711 
1712   // Carry-Less Multiplication Quadword
1713   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1714     // 0x00 - multiply lower 64 bits [0:63]
1715     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1716   }
1717   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1718     // 0x11 - multiply upper 64 bits [64:127]
1719     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1720   }
1721   void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1722     // 0x10 - multiply nds[0:63] and src[64:127]
1723     Assembler::vpclmulqdq(dst, nds, src, 0x10);
1724   }
1725   void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1726     //0x01 - multiply nds[64:127] and src[0:63]
1727     Assembler::vpclmulqdq(dst, nds, src, 0x01);
1728   }
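
  // A hypothetical sketch of a full 128x128-bit carry-less multiply built from
  // the four partial products ('a', 'b' and 't0'..'t3' are illustrative):
  //   __ vpclmulldq(t0, a, b);     // clmul(a[0:63],   b[0:63])
  //   __ vpclmulhqlqdq(t1, a, b);  // clmul(a[64:127], b[0:63])
  //   __ vpclmullqhqdq(t2, a, b);  // clmul(a[0:63],   b[64:127])
  //   __ vpclmulhdq(t3, a, b);     // clmul(a[64:127], b[64:127])
  // The middle terms t1 and t2 are then XORed and folded into the result.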
1729 
1730   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1731     // 0x00 - multiply lower 64 bits [0:63]
1732     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1733   }
1734   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1735     // 0x11 - multiply upper 64 bits [64:127]
1736     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1737   }
1738 
1739   // AVX-512 mask operations.
1740   void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1741   void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1742   void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1743   void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1744   void kortest(uint masklen, KRegister src1, KRegister src2);
1745   void ktest(uint masklen, KRegister src1, KRegister src2);
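
  // Hypothetical sketch: branch if any lane of a 16-bit mask is set
  // (kortest ORs its sources and sets ZF iff the result is all zeros):
  //   __ kortest(16, k1, k1);
  //   __ jcc(Assembler::notZero, L_any_lane_set);  // 'L_any_lane_set' is illustrative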
1746 
1747   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1748   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1749 
1750   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1751   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1752 
1753   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1754   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1755 
1756   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1757   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1758 
1759   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1760   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1761   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1762   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1763 
1764   using Assembler::evpandq;
1765   void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1766 
1767   using Assembler::evporq;
1768   void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1769 
1770   using Assembler::vpternlogq;
1771   void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1772 
1773   void cmov32( Condition cc, Register dst, Address  src);
1774   void cmov32( Condition cc, Register dst, Register src);
1775 
1776   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1777 
1778   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1779   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1780 
1781   void movoop(Register dst, jobject obj);
1782   void movoop(Address  dst, jobject obj, Register rscratch);
1783 
1784   void mov_metadata(Register dst, Metadata* obj);
1785   void mov_metadata(Address  dst, Metadata* obj, Register rscratch);
1786 
1787   void movptr(Register     dst, Register       src);
1788   void movptr(Register     dst, Address        src);
1789   void movptr(Register     dst, AddressLiteral src);
1790   void movptr(Register     dst, ArrayAddress   src);
1791   void movptr(Register     dst, intptr_t       src);
1792   void movptr(Address      dst, Register       src);
1793   void movptr(Address      dst, int32_t        imm);
1794   void movptr(Address      dst, intptr_t       src, Register rscratch);
1795   void movptr(ArrayAddress dst, Register       src, Register rscratch);
1796 
1797   void movptr(Register dst, RegisterOrConstant src) {
1798     if (src.is_constant()) movptr(dst, src.as_constant());
1799     else                   movptr(dst, src.as_register());
1800   }
1801 
1802 
1803   // to avoid hiding movl
1804   void mov32(Register       dst, AddressLiteral src);
1805   void mov32(AddressLiteral dst, Register        src, Register rscratch = noreg);
1806 
1807   // Import other mov() methods from the parent class or else
1808   // they will be hidden by the following overriding declaration.
1809   using Assembler::movdl;
1810   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1811 
1812   using Assembler::movq;
1813   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1814 
1815   // Can push value or effective address
1816   void pushptr(AddressLiteral src, Register rscratch);
1817 
1818   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1819   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1820 
1821   void pushoop(jobject obj, Register rscratch);
1822   void pushklass(Metadata* obj, Register rscratch);
1823 
1824   // sign-extend a 32-bit ('l') value to a pointer-sized element as needed
1825   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1826   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1827 
1828 
1829  public:
1830   // clear memory of size 'cnt' qwords, starting at 'base';
1831   // if 'is_large' is set, do not try to produce a short loop
1832   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1833 
1834   // clear memory of constant size 'cnt' qwords, starting at 'base'
1835   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1836 
1837   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1838   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
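
  // Hypothetical usage sketch (register choices are illustrative only):
  //   __ clear_mem(rdi /*base*/, rcx /*cnt, in qwords*/, rax /*rtmp*/,
  //                xmm0 /*xtmp*/, false /*is_large*/);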
1839 
1840   // Fill primitive arrays
1841   void generate_fill(BasicType t, bool aligned,
1842                      Register to, Register value, Register count,
1843                      Register rtmp, XMMRegister xtmp);
1844 
1845   void encode_iso_array(Register src, Register dst, Register len,
1846                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1847                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1848 
1849 #ifdef _LP64
1850   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1851   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1852                              Register y, Register y_idx, Register z,
1853                              Register carry, Register product,
1854                              Register idx, Register kdx);
1855   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1856                               Register yz_idx, Register idx,
1857                               Register carry, Register product, int offset);
1858   void multiply_128_x_128_bmi2_loop(Register y, Register z,
1859                                     Register carry, Register carry2,
1860                                     Register idx, Register jdx,
1861                                     Register yz_idx1, Register yz_idx2,
1862                                     Register tmp, Register tmp3, Register tmp4);
1863   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1864                                Register yz_idx, Register idx, Register jdx,
1865                                Register carry, Register product,
1866                                Register carry2);
1867   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1868                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1869   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1870                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1871   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1872                             Register tmp2);
1873   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1874                        Register rdxReg, Register raxReg);
1875   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1876   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1877                        Register tmp3, Register tmp4);
1878   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1879                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1880 
1881   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1882                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1883                Register raxReg);
1884   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1885                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1886                Register raxReg);
1887   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1888                            Register result, Register tmp1, Register tmp2,
1889                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1890 #endif
1891 
1892   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1893   void update_byte_crc32(Register crc, Register val, Register table);
1894   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
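
  // Conceptually, update_byte_crc32 performs the standard table-driven CRC-32
  // step (a sketch of the computation, not the exact emitted code):
  //   crc = table[(crc ^ val) & 0xFF] ^ (crc >> 8);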
1895 
1896 
1897 #ifdef _LP64
1898   void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1899   void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1900                                 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1901                                 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1902 #endif // _LP64
1903 
1904   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1905   // Note on a naming convention:
1906   // Prefix w = register only used on a Westmere+ architecture
1907   // Prefix n = register only used on a Nehalem architecture
1908 #ifdef _LP64
1909   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1910                        Register tmp1, Register tmp2, Register tmp3);
1911 #else
1912   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1913                        Register tmp1, Register tmp2, Register tmp3,
1914                        XMMRegister xtmp1, XMMRegister xtmp2);
1915 #endif
1916   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1917                         Register in_out,
1918                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1919                         XMMRegister w_xtmp2,
1920                         Register tmp1,
1921                         Register n_tmp2, Register n_tmp3);
1922   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1923                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1924                        Register tmp1, Register tmp2,
1925                        Register n_tmp3);
1926   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1927                          Register in_out1, Register in_out2, Register in_out3,
1928                          Register tmp1, Register tmp2, Register tmp3,
1929                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1930                          Register tmp4, Register tmp5,
1931                          Register n_tmp6);
1932   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1933                             Register tmp1, Register tmp2, Register tmp3,
1934                             Register tmp4, Register tmp5, Register tmp6,
1935                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1936                             bool is_pclmulqdq_supported);
1937   // Fold 128-bit data chunk
1938   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1939   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1940 #ifdef _LP64
1941   // Fold 512-bit data chunk
1942   void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
1943 #endif // _LP64
1944   // Fold 8-bit data
1945   void fold_8bit_crc32(Register crc, Register table, Register tmp);
1946   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1947 
1948   // Compress char[] array to byte[].
1949   void char_array_compress(Register src, Register dst, Register len,
1950                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1951                            XMMRegister tmp4, Register tmp5, Register result,
1952                            KRegister mask1 = knoreg, KRegister mask2 = knoreg);
1953 
1954   // Inflate byte[] array to char[].
1955   void byte_array_inflate(Register src, Register dst, Register len,
1956                           XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
1957 
1958   void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
1959                    Register length, Register temp, int vec_enc);
1960 
1961   void fill64_masked(uint shift, Register dst, int disp,
1962                          XMMRegister xmm, KRegister mask, Register length,
1963                          Register temp, bool use64byteVector = false);
1964 
1965   void fill32_masked(uint shift, Register dst, int disp,
1966                          XMMRegister xmm, KRegister mask, Register length,
1967                          Register temp);
1968 
1969   void fill32(Address dst, XMMRegister xmm);
1970 
1971   void fill32(Register dst, int disp, XMMRegister xmm);
1972 
1973   void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
1974 
1975   void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
1976 
1977 #ifdef _LP64
1978   void convert_f2i(Register dst, XMMRegister src);
1979   void convert_d2i(Register dst, XMMRegister src);
1980   void convert_f2l(Register dst, XMMRegister src);
1981   void convert_d2l(Register dst, XMMRegister src);
1982   void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
1983   void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
1984 
1985   void cache_wb(Address line);
1986   void cache_wbsync(bool is_pre);
1987 
1988 #ifdef COMPILER2_OR_JVMCI
1989   void generate_fill_avx3(BasicType type, Register to, Register value,
1990                           Register count, Register rtmp, XMMRegister xtmp);
1991 #endif // COMPILER2_OR_JVMCI
1992 #endif // _LP64
1993 
1994   void vallones(XMMRegister dst, int vector_len);
1995 
1996   void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
1997 
1998 };
1999 
2000 /**
2001  * class SkipIfEqual:
2002  *
2003  * Instantiating this class emits assembly code that jumps around any code
2004  * generated between the creation of the instance and its automatic
2005  * destruction at the end of a scope block, depending on the value of the
2006  * flag passed to the constructor, which is checked at run-time.
2007  */
2008 class SkipIfEqual {
2009  private:
2010   MacroAssembler* _masm;
2011   Label _label;
2012 
2013  public:
2014   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
2015   ~SkipIfEqual();
2016 };
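
// Hypothetical usage sketch ('SomeBoolFlag' is illustrative):
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false, rscratch1);
//     // code emitted here is skipped at run-time when SomeBoolFlag == false
//   }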
2017 
2018 #endif // CPU_X86_MACROASSEMBLER_X86_HPP