1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/signature.hpp"
  34 #include "runtime/vm_version.hpp"
  35 #include "utilities/checkedCast.hpp"
  36 
  37 class ciInlineKlass;
  38 
  39 // MacroAssembler extends Assembler by frequently used macros.
  40 //
  41 // Instructions for which a 'better' code sequence exists depending
  42 // on arguments should also go in here.
  43 
  44 class MacroAssembler: public Assembler {
  45   friend class LIR_Assembler;
  46   friend class Runtime1;      // as_Address()
  47 
  48  public:
  49   // Support for VM calls
  50   //
  51   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  52   // may customize this version by overriding it for its purposes (e.g., to save/restore
  53   // additional registers when doing a VM call).
  54 
  55   virtual void call_VM_leaf_base(
  56     address entry_point,               // the entry point
  57     int     number_of_arguments        // the number of arguments to pop after the call
  58   );
  59 
  60  protected:
  61   // This is the base routine called by the different versions of call_VM. The interpreter
  62   // may customize this version by overriding it for its purposes (e.g., to save/restore
  63   // additional registers when doing a VM call).
  64   //
  65   // call_VM_base returns the register which contains the thread upon return.
  66  // If no last_java_sp is specified (noreg) then rsp will be used instead.
  67   virtual void call_VM_base(           // returns the register containing the thread upon return
  68     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  69     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  70     address  entry_point,              // the entry point
  71     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  72     bool     check_exceptions          // whether to check for pending exceptions after return
  73   );
  74 
  75   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  76 
  77  public:
  78   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  79 
  80   // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  81   // The implementation is only non-empty for the InterpreterMacroAssembler,
  82   // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  83   virtual void check_and_handle_popframe();
  84   virtual void check_and_handle_earlyret();
  85 
  86   Address as_Address(AddressLiteral adr);
  87   Address as_Address(ArrayAddress adr, Register rscratch);
  88 
  89   // Support for null-checks
  90   //
  91   // Generates code that causes a null OS exception if the content of reg is null.
  92   // If the accessed location is M[reg + offset] and the offset is known, provide the
  93   // offset. No explicit code generation is needed if the offset is within a certain
  94   // range (0 <= offset <= page_size).
  95 
  96   void null_check(Register reg, int offset = -1);
  97   static bool needs_explicit_null_check(intptr_t offset);
  98   static bool uses_implicit_null_check(void* address);
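
  // A short usage sketch based on the comment above (register and offset are
  // illustrative only): when the offset of the upcoming access is known and small,
  // the faulting access itself performs the check and no extra code is needed;
  // with an unknown offset an explicit check against the base oop is emitted.
  //
  //   null_check(rbx, oopDesc::klass_offset_in_bytes());  // small known offset: implicit check
  //   null_check(rbx);                                     // unknown offset: explicit check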
  99 
 100   // markWord tests, kills markWord reg
 101   void test_markword_is_inline_type(Register markword, Label& is_inline_type);
 102 
 103   // inlineKlass queries, kills temp_reg
 104   void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
 105 
 106   void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
 107   void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
 108   void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
 109   void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
 110 
 111   // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
 112   void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
 113   void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
 114   void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
 115   void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
 116   void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
 117 
 118   // Check array klass layout helper for flat or null-free arrays...
 119   void test_flat_array_layout(Register lh, Label& is_flat_array);
 120   void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
 121 
 122   // Required platform-specific helpers for Label::patch_instructions.
 123   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 124   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 125     unsigned char op = branch[0];
 126     assert(op == 0xE8 /* call */ ||
 127         op == 0xE9 /* jmp */ ||
 128         op == 0xEB /* short jmp */ ||
 129         (op & 0xF0) == 0x70 /* short jcc */ ||
 130         (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
 131         (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
 132         (op == 0x8D) /* lea */,
 133         "Invalid opcode at patch point");
 134 
 135     if (op == 0xEB || (op & 0xF0) == 0x70) {
 136       // short offset operators (jmp and jcc)
 137       char* disp = (char*) &branch[1];
 138       int imm8 = checked_cast<int>(target - (address) &disp[1]);
 139       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 140                 file == nullptr ? "<null>" : file, line);
 141       *disp = (char)imm8;
 142     } else {
 143       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
 144       int imm32 = checked_cast<int>(target - (address) &disp[1]);
 145       *disp = imm32;
 146     }
 147   }
 148 
 149   // The following 4 methods return the offset of the appropriate move instruction
 150 
 151   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 152   int load_unsigned_byte(Register dst, Address src);
 153   int load_unsigned_short(Register dst, Address src);
 154 
 155   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 156   int load_signed_byte(Register dst, Address src);
 157   int load_signed_short(Register dst, Address src);
 158 
 159   // Support for sign-extension (hi:lo = extend_sign(lo))
 160   void extend_sign(Register hi, Register lo);
 161 
 162   // Load and store values by size and signed-ness
 163   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 164   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 165 
 166   // Support for inc/dec with optimal instruction selection depending on value
 167 
 168   void increment(Register reg, int value = 1) { incrementq(reg, value); }
 169   void decrement(Register reg, int value = 1) { decrementq(reg, value); }
 170   void increment(Address dst, int value = 1)  { incrementq(dst, value); }
 171   void decrement(Address dst, int value = 1)  { decrementq(dst, value); }
 172 
 173   void decrementl(Address dst, int value = 1);
 174   void decrementl(Register reg, int value = 1);
 175 
 176   void decrementq(Register reg, int value = 1);
 177   void decrementq(Address dst, int value = 1);
 178 
 179   void incrementl(Address dst, int value = 1);
 180   void incrementl(Register reg, int value = 1);
 181 
 182   void incrementq(Register reg, int value = 1);
 183   void incrementq(Address dst, int value = 1);
 184 
 185   void incrementl(AddressLiteral dst, Register rscratch = noreg);
 186   void incrementl(ArrayAddress   dst, Register rscratch);
 187 
 188   void incrementq(AddressLiteral dst, Register rscratch = noreg);
 189 
 190   // Support optimal SSE move instructions.
 191   void movflt(XMMRegister dst, XMMRegister src) {
 192     if (dst->encoding() == src->encoding()) return;
 193     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 194     else                       { movss (dst, src); return; }
 195   }
 196   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 197   void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
 198   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 199 
 200   // Move with zero extension
 201   void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
 202 
 203   void movdbl(XMMRegister dst, XMMRegister src) {
 204     if (dst->encoding() == src->encoding()) return;
 205     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 206     else                       { movsd (dst, src); return; }
 207   }
 208 
 209   void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
 210 
 211   void movdbl(XMMRegister dst, Address src) {
 212     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 213     else                         { movlpd(dst, src); return; }
 214   }
 215   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 216 
 217   void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
 218     // Use separate tmp XMM register because caller may
 219     // require src XMM register to be unchanged (as in x86.ad).
 220     vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
 221     movdl(dst, tmp);
 222     movswl(dst, dst);
 223   }
 224 
 225   void flt16_to_flt(XMMRegister dst, Register src) {
 226     movdl(dst, src);
 227     vcvtph2ps(dst, dst, Assembler::AVX_128bit);
 228   }
 229 
 230   // Alignment
 231   void align32();
 232   void align64();
 233   void align(uint modulus);
 234   void align(uint modulus, uint target);
 235 
 236   void post_call_nop();
 237 
 238   // Stack frame creation/removal
 239   void enter();
 240   void leave();
 241 
 242   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
 243   // The pointer will be loaded into the thread register. This is a slow version that does a native call.
 244   // Normally, the JavaThread pointer is available in r15_thread; use that where possible.
 245   void get_thread_slow(Register thread);
 246 
 247   // Support for argument shuffling
 248 
 249   // bias in bytes
 250   void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 251   void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 252   void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 253   void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 254   void move_ptr(VMRegPair src, VMRegPair dst);
 255   void object_move(OopMap* map,
 256                    int oop_handle_offset,
 257                    int framesize_in_slots,
 258                    VMRegPair src,
 259                    VMRegPair dst,
 260                    bool is_receiver,
 261                    int* receiver_offset);
 262 
 263   // Support for VM calls
 264   //
 265   // It is imperative that all calls into the VM are handled via the call_VM macros.
 266   // They make sure that the stack linkage is setup correctly. call_VM's correspond
 267   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
 268 
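  // A hedged usage sketch (the runtime entry and argument registers below are
  // purely illustrative, not declarations from this file). A result oop, if any,
  // is left in oop_result; the current thread is passed implicitly and is not
  // counted among the arguments; pending exceptions are checked unless
  // check_exceptions is false:
  //
  //   call_VM(rax, CAST_FROM_FN_PTR(address, SomeRuntime::some_entry), rbx, rcx);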
 269 
 270   void call_VM(Register oop_result,
 271                address entry_point,
 272                bool check_exceptions = true);
 273   void call_VM(Register oop_result,
 274                address entry_point,
 275                Register arg_1,
 276                bool check_exceptions = true);
 277   void call_VM(Register oop_result,
 278                address entry_point,
 279                Register arg_1, Register arg_2,
 280                bool check_exceptions = true);
 281   void call_VM(Register oop_result,
 282                address entry_point,
 283                Register arg_1, Register arg_2, Register arg_3,
 284                bool check_exceptions = true);
 285 
 286   // Overloadings with last_Java_sp
 287   void call_VM(Register oop_result,
 288                Register last_java_sp,
 289                address entry_point,
 290                int number_of_arguments = 0,
 291                bool check_exceptions = true);
 292   void call_VM(Register oop_result,
 293                Register last_java_sp,
 294                address entry_point,
 295                Register arg_1,
 296                bool check_exceptions = true);
 297   void call_VM(Register oop_result,
 298                Register last_java_sp,
 299                address entry_point,
 300                Register arg_1, Register arg_2,
 301                bool check_exceptions = true);
 302   void call_VM(Register oop_result,
 303                Register last_java_sp,
 304                address entry_point,
 305                Register arg_1, Register arg_2, Register arg_3,
 306                bool check_exceptions = true);
 307 
 308   void get_vm_result_oop(Register oop_result);
 309   void get_vm_result_metadata(Register metadata_result);
 310 
 311   // These always tightly bind to MacroAssembler::call_VM_base
 312   // bypassing the virtual implementation
 313   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 314   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 315   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 316   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 317   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 318 
 319   void call_VM_leaf0(address entry_point);
 320   void call_VM_leaf(address entry_point,
 321                     int number_of_arguments = 0);
 322   void call_VM_leaf(address entry_point,
 323                     Register arg_1);
 324   void call_VM_leaf(address entry_point,
 325                     Register arg_1, Register arg_2);
 326   void call_VM_leaf(address entry_point,
 327                     Register arg_1, Register arg_2, Register arg_3);
 328 
 329   void call_VM_leaf(address entry_point,
 330                     Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 331 
 332   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 333   // bypassing the virtual implementation
 334   void super_call_VM_leaf(address entry_point);
 335   void super_call_VM_leaf(address entry_point, Register arg_1);
 336   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 337   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 338   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 339 
 340   void set_last_Java_frame(Register last_java_sp,
 341                            Register last_java_fp,
 342                            address  last_java_pc,
 343                            Register rscratch);
 344 
 345   void set_last_Java_frame(Register last_java_sp,
 346                            Register last_java_fp,
 347                            Label &last_java_pc,
 348                            Register scratch);
 349 
 350   void reset_last_Java_frame(bool clear_fp);
 351 
 352   // jobjects
 353   void clear_jobject_tag(Register possibly_non_local);
 354   void resolve_jobject(Register value, Register tmp);
 355   void resolve_global_jobject(Register value, Register tmp);
 356 
 357   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 358   void c2bool(Register x);
 359 
 360   // C++ bool manipulation
 361 
 362   void movbool(Register dst, Address src);
 363   void movbool(Address dst, bool boolconst);
 364   void movbool(Address dst, Register src);
 365   void testbool(Register dst);
 366 
 367   void resolve_oop_handle(Register result, Register tmp);
 368   void resolve_weak_handle(Register result, Register tmp);
 369   void load_mirror(Register mirror, Register method, Register tmp);
 370   void load_method_holder_cld(Register rresult, Register rmethod);
 371 
 372   void load_method_holder(Register holder, Register method);
 373 
 374   // oop manipulations
 375 
 376   // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
 377   void load_metadata(Register dst, Register src);
 378   void load_narrow_klass_compact(Register dst, Register src);
 379   void load_klass(Register dst, Register src, Register tmp);
 380   void store_klass(Register dst, Register src, Register tmp);
 381 
 382   // Compares the Klass pointer of an object to a given Klass (which might be narrow,
 383   // depending on UseCompressedClassPointers).
 384   void cmp_klass(Register klass, Register obj, Register tmp);
 385 
 386   // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
 387   // Uses tmp1 and tmp2 as temporary registers.
 388   void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
 389 
 390   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 391                       Register tmp1);
 392   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 393                        Register tmp1, Register tmp2, Register tmp3);
 394 
 395   void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
 396 
 397   // inline type data payload offsets...
 398   void payload_offset(Register inline_klass, Register offset);
 399   void payload_addr(Register oop, Register data, Register inline_klass);
 400   // get data payload ptr of a flat value array at index, kills rcx and index
 401   void data_for_value_array_index(Register array, Register array_klass,
 402                                   Register index, Register data);
 403 
 404   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
 405   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
 406   void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
 407                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 408 
 409   // Used for storing null. All other oop constants should be
 410   // stored using routines that take a jobject.
 411   void store_heap_oop_null(Address dst);
 412 
 413   void load_prototype_header(Register dst, Register src, Register tmp);
 414 
 415   void store_klass_gap(Register dst, Register src);
 416 
 417   // This dummy is to prevent a call to store_heap_oop from
 418   // converting a zero (like null) into a Register by giving
 419   // the compiler two choices it can't resolve
 420 
 421   void store_heap_oop(Address dst, void* dummy);
 422 
 423   void encode_heap_oop(Register r);
 424   void decode_heap_oop(Register r);
 425   void encode_heap_oop_not_null(Register r);
 426   void decode_heap_oop_not_null(Register r);
 427   void encode_heap_oop_not_null(Register dst, Register src);
 428   void decode_heap_oop_not_null(Register dst, Register src);
 429 
 430   void set_narrow_oop(Register dst, jobject obj);
 431   void set_narrow_oop(Address dst, jobject obj);
 432   void cmp_narrow_oop(Register dst, jobject obj);
 433   void cmp_narrow_oop(Address dst, jobject obj);
 434 
 435   void encode_klass_not_null(Register r, Register tmp);
 436   void decode_klass_not_null(Register r, Register tmp);
 437   void encode_and_move_klass_not_null(Register dst, Register src);
 438   void decode_and_move_klass_not_null(Register dst, Register src);
 439   void set_narrow_klass(Register dst, Klass* k);
 440   void set_narrow_klass(Address dst, Klass* k);
 441   void cmp_narrow_klass(Register dst, Klass* k);
 442   void cmp_narrow_klass(Address dst, Klass* k);
 443 
 444   // if heap base register is used - reinit it with the correct value
 445   void reinit_heapbase();
 446 
 447   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 448 
 449   // Int division/remainder for Java
 450   // (as idivl, but checks for special case as described in JVM spec.)
 451   // returns idivl instruction offset for implicit exception handling
 452   int corrected_idivl(Register reg);
 453 
 454   // Long division/remainder for Java
 455   // (as idivq, but checks for special case as described in JVM spec.)
 456   // returns idivq instruction offset for implicit exception handling
 457   int corrected_idivq(Register reg);
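
  // The "special case as described in JVM spec" is quotient overflow: for example,
  // min_jint / -1 must yield min_jint with remainder 0, whereas a raw idivl would
  // raise a divide fault (#DE) on that input. The corrected_ variants check for this
  // one operand pair and avoid executing the divide for it; likewise for
  // min_jlong / -1 in the 64-bit case.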
 458 
 459   void int3();
 460 
 461   // Long operation macros for a 32bit cpu
 462   // Long negation for Java
 463   void lneg(Register hi, Register lo);
 464 
 465   // Long multiplication for Java
 466   // (destroys contents of eax, ebx, ecx and edx)
 467   void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
 468 
 469   // Long shifts for Java
 470   // (semantics as described in JVM spec.)
 471   void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
 472   void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)
 473 
 474   // Long compare for Java
 475   // (semantics as described in JVM spec.)
 476   void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
 477 
 478 
 479   // misc
 480 
 481   // Sign extension
 482   void sign_extend_short(Register reg);
 483   void sign_extend_byte(Register reg);
 484 
 485   // Division by power of 2, rounding towards 0
 486   void division_with_shift(Register reg, int shift_value);
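
  // Worked example of why this helper exists (values only, no emitted code shown):
  // an arithmetic shift rounds toward negative infinity, so -7 >> 2 == -2, while
  // Java integer division rounds toward zero, so -7 / 4 == -1. A typical fix,
  // assumed here, is to add a bias of (2^shift - 1) to negative dividends before
  // shifting.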
 487 
 488   // dst = c = a * b + c
 489   void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 490   void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 491 
 492   void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 493   void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 494   void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 495   void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 496 
 497 
 498   // same as fcmp2int, but using SSE2
 499   void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 500   void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 501 
 502   void push_IU_state();
 503   void pop_IU_state();
 504 
 505   void push_FPU_state();
 506   void pop_FPU_state();
 507 
 508   void push_CPU_state();
 509   void pop_CPU_state();
 510 
 511   void push_cont_fastpath();
 512   void pop_cont_fastpath();
 513 
 514   void inc_held_monitor_count();
 515   void dec_held_monitor_count();
 516 
 517   DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
 518 
 519   // Round reg up to a multiple of modulus (which must be a power of two)
 520   void round_to(Register reg, int modulus);
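
  // Worked example, assuming the usual add-then-mask idiom behind this helper:
  // round_to(reg, 8) leaves a value that is already a multiple of 8 unchanged and
  // otherwise rounds it up, e.g. 13 -> (13 + 7) & ~7 == 16.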
 521 
 522 private:
 523   // General purpose and XMM registers potentially clobbered by native code; there
 524   // is no need for FPU or AVX opmask related methods here because C1/interpreter
 525   // - always save/restore the FPU state as a whole
 526   // - do not care about the AVX-512 opmask registers
 527   static RegSet call_clobbered_gp_registers();
 528   static XMMRegSet call_clobbered_xmm_registers();
 529 
 530   void push_set(XMMRegSet set, int offset);
 531   void pop_set(XMMRegSet set, int offset);
 532 
 533 public:
 534   void push_set(RegSet set, int offset = -1);
 535   void pop_set(RegSet set, int offset = -1);
 536 
 537   // Push and pop everything that might be clobbered by a native
 538   // runtime call.
 539   // Only save the lower 64 bits of each vector register.
 540   // Additional registers can be excluded in a passed RegSet.
 541   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 542   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 543 
 544   void push_call_clobbered_registers(bool save_fpu = true) {
 545     push_call_clobbered_registers_except(RegSet(), save_fpu);
 546   }
 547   void pop_call_clobbered_registers(bool restore_fpu = true) {
 548     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 549   }
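
  // A hedged usage sketch: these helpers are meant to bracket a call into native
  // or runtime code; the _except variants let a result register survive the pop
  // (the register choice below is illustrative only).
  //
  //   push_call_clobbered_registers_except(RegSet::of(rax));
  //   // ... make the native/runtime call here ...
  //   pop_call_clobbered_registers_except(RegSet::of(rax));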
 550 
 551   // allocation
 552 
 553   // Object / value buffer allocation...
 554   // Allocate instance of klass, assumes klass initialized by caller
 555   // new_obj prefers to be rax
 556   // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
 557   void allocate_instance(Register klass, Register new_obj,
 558                          Register t1, Register t2,
 559                          bool clear_fields, Label& alloc_failed);
 560 
 561   void tlab_allocate(
 562     Register obj,                      // result: pointer to object after successful allocation
 563     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 564     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 565     Register t1,                       // temp register
 566     Register t2,                       // temp register
 567     Label&   slow_case                 // continuation point if fast allocation fails
 568   );
 569   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 570 
 571   // For field "index" within "klass", return inline_klass ...
 572   void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
 573 
 574   void inline_layout_info(Register klass, Register index, Register layout_info);
 575 
 576   void population_count(Register dst, Register src, Register scratch1, Register scratch2);
 577 
 578   // interface method calling
 579   void lookup_interface_method(Register recv_klass,
 580                                Register intf_klass,
 581                                RegisterOrConstant itable_index,
 582                                Register method_result,
 583                                Register scan_temp,
 584                                Label& no_such_interface,
 585                                bool return_method = true);
 586 
 587   void lookup_interface_method_stub(Register recv_klass,
 588                                     Register holder_klass,
 589                                     Register resolved_klass,
 590                                     Register method_result,
 591                                     Register scan_temp,
 592                                     Register temp_reg2,
 593                                     Register receiver,
 594                                     int itable_index,
 595                                     Label& L_no_such_interface);
 596 
 597   // virtual method calling
 598   void lookup_virtual_method(Register recv_klass,
 599                              RegisterOrConstant vtable_index,
 600                              Register method_result);
 601 
 602   // Test sub_klass against super_klass, with fast and slow paths.
 603 
 604   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 605   // One of the three labels can be null, meaning take the fall-through.
 606   // If super_check_offset is -1, the value is loaded up from super_klass.
 607   // No registers are killed, except temp_reg.
 608   void check_klass_subtype_fast_path(Register sub_klass,
 609                                      Register super_klass,
 610                                      Register temp_reg,
 611                                      Label* L_success,
 612                                      Label* L_failure,
 613                                      Label* L_slow_path,
 614                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 615 
 616   // The rest of the type check; must be wired to a corresponding fast path.
 617   // It does not repeat the fast path logic, so don't use it standalone.
 618   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 619   // Updates the sub's secondary super cache as necessary.
 620   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 621   void check_klass_subtype_slow_path(Register sub_klass,
 622                                      Register super_klass,
 623                                      Register temp_reg,
 624                                      Register temp2_reg,
 625                                      Label* L_success,
 626                                      Label* L_failure,
 627                                      bool set_cond_codes = false);
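
  // A wiring sketch for the fast/slow pair described above (labels and registers
  // are illustrative only). Passing nullptr as L_slow_path makes the "maybe" case
  // fall through from the fast path into the slow path:
  //
  //   Label L_ok, L_fail;
  //   check_klass_subtype_fast_path(rsi, rax, rcx, &L_ok, &L_fail, nullptr);
  //   check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_ok, &L_fail);
  //   // ... bind(L_ok) and bind(L_fail) where control should resume ...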
 628 
 629   // The 64-bit version, which may do a hashed subclass lookup.
 630   void check_klass_subtype_slow_path(Register sub_klass,
 631                                      Register super_klass,
 632                                      Register temp_reg,
 633                                      Register temp2_reg,
 634                                      Register temp3_reg,
 635                                      Register temp4_reg,
 636                                      Label* L_success,
 637                                      Label* L_failure);
 638 
 639   // Three parts of a hashed subclass lookup: a simple linear search,
 640   // a table lookup, and a fallback that does linear probing in the
 641   // event of a hash collision.
 642   void check_klass_subtype_slow_path_linear(Register sub_klass,
 643                                             Register super_klass,
 644                                             Register temp_reg,
 645                                             Register temp2_reg,
 646                                             Label* L_success,
 647                                             Label* L_failure,
 648                                             bool set_cond_codes = false);
 649   void check_klass_subtype_slow_path_table(Register sub_klass,
 650                                            Register super_klass,
 651                                            Register temp_reg,
 652                                            Register temp2_reg,
 653                                            Register temp3_reg,
 654                                            Register result_reg,
 655                                            Label* L_success,
 656                                            Label* L_failure);
 657   void hashed_check_klass_subtype_slow_path(Register sub_klass,
 658                                             Register super_klass,
 659                                             Register temp_reg,
 660                                             Label* L_success,
 661                                             Label* L_failure);
 662 
 663   // As above, but with a constant super_klass.
 664   // The result is in Register result, not the condition codes.
 665   void lookup_secondary_supers_table_const(Register sub_klass,
 666                                            Register super_klass,
 667                                            Register temp1,
 668                                            Register temp2,
 669                                            Register temp3,
 670                                            Register temp4,
 671                                            Register result,
 672                                            u1 super_klass_slot);
 673 
 674   using Assembler::salq;
 675   void salq(Register dest, Register count);
 676   using Assembler::rorq;
 677   void rorq(Register dest, Register count);
 678   void lookup_secondary_supers_table_var(Register sub_klass,
 679                                          Register super_klass,
 680                                          Register temp1,
 681                                          Register temp2,
 682                                          Register temp3,
 683                                          Register temp4,
 684                                          Register result);
 685 
 686   void lookup_secondary_supers_table_slow_path(Register r_super_klass,
 687                                                Register r_array_base,
 688                                                Register r_array_index,
 689                                                Register r_bitmap,
 690                                                Register temp1,
 691                                                Register temp2,
 692                                                Label* L_success,
 693                                                Label* L_failure = nullptr);
 694 
 695   void verify_secondary_supers_table(Register r_sub_klass,
 696                                      Register r_super_klass,
 697                                      Register expected,
 698                                      Register temp1,
 699                                      Register temp2,
 700                                      Register temp3);
 701 
 702   void repne_scanq(Register addr, Register value, Register count, Register limit,
 703                    Label* L_success,
 704                    Label* L_failure = nullptr);
 705 
 706   // If r is valid, return r.
 707   // If r is invalid, remove a register r2 from available_regs, add r2
 708   // to regs_to_push, then return r2.
 709   Register allocate_if_noreg(const Register r,
 710                              RegSetIterator<Register> &available_regs,
 711                              RegSet &regs_to_push);
 712 
 713   // Simplified, combined version, good for typical uses.
 714   // Falls through on failure.
 715   void check_klass_subtype(Register sub_klass,
 716                            Register super_klass,
 717                            Register temp_reg,
 718                            Label& L_success);
 719 
 720   void clinit_barrier(Register klass,
 721                       Label* L_fast_path = nullptr,
 722                       Label* L_slow_path = nullptr);
 723 
 724   // method handles (JSR 292)
 725   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 726 
 727   // Debugging
 728 
 729   // only if +VerifyOops
 730   void _verify_oop(Register reg, const char* s, const char* file, int line);
 731   void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
 732 
 733   void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
 734     if (VerifyOops) {
 735       _verify_oop(reg, s, file, line);
 736     }
 737   }
 738   void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
 739     if (VerifyOops) {
 740       _verify_oop_addr(reg, s, file, line);
 741     }
 742   }
 743 
 744   // TODO: verify method and klass metadata (compare against vptr?)
 745   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 746   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 747 
 748 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
 749 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
 750 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
 751 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 752 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 753 
 754   // Verify or restore cpu control state after JNI call
 755   void restore_cpu_control_state_after_jni(Register rscratch);
 756 
 757   // prints msg, dumps registers and stops execution
 758   void stop(const char* msg);
 759 
 760   // prints msg and continues
 761   void warn(const char* msg);
 762 
 763   // dumps registers and other state
 764   void print_state();
 765 
 766   static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
 767   static void debug64(char* msg, int64_t pc, int64_t regs[]);
 768   static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
 769   static void print_state64(int64_t pc, int64_t regs[]);
 770 
 771   void os_breakpoint();
 772 
 773   void untested()                                { stop("untested"); }
 774 
 775   void unimplemented(const char* what = "");
 776 
 777   void should_not_reach_here()                   { stop("should not reach here"); }
 778 
 779   void print_CPU_state();
 780 
 781   // Stack overflow checking
 782   void bang_stack_with_offset(int offset) {
 783     // stack grows down, caller passes positive offset
 784     assert(offset > 0, "must bang with positive offset");
 785     movl(Address(rsp, (-offset)), rax);
 786   }
 787 
 788   // Writes to successive stack pages until the given offset is reached, to check
 789   // for stack overflow plus shadow pages. Also clobbers tmp.
 790   void bang_stack_size(Register size, Register tmp);
 791 
 792   // Check for reserved stack access in method being exited (for JIT)
 793   void reserved_stack_check();
 794 
 795   void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);
 796 
 797   void verify_tlab();
 798 
 799   static Condition negate_condition(Condition cond);
 800 
 801   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
 802   // operands. In general the names are modified to avoid hiding the instruction in Assembler
 803   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 804   // here in MacroAssembler. The major exception to this rule is call.
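  //
  // A hedged example of the pattern (the global symbol and scratch register are
  // illustrative only): the optional rscratch argument gives the macro a register
  // in which to materialize the literal address when it is not reachable directly.
  //
  //   cmp32(rax, ExternalAddress((address)&some_global_counter), r10);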
 805 
 806   // Arithmetics
 807 
 808 
 809   void addptr(Address dst, int32_t src) { addq(dst, src); }
 810   void addptr(Address dst, Register src);
 811 
 812   void addptr(Register dst, Address src) { addq(dst, src); }
 813   void addptr(Register dst, int32_t src);
 814   void addptr(Register dst, Register src);
 815   void addptr(Register dst, RegisterOrConstant src) {
 816     if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
 817     else                   addptr(dst, src.as_register());
 818   }
 819 
 820   void andptr(Register dst, int32_t src);
 821   void andptr(Register src1, Register src2) { andq(src1, src2); }
 822   void andptr(Register dst, Address src) { andq(dst, src); }
 823 
 824   using Assembler::andq;
 825   void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
 826 
 827   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 828 
 829   // renamed to drag out the casting of address to int32_t/intptr_t
 830   void cmp32(Register src1, int32_t imm);
 831 
 832   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 833   // compare reg - mem, or reg - &mem
 834   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 835 
 836   void cmp32(Register src1, Address src2);
 837 
 838   void cmpoop(Register src1, Register src2);
 839   void cmpoop(Register src1, Address src2);
 840   void cmpoop(Register dst, jobject obj, Register rscratch);
 841 
 842   // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
 843   void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
 844 
 845   void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
 846 
 847   void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
 848   void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }
 849 
 850   void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
 851   void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }
 852 
 853   // cmp64 to avoid hiding cmpq
 854   void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
 855 
 856   void cmpxchgptr(Register reg, Address adr);
 857 
 858   void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
 859 
 860   void imulptr(Register dst, Register src) { imulq(dst, src); }
 861   void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }
 862 
 863 
 864   void negptr(Register dst) { negq(dst); }
 865 
 866   void notptr(Register dst) { notq(dst); }
 867 
 868   void shlptr(Register dst, int32_t shift);
 869   void shlptr(Register dst) { shlq(dst); }
 870 
 871   void shrptr(Register dst, int32_t shift);
 872   void shrptr(Register dst) { shrq(dst); }
 873 
 874   void sarptr(Register dst) { sarq(dst); }
 875   void sarptr(Register dst, int32_t src) { sarq(dst, src); }
 876 
 877   void subptr(Address dst, int32_t src) { subq(dst, src); }
 878 
 879   void subptr(Register dst, Address src) { subq(dst, src); }
 880   void subptr(Register dst, int32_t src);
 881   // Force generation of a 4-byte immediate value even if it fits into 8 bits
 882   void subptr_imm32(Register dst, int32_t src);
 883   void subptr(Register dst, Register src);
 884   void subptr(Register dst, RegisterOrConstant src) {
 885     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 886     else                   subptr(dst,       src.as_register());
 887   }
 888 
 889   void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
 890   void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }
 891 
 892   void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
 893   void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }
 894 
 895   void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }
 896 
 897 
 898 
 899   // Helper functions for statistics gathering.
 900   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 901   void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
 902   // Unconditional atomic increment.
 903   void atomic_incl(Address counter_addr);
 904   void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
 905   void atomic_incq(Address counter_addr);
 906   void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
 907   void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
 908   void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
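
  // A small usage sketch for the counter helpers above (the counter symbols are
  // hypothetical and shown for illustration only):
  //
  //   atomic_incl(ExternalAddress((address)&_total_calls));                          // unconditional bump
  //   cond_inc32(Assembler::equal, ExternalAddress((address)&_fast_path_hits), r10); // bump only if ZF is set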
 909 
 910   using Assembler::lea;
 911   void lea(Register dst, AddressLiteral adr);
 912   void lea(Address  dst, AddressLiteral adr, Register rscratch);
 913 
 914   void leal32(Register dst, Address src) { leal(dst, src); }
 915 
 916   // Import other testl() methods from the parent class or else
 917   // they will be hidden by the following overriding declaration.
 918   using Assembler::testl;
 919   void testl(Address dst, int32_t imm32);
 920   void testl(Register dst, int32_t imm32);
 921   void testl(Register dst, AddressLiteral src); // requires reachable address
 922   using Assembler::testq;
 923   void testq(Address dst, int32_t imm32);
 924   void testq(Register dst, int32_t imm32);
 925 
 926   void orptr(Register dst, Address src) { orq(dst, src); }
 927   void orptr(Register dst, Register src) { orq(dst, src); }
 928   void orptr(Register dst, int32_t src) { orq(dst, src); }
 929   void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }
 930 
 931   void testptr(Register src, int32_t imm32) { testq(src, imm32); }
 932   void testptr(Register src1, Address src2) { testq(src1, src2); }
 933   void testptr(Address src, int32_t imm32) { testq(src, imm32); }
 934   void testptr(Register src1, Register src2);
 935 
 936   void xorptr(Register dst, Register src) { xorq(dst, src); }
 937   void xorptr(Register dst, Address src) { xorq(dst, src); }
 938 
 939   // Calls
 940 
 941   void call(Label& L, relocInfo::relocType rtype);
 942   void call(Register entry);
 943   void call(Address addr) { Assembler::call(addr); }
 944 
 945   // NOTE: this call transfers to the effective address of entry, NOT
 946   // the address contained by entry, because that is the more natural
 947   // behavior for jumps/calls.
 948   void call(AddressLiteral entry, Register rscratch = rax);
 949 
 950   // Emit the CompiledIC call idiom
 951   void ic_call(address entry, jint method_index = 0);
 952   static int ic_check_size();
 953   int ic_check(int end_alignment);
 954 
 955   void emit_static_call_stub();
 956 
 957   // Jumps
 958 
 959   // NOTE: these jumps transfer to the effective address of dst, NOT
 960   // the address contained by dst, because that is the more natural
 961   // behavior for jumps/calls.
 962   void jump(AddressLiteral dst, Register rscratch = noreg);
 963 
 964   void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
 965 
 966   // 32bit can do a case table jump in one instruction but we no longer allow the base
 967   // to be installed in the Address class. This jump will transfer to the address
 968   // contained in the location described by entry (not the address of entry)
 969   void jump(ArrayAddress entry, Register rscratch);
 970 
 971   // Adding more natural conditional jump instructions
 972   void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
 973   void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
 974   void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
 975   void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
 976   void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
 977   void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
 978   void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
 979   void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
 980   void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
 981   void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
 982   void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
 983   void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
 984   void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
 985   void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
 986   void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
 987   void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
 988   void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
 989   void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
 990   void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
 991   void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
 992   void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
 993   void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
 994   void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
 995   void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
 996   void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
 997   void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
 998   void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
 999   void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
1000   void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
1001   void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
1002   // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1003   // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1004 
1005   // Short versions of the above
1006   void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
1007   void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
1008   void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
1009   void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
1010   void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
1011   void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
1012   void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
1013   void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
1014   void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
1015   void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
1016   void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
1017   void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1018   void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1019   void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
1020   void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
1021   void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
1022   void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
1023   void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
1024   void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
1025   void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
1026   void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1027   void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1028   void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
1029   void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
1030   void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
1031   void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
1032   void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
1033   void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
1034   void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
1035   void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
1036   // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
1037   // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
1038 
1039   // Floating
1040 
1041   void push_f(XMMRegister r);
1042   void pop_f(XMMRegister r);
1043   void push_d(XMMRegister r);
1044   void pop_d(XMMRegister r);
1045 
1046   void push_ppx(Register src);
1047   void pop_ppx(Register dst);
1048 
1049   void andpd(XMMRegister dst, XMMRegister    src) { Assembler::andpd(dst, src); }
1050   void andpd(XMMRegister dst, Address        src) { Assembler::andpd(dst, src); }
1051   void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1052 
1053   void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }
1054 
1055   void andps(XMMRegister dst, XMMRegister    src) { Assembler::andps(dst, src); }
1056   void andps(XMMRegister dst, Address        src) { Assembler::andps(dst, src); }
1057   void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1058 
1059   void comiss(XMMRegister dst, XMMRegister    src) { Assembler::comiss(dst, src); }
1060   void comiss(XMMRegister dst, Address        src) { Assembler::comiss(dst, src); }
1061   void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1062 
1063   void comisd(XMMRegister dst, XMMRegister    src) { Assembler::comisd(dst, src); }
1064   void comisd(XMMRegister dst, Address        src) { Assembler::comisd(dst, src); }
1065   void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1066 
1067   void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }
1068 
1069   void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
1070   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1071   void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1072 
1073  private:
1074   void sha256_AVX2_one_round_compute(
1075     Register  reg_old_h,
1076     Register  reg_a,
1077     Register  reg_b,
1078     Register  reg_c,
1079     Register  reg_d,
1080     Register  reg_e,
1081     Register  reg_f,
1082     Register  reg_g,
1083     Register  reg_h,
1084     int iter);
1085   void sha256_AVX2_four_rounds_compute_first(int start);
1086   void sha256_AVX2_four_rounds_compute_last(int start);
1087   void sha256_AVX2_one_round_and_sched(
1088         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1089         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
1090         XMMRegister xmm_2,     /* ymm6 */
1091         XMMRegister xmm_3,     /* ymm7 */
1092         Register    reg_a,      /* == eax on iteration 0, then rotate the 8 registers right on each subsequent iteration */
1093         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
1094         Register    reg_c,      /* edi */
1095         Register    reg_d,      /* esi */
1096         Register    reg_e,      /* r8d */
1097         Register    reg_f,      /* r9d */
1098         Register    reg_g,      /* r10d */
1099         Register    reg_h,      /* r11d */
1100         int iter);
1101 
1102   void addm(int disp, Register r1, Register r2);
1103 
1104   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1105                                      Register e, Register f, Register g, Register h, int iteration);
1106 
1107   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1108                                           Register a, Register b, Register c, Register d, Register e, Register f,
1109                                           Register g, Register h, int iteration);
1110 
1111   void addmq(int disp, Register r1, Register r2);
1112  public:
1113   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1114                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1115                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1116                    bool multi_block, XMMRegister shuf_mask);
1117   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1118                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1119                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1120                    XMMRegister shuf_mask);
1121   void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
1122 
1123   void fast_md5(Register buf, Address state, Address ofs, Address limit,
1124                 bool multi_block);
1125 
1126   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1127                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1128                  Register buf, Register state, Register ofs, Register limit, Register rsp,
1129                  bool multi_block);
1130 
1131   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1132                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1133                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1134                    bool multi_block, XMMRegister shuf_mask);
1135 
1136   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1137                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1138                 Register rax, Register rcx, Register rdx, Register tmp);
1139 
1140 private:
1141 
1142   // these are private because users should be doing movflt/movdbl
1143 
1144   void movss(Address     dst, XMMRegister    src) { Assembler::movss(dst, src); }
1145   void movss(XMMRegister dst, XMMRegister    src) { Assembler::movss(dst, src); }
1146   void movss(XMMRegister dst, Address        src) { Assembler::movss(dst, src); }
1147   void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1148 
1149   void movlpd(XMMRegister dst, Address        src) { Assembler::movlpd(dst, src); }
1150   void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1151 
1152 public:
1153 
1154   void addsd(XMMRegister dst, XMMRegister    src) { Assembler::addsd(dst, src); }
1155   void addsd(XMMRegister dst, Address        src) { Assembler::addsd(dst, src); }
1156   void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1157 
1158   void addss(XMMRegister dst, XMMRegister    src) { Assembler::addss(dst, src); }
1159   void addss(XMMRegister dst, Address        src) { Assembler::addss(dst, src); }
1160   void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1161 
1162   void addpd(XMMRegister dst, XMMRegister    src) { Assembler::addpd(dst, src); }
1163   void addpd(XMMRegister dst, Address        src) { Assembler::addpd(dst, src); }
1164   void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1165 
1166   using Assembler::vbroadcasti128;
1167   void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1168 
1169   using Assembler::vbroadcastsd;
1170   void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1171 
1172   using Assembler::vbroadcastss;
1173   void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1174 
1175   // Vector float blend
1176   void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1177   void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1178 
1179   void divsd(XMMRegister dst, XMMRegister    src) { Assembler::divsd(dst, src); }
1180   void divsd(XMMRegister dst, Address        src) { Assembler::divsd(dst, src); }
1181   void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1182 
1183   void divss(XMMRegister dst, XMMRegister    src) { Assembler::divss(dst, src); }
1184   void divss(XMMRegister dst, Address        src) { Assembler::divss(dst, src); }
1185   void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1186 
1187   // Move Unaligned Double Quadword
1188   void movdqu(Address     dst, XMMRegister    src);
1189   void movdqu(XMMRegister dst, XMMRegister    src);
1190   void movdqu(XMMRegister dst, Address        src);
1191   void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1192 
1193   void kmovwl(Register  dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1194   void kmovwl(Address   dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1195   void kmovwl(KRegister dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1196   void kmovwl(KRegister dst, Register       src) { Assembler::kmovwl(dst, src); }
1197   void kmovwl(KRegister dst, Address        src) { Assembler::kmovwl(dst, src); }
1198   void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1199 
1200   void kmovql(KRegister dst, KRegister      src) { Assembler::kmovql(dst, src); }
1201   void kmovql(KRegister dst, Register       src) { Assembler::kmovql(dst, src); }
1202   void kmovql(Register  dst, KRegister      src) { Assembler::kmovql(dst, src); }
1203   void kmovql(KRegister dst, Address        src) { Assembler::kmovql(dst, src); }
1204   void kmovql(Address   dst, KRegister      src) { Assembler::kmovql(dst, src); }
1205   void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1206 
1207   // Safe move operation: lowers to 16-bit opmask moves on targets that support
1208   // only the AVX512F feature, and to 64-bit moves on targets that also support AVX512BW.
1209   void kmov(Address  dst, KRegister src);
1210   void kmov(KRegister dst, Address src);
1211   void kmov(KRegister dst, KRegister src);
1212   void kmov(Register dst, KRegister src);
1213   void kmov(KRegister dst, Register src);
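       // Illustrative sketch (not part of this interface, assuming the usual
       // "#define __ masm->" convention in stub code): a feature-agnostic opmask
       // spill/reload can be written with the kmov wrappers above, e.g.
       //   __ kmov(rscratch1, k2);   // opmask -> general-purpose register
       //   __ kmov(k2, rscratch1);   // general-purpose register -> opmask
       // The wrappers pick kmovwl or kmovql based on the available AVX-512
       // features, so callers do not have to check them explicitly.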
1214 
1215   using Assembler::movddup;
1216   void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1217 
1218   using Assembler::vmovddup;
1219   void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1220 
1221   // AVX Unaligned forms
1222   void vmovdqu(Address     dst, XMMRegister    src);
1223   void vmovdqu(XMMRegister dst, Address        src);
1224   void vmovdqu(XMMRegister dst, XMMRegister    src);
1225   void vmovdqu(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1226   void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1227   void vmovdqu(XMMRegister dst, XMMRegister    src, int vector_len);
1228   void vmovdqu(XMMRegister dst, Address        src, int vector_len);
1229   void vmovdqu(Address     dst, XMMRegister    src, int vector_len);
1230 
1231   // AVX Aligned forms
1232   using Assembler::vmovdqa;
1233   void vmovdqa(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1234   void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1235 
1236   // AVX512 Unaligned
1237   void evmovdqu(BasicType type, KRegister kmask, Address     dst, XMMRegister src, bool merge, int vector_len);
1238   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address     src, bool merge, int vector_len);
1239   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1240 
1241   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1242   void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1243 
1244   void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1245     if (dst->encoding() != src->encoding() || mask != k0)  {
1246       Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1247     }
1248   }
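       // Note: this masked register-to-register form (and the corresponding w/l/q
       // forms below) deliberately skips emitting the move when dst == src and the
       // mask is k0, i.e. when the move would be a no-op.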
1249   void evmovdqub(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1250   void evmovdqub(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1251   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1252 
1253   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1254   void evmovdquw(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1255   void evmovdquw(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1256 
1257   void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1258     if (dst->encoding() != src->encoding() || mask != k0) {
1259       Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1260     }
1261   }
1262   void evmovdquw(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1263   void evmovdquw(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1264   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1265 
1266   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1267      if (dst->encoding() != src->encoding()) {
1268        Assembler::evmovdqul(dst, src, vector_len);
1269      }
1270   }
1271   void evmovdqul(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1272   void evmovdqul(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1273 
1274   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1275     if (dst->encoding() != src->encoding() || mask != k0)  {
1276       Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1277     }
1278   }
1279   void evmovdqul(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1280   void evmovdqul(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1281   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1282 
1283   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1284     if (dst->encoding() != src->encoding()) {
1285       Assembler::evmovdquq(dst, src, vector_len);
1286     }
1287   }
1288   void evmovdquq(XMMRegister dst, Address        src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1289   void evmovdquq(Address     dst, XMMRegister    src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1290   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1291   void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1292 
1293   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1294     if (dst->encoding() != src->encoding() || mask != k0) {
1295       Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1296     }
1297   }
1298   void evmovdquq(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1299   void evmovdquq(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1300   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1301   void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1302 
1303   using Assembler::movapd;
1304   void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1305 
1306   // Move Aligned Double Quadword
1307   void movdqa(XMMRegister dst, XMMRegister    src) { Assembler::movdqa(dst, src); }
1308   void movdqa(XMMRegister dst, Address        src) { Assembler::movdqa(dst, src); }
1309   void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1310 
1311   void movsd(Address     dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1312   void movsd(XMMRegister dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1313   void movsd(XMMRegister dst, Address        src) { Assembler::movsd(dst, src); }
1314   void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1315 
1316   void mulpd(XMMRegister dst, XMMRegister    src) { Assembler::mulpd(dst, src); }
1317   void mulpd(XMMRegister dst, Address        src) { Assembler::mulpd(dst, src); }
1318   void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1319 
1320   void mulsd(XMMRegister dst, XMMRegister    src) { Assembler::mulsd(dst, src); }
1321   void mulsd(XMMRegister dst, Address        src) { Assembler::mulsd(dst, src); }
1322   void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1323 
1324   void mulss(XMMRegister dst, XMMRegister    src) { Assembler::mulss(dst, src); }
1325   void mulss(XMMRegister dst, Address        src) { Assembler::mulss(dst, src); }
1326   void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1327 
1328   // Carry-Less Multiplication Quadword
1329   void pclmulldq(XMMRegister dst, XMMRegister src) {
1330     // 0x00 - multiply lower 64 bits [0:63]
1331     Assembler::pclmulqdq(dst, src, 0x00);
1332   }
1333   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1334     // 0x11 - multiply upper 64 bits [64:127]
1335     Assembler::pclmulqdq(dst, src, 0x11);
1336   }
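       // The imm8 operand of pclmulqdq selects the 64-bit halves being multiplied:
       // bit 0 picks the qword of the first operand (dst), bit 4 picks the qword of
       // the second operand (src). Hence 0x00 and 0x11 above form the lo*lo and
       // hi*hi partial products; the mixed lo*hi / hi*lo products use 0x10 and 0x01
       // (see the vpclmul* helpers further below).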
1337 
1338   void pcmpeqb(XMMRegister dst, XMMRegister src);
1339   void pcmpeqw(XMMRegister dst, XMMRegister src);
1340 
1341   void pcmpestri(XMMRegister dst, Address src, int imm8);
1342   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1343 
1344   void pmovzxbw(XMMRegister dst, XMMRegister src);
1345   void pmovzxbw(XMMRegister dst, Address src);
1346 
1347   void pmovmskb(Register dst, XMMRegister src);
1348 
1349   void ptest(XMMRegister dst, XMMRegister src);
1350 
1351   void roundsd(XMMRegister dst, XMMRegister    src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1352   void roundsd(XMMRegister dst, Address        src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1353   void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1354 
1355   void sqrtss(XMMRegister dst, XMMRegister     src) { Assembler::sqrtss(dst, src); }
1356   void sqrtss(XMMRegister dst, Address         src) { Assembler::sqrtss(dst, src); }
1357   void sqrtss(XMMRegister dst, AddressLiteral  src, Register rscratch = noreg);
1358 
1359   void subsd(XMMRegister dst, XMMRegister    src) { Assembler::subsd(dst, src); }
1360   void subsd(XMMRegister dst, Address        src) { Assembler::subsd(dst, src); }
1361   void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1362 
1363   void subss(XMMRegister dst, XMMRegister    src) { Assembler::subss(dst, src); }
1364   void subss(XMMRegister dst, Address        src) { Assembler::subss(dst, src); }
1365   void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1366 
1367   void ucomiss(XMMRegister dst, XMMRegister    src) { Assembler::ucomiss(dst, src); }
1368   void ucomiss(XMMRegister dst, Address        src) { Assembler::ucomiss(dst, src); }
1369   void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1370 
1371   void ucomisd(XMMRegister dst, XMMRegister    src) { Assembler::ucomisd(dst, src); }
1372   void ucomisd(XMMRegister dst, Address        src) { Assembler::ucomisd(dst, src); }
1373   void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1374 
1375   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1376   void xorpd(XMMRegister dst, XMMRegister    src);
1377   void xorpd(XMMRegister dst, Address        src) { Assembler::xorpd(dst, src); }
1378   void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1379 
1380   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1381   void xorps(XMMRegister dst, XMMRegister    src);
1382   void xorps(XMMRegister dst, Address        src) { Assembler::xorps(dst, src); }
1383   void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1384 
1385   // Shuffle Bytes
1386   void pshufb(XMMRegister dst, XMMRegister    src) { Assembler::pshufb(dst, src); }
1387   void pshufb(XMMRegister dst, Address        src) { Assembler::pshufb(dst, src); }
1388   void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1389   // AVX 3-operand instructions
1390 
1391   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddsd(dst, nds, src); }
1392   void vaddsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddsd(dst, nds, src); }
1393   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1394 
1395   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddss(dst, nds, src); }
1396   void vaddss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddss(dst, nds, src); }
1397   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1398 
1399   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1400   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1401 
1402   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len);
1403   void vpaddb(XMMRegister dst, XMMRegister nds, Address        src, int vector_len);
1404   void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1405 
1406   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1407   void vpaddw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1408 
1409   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1410   void vpaddd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1411   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1412 
1413   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1414   void vpand(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1415   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1416 
1417   using Assembler::vpbroadcastd;
1418   void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1419 
1420   using Assembler::vpbroadcastq;
1421   void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1422 
1423   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1424   void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1425 
1426   void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1427   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1428   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1429 
1430   // Vector compares
1431   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1432     Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1433   }
1434   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1435 
1436   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1437     Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1438   }
1439   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1440 
1441   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1442     Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1443   }
1444   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1445 
1446   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1447     Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1448   }
1449   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1450 
1451   void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1452 
1453   // Emit comparison instruction for the specified comparison predicate.
1454   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1455   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1456 
1457   void vpmovzxbw(XMMRegister dst, Address     src, int vector_len);
1458   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1459 
1460   void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1461 
1462   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1463   void vpmullw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1464 
1465   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1466   void vpmulld(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1467   void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1468 
1469   void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1470 
1471   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1472   void vpsubb(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1473 
1474   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1475   void vpsubw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1476 
1477   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1478   void vpsraw(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1479 
1480   void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1481   void evpsrad(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1482 
1483   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1484   void evpsraq(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1485 
1486   using Assembler::evpsllw;
1487   void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1488     if (!is_varshift) {
1489       Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1490     } else {
1491       Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1492     }
1493   }
1494   void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1495     if (!is_varshift) {
1496       Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1497     } else {
1498       Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1499     }
1500   }
1501   void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1502     if (!is_varshift) {
1503       Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1504     } else {
1505       Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1506     }
1507   }
1508   void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1509     if (!is_varshift) {
1510       Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1511     } else {
1512       Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1513     }
1514   }
1515   void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1516     if (!is_varshift) {
1517       Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1518     } else {
1519       Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1520     }
1521   }
1522 
1523   using Assembler::evpsrlq;
1524   void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1525     if (!is_varshift) {
1526       Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1527     } else {
1528       Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1529     }
1530   }
1531   using Assembler::evpsraw;
1532   void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1533     if (!is_varshift) {
1534       Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1535     } else {
1536       Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1537     }
1538   }
1539   using Assembler::evpsrad;
1540   void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1541     if (!is_varshift) {
1542       Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1543     } else {
1544       Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1545     }
1546   }
1547   using Assembler::evpsraq;
1548   void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1549     if (!is_varshift) {
1550       Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1551     } else {
1552       Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1553     }
1554   }
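       // In the masked shift helpers above, 'is_varshift' selects between the
       // uniform forms (evpsllw/evpslld/..., which shift every element by the
       // count held in the low 64 bits of 'src') and the variable forms
       // (evpsllvw/evpsllvd/..., which shift each element by the corresponding
       // element of 'src').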
1555 
1556   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1557   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1558   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1559   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1560 
1561   void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1562   void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1563   void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1564   void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1565 
1566   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1567   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1568 
1569   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1570   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1571 
1572   void vptest(XMMRegister dst, XMMRegister src);
1573   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1574 
1575   void punpcklbw(XMMRegister dst, XMMRegister src);
1576   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1577 
1578   void pshufd(XMMRegister dst, Address src, int mode);
1579   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1580 
1581   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1582   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1583 
1584   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1585   void vandpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1586   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1587 
1588   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1589   void vandps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1590   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1591 
1592   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1593 
1594   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivsd(dst, nds, src); }
1595   void vdivsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivsd(dst, nds, src); }
1596   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1597 
1598   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivss(dst, nds, src); }
1599   void vdivss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivss(dst, nds, src); }
1600   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1601 
1602   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulsd(dst, nds, src); }
1603   void vmulsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulsd(dst, nds, src); }
1604   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1605 
1606   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulss(dst, nds, src); }
1607   void vmulss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulss(dst, nds, src); }
1608   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1609 
1610   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubsd(dst, nds, src); }
1611   void vsubsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubsd(dst, nds, src); }
1612   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1613 
1614   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubss(dst, nds, src); }
1615   void vsubss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubss(dst, nds, src); }
1616   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1617 
1618   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1619   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1620 
1621   // AVX Vector instructions
1622 
1623   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1624   void vxorpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1625   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1626 
1627   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1628   void vxorps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1629   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1630 
1631   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1632     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1633       Assembler::vpxor(dst, nds, src, vector_len);
1634     else
1635       Assembler::vxorpd(dst, nds, src, vector_len);
1636   }
1637   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1638     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1639       Assembler::vpxor(dst, nds, src, vector_len);
1640     else
1641       Assembler::vxorpd(dst, nds, src, vector_len);
1642   }
1643   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
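       // On AVX (but not AVX2) hardware the 256-bit integer vpxor encoding does
       // not exist, so the wrappers above fall back to vxorpd, which yields the
       // same bit pattern; the two instructions differ only in execution domain.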
1644 
1645   // Simple version for AVX2 256bit vectors
1646   void vpxor(XMMRegister dst, XMMRegister src) {
1647     assert(UseAVX >= 2, "Should be at least AVX2");
1648     Assembler::vpxor(dst, dst, src, AVX_256bit);
1649   }
1650   void vpxor(XMMRegister dst, Address src) {
1651     assert(UseAVX >= 2, "Should be at least AVX2");
1652     Assembler::vpxor(dst, dst, src, AVX_256bit);
1653   }
1654 
1655   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1656   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1657 
1658   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1659     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1660       Assembler::vinserti32x4(dst, nds, src, imm8);
1661     } else if (UseAVX > 1) {
1662       // vinserti128 is available only in AVX2
1663       Assembler::vinserti128(dst, nds, src, imm8);
1664     } else {
1665       Assembler::vinsertf128(dst, nds, src, imm8);
1666     }
1667   }
1668 
1669   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1670     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1671       Assembler::vinserti32x4(dst, nds, src, imm8);
1672     } else if (UseAVX > 1) {
1673       // vinserti128 is available only in AVX2
1674       Assembler::vinserti128(dst, nds, src, imm8);
1675     } else {
1676       Assembler::vinsertf128(dst, nds, src, imm8);
1677     }
1678   }
1679 
1680   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1681     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1682       Assembler::vextracti32x4(dst, src, imm8);
1683     } else if (UseAVX > 1) {
1684       // vextracti128 is available only in AVX2
1685       Assembler::vextracti128(dst, src, imm8);
1686     } else {
1687       Assembler::vextractf128(dst, src, imm8);
1688     }
1689   }
1690 
1691   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1692     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1693       Assembler::vextracti32x4(dst, src, imm8);
1694     } else if (UseAVX > 1) {
1695       // vextracti128 is available only in AVX2
1696       Assembler::vextracti128(dst, src, imm8);
1697     } else {
1698       Assembler::vextractf128(dst, src, imm8);
1699     }
1700   }
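       // The vinserti128/vextracti128 wrappers above pick an encoding the current
       // CPU supports: the 32x4 EVEX forms on AVX-512 hardware without VL, the AVX2
       // integer forms otherwise, and the AVX1 floating-point forms (bit-identical
       // result) as a last resort. Illustrative use (assuming "#define __ masm->"):
       //   __ vextracti128(xmm1, xmm0, 1);       // copy the upper 128 bits of ymm0
       //   __ vinserti128(xmm0, xmm0, xmm1, 1);  // and write them back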
1701 
1702   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1703   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1704     vinserti128(dst, dst, src, 1);
1705   }
1706   void vinserti128_high(XMMRegister dst, Address src) {
1707     vinserti128(dst, dst, src, 1);
1708   }
1709   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1710     vextracti128(dst, src, 1);
1711   }
1712   void vextracti128_high(Address dst, XMMRegister src) {
1713     vextracti128(dst, src, 1);
1714   }
1715 
1716   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1717     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1718       Assembler::vinsertf32x4(dst, dst, src, 1);
1719     } else {
1720       Assembler::vinsertf128(dst, dst, src, 1);
1721     }
1722   }
1723 
1724   void vinsertf128_high(XMMRegister dst, Address src) {
1725     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1726       Assembler::vinsertf32x4(dst, dst, src, 1);
1727     } else {
1728       Assembler::vinsertf128(dst, dst, src, 1);
1729     }
1730   }
1731 
1732   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1733     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1734       Assembler::vextractf32x4(dst, src, 1);
1735     } else {
1736       Assembler::vextractf128(dst, src, 1);
1737     }
1738   }
1739 
1740   void vextractf128_high(Address dst, XMMRegister src) {
1741     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1742       Assembler::vextractf32x4(dst, src, 1);
1743     } else {
1744       Assembler::vextractf128(dst, src, 1);
1745     }
1746   }
1747 
1748   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1749   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1750     Assembler::vinserti64x4(dst, dst, src, 1);
1751   }
1752   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1753     Assembler::vinsertf64x4(dst, dst, src, 1);
1754   }
1755   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1756     Assembler::vextracti64x4(dst, src, 1);
1757   }
1758   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1759     Assembler::vextractf64x4(dst, src, 1);
1760   }
1761   void vextractf64x4_high(Address dst, XMMRegister src) {
1762     Assembler::vextractf64x4(dst, src, 1);
1763   }
1764   void vinsertf64x4_high(XMMRegister dst, Address src) {
1765     Assembler::vinsertf64x4(dst, dst, src, 1);
1766   }
1767 
1768   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1769   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1770     vinserti128(dst, dst, src, 0);
1771   }
1772   void vinserti128_low(XMMRegister dst, Address src) {
1773     vinserti128(dst, dst, src, 0);
1774   }
1775   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1776     vextracti128(dst, src, 0);
1777   }
1778   void vextracti128_low(Address dst, XMMRegister src) {
1779     vextracti128(dst, src, 0);
1780   }
1781 
1782   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1783     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1784       Assembler::vinsertf32x4(dst, dst, src, 0);
1785     } else {
1786       Assembler::vinsertf128(dst, dst, src, 0);
1787     }
1788   }
1789 
1790   void vinsertf128_low(XMMRegister dst, Address src) {
1791     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1792       Assembler::vinsertf32x4(dst, dst, src, 0);
1793     } else {
1794       Assembler::vinsertf128(dst, dst, src, 0);
1795     }
1796   }
1797 
1798   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1799     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1800       Assembler::vextractf32x4(dst, src, 0);
1801     } else {
1802       Assembler::vextractf128(dst, src, 0);
1803     }
1804   }
1805 
1806   void vextractf128_low(Address dst, XMMRegister src) {
1807     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1808       Assembler::vextractf32x4(dst, src, 0);
1809     } else {
1810       Assembler::vextractf128(dst, src, 0);
1811     }
1812   }
1813 
1814   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1815   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1816     Assembler::vinserti64x4(dst, dst, src, 0);
1817   }
1818   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1819     Assembler::vinsertf64x4(dst, dst, src, 0);
1820   }
1821   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1822     Assembler::vextracti64x4(dst, src, 0);
1823   }
1824   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1825     Assembler::vextractf64x4(dst, src, 0);
1826   }
1827   void vextractf64x4_low(Address dst, XMMRegister src) {
1828     Assembler::vextractf64x4(dst, src, 0);
1829   }
1830   void vinsertf64x4_low(XMMRegister dst, Address src) {
1831     Assembler::vinsertf64x4(dst, dst, src, 0);
1832   }
1833 
1834   // Carry-Less Multiplication Quadword
1835   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1836     // 0x00 - multiply lower 64 bits [0:63]
1837     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1838   }
1839   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1840     // 0x11 - multiply upper 64 bits [64:127]
1841     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1842   }
1843   void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1844     // 0x10 - multiply nds[0:63] and src[64:127]
1845     Assembler::vpclmulqdq(dst, nds, src, 0x10);
1846   }
1847   void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1848     // 0x01 - multiply nds[64:127] and src[0:63]
1849     Assembler::vpclmulqdq(dst, nds, src, 0x01);
1850   }
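       // Illustrative sketch (assuming "#define __ masm->"): a full 128x128-bit
       // carry-less product, as used by CRC- and GHASH-style kernels, can be built
       // from the four partial products above, e.g.
       //   __ vpclmulldq   (xmm2, xmm0, xmm1);  // lo(a) * lo(b)
       //   __ vpclmulhdq   (xmm3, xmm0, xmm1);  // hi(a) * hi(b)
       //   __ vpclmullqhqdq(xmm4, xmm0, xmm1);  // lo(a) * hi(b)
       //   __ vpclmulhqlqdq(xmm5, xmm0, xmm1);  // hi(a) * lo(b)
       // after which the two middle terms are xored together and folded into the
       // low/high halves of the 256-bit result.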
1851 
1852   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1853     // 0x00 - multiply lower 64 bits [0:63]
1854     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1855   }
1856   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1857     // 0x11 - multiply upper 64 bits [64:127]
1858     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1859   }
1860 
1861   // AVX-512 mask operations.
1862   void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1863   void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1864   void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1865   void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1866   void kortest(uint masklen, KRegister src1, KRegister src2);
1867   void ktest(uint masklen, KRegister src1, KRegister src2);
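       // Illustrative sketch (assuming "#define __ masm->", with k2 holding the
       // result of an earlier 16-lane vector compare and L_no_match a caller-defined
       // label):
       //   __ kortest(16, k2, k2);              // ZF is set iff no lane matched
       //   __ jcc(Assembler::zero, L_no_match);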
1868 
1869   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1870   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1871 
1872   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1873   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1874 
1875   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1876   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1877 
1878   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1879   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1880 
1881   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1882   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1883   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1884   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1885 
1886   using Assembler::evpandq;
1887   void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1888 
1889   using Assembler::evpaddq;
1890   void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1891 
1892   using Assembler::evporq;
1893   void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1894 
1895   using Assembler::vpshufb;
1896   void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1897 
1898   using Assembler::vpor;
1899   void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1900 
1901   using Assembler::vpternlogq;
1902   void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1903 
1904   void cmov32( Condition cc, Register dst, Address  src);
1905   void cmov32( Condition cc, Register dst, Register src);
1906 
1907   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1908 
1909   void cmovptr(Condition cc, Register dst, Address  src) { cmovq(cc, dst, src); }
1910   void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1911 
1912   void movoop(Register dst, jobject obj);
1913   void movoop(Address  dst, jobject obj, Register rscratch);
1914 
1915   void mov_metadata(Register dst, Metadata* obj);
1916   void mov_metadata(Address  dst, Metadata* obj, Register rscratch);
1917 
1918   void movptr(Register     dst, Register       src);
1919   void movptr(Register     dst, Address        src);
1920   void movptr(Register     dst, AddressLiteral src);
1921   void movptr(Register     dst, ArrayAddress   src);
1922   void movptr(Register     dst, intptr_t       src);
1923   void movptr(Address      dst, Register       src);
1924   void movptr(Address      dst, int32_t        imm);
1925   void movptr(Address      dst, intptr_t       src, Register rscratch);
1926   void movptr(ArrayAddress dst, Register       src, Register rscratch);
1927 
1928   void movptr(Register dst, RegisterOrConstant src) {
1929     if (src.is_constant()) movptr(dst, src.as_constant());
1930     else                   movptr(dst, src.as_register());
1931   }
1932 
1933 
1934   // to avoid hiding movl
1935   void mov32(Register       dst, AddressLiteral src);
1936   void mov32(AddressLiteral dst, Register        src, Register rscratch = noreg);
1937 
1938   // Import other mov() methods from the parent class or else
1939   // they will be hidden by the following overriding declaration.
1940   using Assembler::movdl;
1941   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1942 
1943   using Assembler::movq;
1944   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1945 
1946   // Can push value or effective address
1947   void pushptr(AddressLiteral src, Register rscratch);
1948 
1949   void pushptr(Address src) { pushq(src); }
1950   void popptr(Address src) { popq(src); }
1951 
1952   void pushoop(jobject obj, Register rscratch);
1953   void pushklass(Metadata* obj, Register rscratch);
1954 
1955   // sign-extend as needed, from a 32-bit (l) value to a ptr-sized element
1956   void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1957   void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1958 
1959 
1960  public:
1961   // Inline type specific methods
1962   #include "asm/macroAssembler_common.hpp"
1963 
1964   int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1965   bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1966   bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1967                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1968                             RegState reg_state[]);
1969   bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1970                           VMRegPair* from, int from_count, int& from_index, VMReg to,
1971                           RegState reg_state[], Register val_array);
1972   int extend_stack_for_inline_args(int args_on_stack);
1973   void remove_frame(int initial_framesize, bool needs_stack_repair);
1974   VMReg spill_reg_for(VMReg reg);
1975 
1976   // clear memory of size 'cnt' qwords, starting at 'base';
1977   // if 'is_large' is set, do not try to produce a short loop
1978   void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1979 
1980   // clear memory initialization sequence for a constant size
1981   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1982 
1983   // clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers
1984   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
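       // Minimal usage sketch (assuming "#define __ masm->"; register choices are
       // illustrative only):
       //   __ clear_mem(rdi, rcx, rax, xmm0, /*is_large=*/false, /*word_copy_only=*/false);
       // clears 'rcx' qwords starting at 'rdi', passing 'rax' for 'val' and xmm0 as
       // the vector temporary.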
1985 
1986   // Fill primitive arrays
1987   void generate_fill(BasicType t, bool aligned,
1988                      Register to, Register value, Register count,
1989                      Register rtmp, XMMRegister xtmp);
1990 
1991   void encode_iso_array(Register src, Register dst, Register len,
1992                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1993                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1994 
1995   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1996   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1997                              Register y, Register y_idx, Register z,
1998                              Register carry, Register product,
1999                              Register idx, Register kdx);
2000   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
2001                               Register yz_idx, Register idx,
2002                               Register carry, Register product, int offset);
2003   void multiply_128_x_128_bmi2_loop(Register y, Register z,
2004                                     Register carry, Register carry2,
2005                                     Register idx, Register jdx,
2006                                     Register yz_idx1, Register yz_idx2,
2007                                     Register tmp, Register tmp3, Register tmp4);
2008   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
2009                                Register yz_idx, Register idx, Register jdx,
2010                                Register carry, Register product,
2011                                Register carry2);
2012   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
2013                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
2014   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
2015                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2016   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
2017                             Register tmp2);
2018   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
2019                        Register rdxReg, Register raxReg);
2020   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
2021   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2022                        Register tmp3, Register tmp4);
2023   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2024                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2025 
2026   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
2027                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2028                Register raxReg);
2029   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
2030                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2031                Register raxReg);
2032   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
2033                            Register result, Register tmp1, Register tmp2,
2034                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
2035 
2036   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
2037   void update_byte_crc32(Register crc, Register val, Register table);
2038   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
2039 
2040   void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
2041   void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
2042                                 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
2043                                 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
2044 
2045   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
2046   // Note on a naming convention:
2047   // Prefix w = register only used on a Westmere+ architecture
2048   // Prefix n = register only used on a Nehalem architecture
2049   void crc32c_ipl_alg4(Register in_out, uint32_t n,
2050                        Register tmp1, Register tmp2, Register tmp3);
2051   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2052                         Register in_out,
2053                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2054                         XMMRegister w_xtmp2,
2055                         Register tmp1,
2056                         Register n_tmp2, Register n_tmp3);
2057   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2058                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2059                        Register tmp1, Register tmp2,
2060                        Register n_tmp3);
2061   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
2062                          Register in_out1, Register in_out2, Register in_out3,
2063                          Register tmp1, Register tmp2, Register tmp3,
2064                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2065                          Register tmp4, Register tmp5,
2066                          Register n_tmp6);
2067   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
2068                             Register tmp1, Register tmp2, Register tmp3,
2069                             Register tmp4, Register tmp5, Register tmp6,
2070                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2071                             bool is_pclmulqdq_supported);
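  // crc32c_ipl_alg2_alt2() is the top-level CRC32C routine used by the stubs; the
  // crc32c_* routines above are its helpers. is_pclmulqdq_supported selects between
  // the carry-less-multiply code path (Westmere+, w_* registers) and the table-driven
  // fallback (Nehalem, n_* registers).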
2072   // Fold 128-bit data chunk
2073   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2074   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
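  // The 128-bit fold uses carry-less multiplication (PCLMULQDQ), in the style of
  // Intel's CRC-by-PCLMULQDQ white paper. Conceptually (a sketch, with xK holding
  // the precomputed folding constants):
  //
  //   xcrc = clmul(xcrc.lo64, xK.lo64) ^ clmul(xcrc.hi64, xK.hi64) ^ next_128_bits_of_buf
  //
  // so each iteration advances the running remainder by 128 bits of input.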
2075   // Fold 512-bit data chunk
2076   void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2077   // Fold 8-bit data
2078   void fold_8bit_crc32(Register crc, Register table, Register tmp);
2079   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
2080 
2081   // Compress char[] array to byte[].
2082   void char_array_compress(Register src, Register dst, Register len,
2083                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2084                            XMMRegister tmp4, Register tmp5, Register result,
2085                            KRegister mask1 = knoreg, KRegister mask2 = knoreg);
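  // A scalar sketch of the compression semantics (the vector code handles many chars
  // per iteration): a char can be compressed iff its upper byte is zero.
  //
  //   for (int i = 0; i < len; i++) {
  //     jchar c = src[i];
  //     if (c > 0xFF) break;     // not representable as a byte, take the failure path
  //     dst[i] = (jbyte)c;
  //   }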
2086 
2087   // Inflate byte[] array to char[].
2088   void byte_array_inflate(Register src, Register dst, Register len,
2089                           XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
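  // Inflation is the inverse operation: each byte is zero-extended into a 16-bit
  // char, i.e. roughly dst[i] = (jchar)(src[i] & 0xFF) for i in [0, len).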
2090 
2091   void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2092                    Register length, Register temp, int vec_enc);
2093 
2094   void fill64_masked(uint shift, Register dst, int disp,
2095                      XMMRegister xmm, KRegister mask, Register length,
2096                      Register temp, bool use64byteVector = false);
2097 
2098   void fill32_masked(uint shift, Register dst, int disp,
2099                      XMMRegister xmm, KRegister mask, Register length,
2100                      Register temp);
2101 
2102   void fill32(Address dst, XMMRegister xmm);
2103 
2104   void fill32(Register dst, int disp, XMMRegister xmm);
2105 
2106   void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2107 
2108   void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
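  // fill32()/fill64() store one 32- or 64-byte vector; the *_masked variants use an
  // AVX-512 opmask (KRegister) so that only the leading 'length' elements of a final,
  // partial vector are written. A typical pattern (sketch) is to fill the aligned bulk
  // with full-width stores and finish the tail with a single masked store instead of a
  // scalar loop.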
2109 
2110   void convert_f2i(Register dst, XMMRegister src);
2111   void convert_d2i(Register dst, XMMRegister src);
2112   void convert_f2l(Register dst, XMMRegister src);
2113   void convert_d2l(Register dst, XMMRegister src);
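  // convert_f2i/_d2i/_f2l/_d2l implement the Java semantics for floating-point to
  // integer conversion: NaN converts to 0 and out-of-range values saturate to
  // MIN_VALUE/MAX_VALUE. The truncating cvttss2si/cvttsd2si instructions return the
  // "integer indefinite" value (e.g. 0x80000000) for such inputs, so a fix-up path
  // is presumably taken whenever that pattern is produced.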
2114   void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2115   void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
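  // round_float()/round_double() implement the Math.round() intrinsic: round to the
  // nearest integer with ties rounding toward positive infinity, with rtmp and rcx
  // available as scratch registers.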
2116 
2117   void cache_wb(Address line);
2118   void cache_wbsync(bool is_pre);
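  // cache_wb() writes back the cache line containing 'line' (CLWB, CLFLUSHOPT or
  // CLFLUSH, depending on CPU support) and cache_wbsync() emits the ordering fence
  // needed before/after a run of such write-backs; these presumably back the Unsafe
  // writeback intrinsics used by MappedByteBuffer::force().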
2119 
2120 #ifdef COMPILER2_OR_JVMCI
2121   void generate_fill_avx3(BasicType type, Register to, Register value,
2122                           Register count, Register rtmp, XMMRegister xtmp);
2123 #endif // COMPILER2_OR_JVMCI
2124 
2125   void vallones(XMMRegister dst, int vector_len);
2126 
2127   void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2128 
2129   void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
2130   void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
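  // Lightweight locking fast path: try to push obj onto the current thread's lock
  // stack and flip the mark word from unlocked to locked with a CAS, branching to
  // 'slow' on contention, inflation or lock-stack overflow. reg_rax is conventionally
  // rax, since cmpxchg implicitly uses rax for the compare value.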
2131 
2132   void save_legacy_gprs();
2133   void restore_legacy_gprs();
2134   void setcc(Assembler::Condition comparison, Register dst);
2135 };
2136 
2137 #endif // CPU_X86_MACROASSEMBLER_X86_HPP