/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

class ciInlineKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // call_VM_base returns the register which contains the thread upon return.
  // If no last_java_sp is specified (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe();
  virtual void check_and_handle_earlyret();

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);
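
  // Illustrative sketch (the register and field offset below are assumptions,
  // not taken from a real call site): null_check decides internally, via
  // needs_explicit_null_check, whether any code has to be emitted. A small,
  // known offset can rely on the implicit OS fault of the access that follows,
  // while the default offset of -1 forces an explicit probe of M[reg].
  //
  //   null_check(robj, oopDesc::klass_offset_in_bytes());  // typically emits nothing
  //   null_check(robj);                                     // always emits a probe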

  // markWord tests, kills markWord reg
  void test_markword_is_inline_type(Register markword, Label& is_inline_type);

  // inlineKlass queries, kills temp_reg
  void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);

  void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
  void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
  void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
  void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);

  // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
  void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
  void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
  void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
  void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);

  // Check array klass layout helper for flat or null-free arrays...
  void test_flat_array_layout(Register lh, Label& is_flat_array);
  void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
        (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
        (op == 0x8D) /* lea */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { incrementq(reg, value); }
  void decrement(Register reg, int value = 1) { decrementq(reg, value); }
  void increment(Address dst, int value = 1)  { incrementq(dst, value); }
  void decrement(Address dst, int value = 1)  { decrementq(dst, value); }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }
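
  // A round trip through the two helpers above (a sketch; the register choices
  // are illustrative assumptions): rax ends up holding the sign-extended 16-bit
  // encoding and xmm0 the value widened back to a float.
  //
  //   flt_to_flt16(rax, xmm0, xmm1);   // float in xmm0 -> IEEE 754 binary16 bits in rax
  //   flt16_to_flt(xmm0, rax);         // binary16 bits in rax -> float in xmm0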

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
  // The pointer will be loaded into the thread register. This is a slow version that does a native call.
  // Normally, JavaThread pointer is available in r15_thread, use that where possible.
  void get_thread_slow(Register thread);

  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);
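
  // Typical shape of a call into the runtime (a hedged sketch; the entry point
  // and argument register are placeholders, not a real call site): the macro
  // sets up the last_Java_frame linkage, passes the current thread implicitly,
  // and leaves an oop result, if any, in the named register.
  //
  //   call_VM(rax,
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),  // placeholder entry
  //           rbx /* arg_1 */);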

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result_oop(Register oop_result);
  void get_vm_result_metadata(Register metadata_result);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label &last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register tmp);
  void resolve_global_jobject(Register value, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations

  // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
  void load_metadata(Register dst, Register src);
  void load_narrow_klass_compact(Register dst, Register src);
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  // Compares the Klass pointer of an object to a given Klass (which might be narrow,
  // depending on UseCompressedClassPointers).
  void cmp_klass(Register klass, Register obj, Register tmp);

  // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
  // Uses tmp1 and tmp2 as temporary registers.
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);

  // inline type data payload offsets...
  void payload_offset(Register inline_klass, Register offset);
  void payload_addr(Register oop, Register data, Register inline_klass);
  // get the data payload ptr of a flat value array at index, kills rcx and index
  void data_for_value_array_index(Register array, Register array_klass,
                                  Register index, Register data);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src, Register tmp);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
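
  // Why the "corrected" variants exist: the JVM spec requires
  // min_jint / -1 == min_jint with remainder 0, but a raw idivl raises #DE for
  // that input because +2^31 is not representable. A sketch of the emitted
  // shape (illustrative, not the exact code sequence):
  //
  //   cmpl(rax, min_jint);  jcc(Assembler::notEqual, normal_case);
  //   xorl(rdx, rdx);                     // remainder 0 for the special case
  //   cmpl(reg, -1);        jcc(Assembler::equal, done);
  //   bind(normal_case);
  //   cdql();
  //   idivl(reg);                         // this offset is what gets returned, so a
  //   bind(done);                         // divide-by-zero fault here can be mapped back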

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - always saves/restores the FPU state as a whole
  // - does not care about the AVX-512 opmask registers
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation

  // Object / value buffer allocation...
  // Allocate instance of klass, assumes klass initialized by caller
  // new_obj prefers to be rax
  // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
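
  // A hedged usage sketch (labels, registers and the size constant are
  // illustrative assumptions): allocate a fixed-size instance from the TLAB
  // and fall back to a runtime call when the fast path fails.
  //
  //   Label slow_case, done;
  //   tlab_allocate(rax, noreg, instance_size_in_bytes, rbx, rcx, slow_case);
  //   jmp(done);
  //   bind(slow_case);
  //   // ... call into the runtime allocator ...
  //   bind(done);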
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // For field "index" within "klass", return inline_klass ...
  void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);

  void inline_layout_info(Register klass, Register index, Register layout_info);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // The 64-bit version, which may do a hashed subclass lookup.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);

  // Three parts of a hashed subclass lookup: a simple linear search,
  // a table lookup, and a fallback that does linear probing in the
  // event of a hash collision.
  void check_klass_subtype_slow_path_linear(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);
  void check_klass_subtype_slow_path_table(Register sub_klass,
                                           Register super_klass,
                                           Register temp_reg,
                                           Register temp2_reg,
                                           Register temp3_reg,
                                           Register result_reg,
                                           Label* L_success,
                                           Label* L_failure);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Label* L_success,
                                            Label* L_failure);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table_const(Register sub_klass,
                                           Register super_klass,
                                           Register temp1,
                                           Register temp2,
                                           Register temp3,
                                           Register temp4,
                                           Register result,
                                           u1 super_klass_slot);

  using Assembler::salq;
  void salq(Register dest, Register count);
  using Assembler::rorq;
  void rorq(Register dest, Register count);
  void lookup_secondary_supers_table_var(Register sub_klass,
                                         Register super_klass,
                                         Register temp1,
                                         Register temp2,
                                         Register temp3,
                                         Register temp4,
                                         Register result);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // If r is valid, return r.
  // If r is invalid, remove a register r2 from available_regs, add r2
  // to regs_to_push, then return r2.
  Register allocate_if_noreg(const Register r,
                             RegSetIterator<Register> &available_regs,
                             RegSet &regs_to_push);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
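
  // Minimal sketch of the combined check (register choices are assumptions):
  // branch to ok when the klass in rbx is a subtype of the klass in rax,
  // fall through otherwise, e.g. into a throw or a slower path.
  //
  //   Label ok;
  //   check_klass_subtype(rbx /* sub */, rax /* super */, rcx /* temp */, ok);
  //   // not a subtype: falls through to here
  //   bind(ok);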

  void clinit_barrier(Register klass,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
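
  // These macros cost nothing unless +VerifyOops is set, so they can be
  // sprinkled liberally through generated code. An illustrative use (the
  // register and stack slot are assumptions):
  //
  //   verify_oop(rax);
  //   verify_oop_addr(Address(rsp, wordSize));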

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }
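
  // Shape of a typical shadow-zone bang built on the helper above (a sketch;
  // the page count is a placeholder for whatever StackOverflow configuration
  // the caller uses):
  //
  //   const int page_size = (int)os::vm_page_size();
  //   for (int pages = 1; pages <= shadow_zone_pages; pages++) {
  //     bang_stack_with_offset(pages * page_size);  // touches rsp - pages * page_size
  //   }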

  // Writes to successive stack pages until offset is reached, to check for
  // stack overflow + shadow pages. Also clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { addq(dst, src); }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { addq(dst, src); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { andq(src1, src2); }
  void andptr(Register dst, Address src) { andq(dst, src); }

  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
  void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }

  void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
  void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { imulq(dst, src); }
  void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }


  void negptr(Register dst) { negq(dst); }

  void notptr(Register dst) { notq(dst); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { shlq(dst); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { shrq(dst); }

  void sarptr(Register dst) { sarq(dst); }
  void sarptr(Register dst, int32_t src) { sarq(dst, src); }

  void subptr(Address dst, int32_t src) { subq(dst, src); }

  void subptr(Register dst, Address src) { subq(dst, src); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8 bits
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst,       src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
  void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }

  void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
  void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }

  void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
  void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
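
  // Illustrative statistics update (the counter symbols are placeholders, not
  // real VM counters): bump one counter only when the condition codes say the
  // fast path was taken, without disturbing those condition codes, and another
  // one unconditionally.
  //
  //   cond_inc32(Assembler::equal, ExternalAddress((address)&fast_path_counter));
  //   atomic_incl(ExternalAddress((address)&total_counter), rscratch1);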

  using Assembler::lea;
  void lea(Register dst, AddressLiteral adr);
  void lea(Address  dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { orq(dst, src); }
  void orptr(Register dst, Register src) { orq(dst, src); }
  void orptr(Register dst, int32_t src) { orq(dst, src); }
  void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }

  void testptr(Register src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Address src2) { testq(src1, src2); }
  void testptr(Address src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { xorq(dst, src); }
  void xorptr(Register dst, Address src) { xorq(dst, src); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
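
  // The aliases above simply forward to jcc/jccb. A short compare-and-branch
  // sketch using them (register and label are illustrative):
  //
  //   Label L_done;
  //   testptr(rax, rax);
  //   jz_b(L_done);          // 8-bit displacement, so L_done must be bound nearby
  //   incrementq(rcx);
  //   bind(L_done);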
1038 
1039   // Floating
1040 
1041   void push_f(XMMRegister r);
1042   void pop_f(XMMRegister r);
1043   void push_d(XMMRegister r);
1044   void pop_d(XMMRegister r);
1045 
1046   void andpd(XMMRegister dst, XMMRegister    src) { Assembler::andpd(dst, src); }
1047   void andpd(XMMRegister dst, Address        src) { Assembler::andpd(dst, src); }
1048   void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1049 
1050   void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }
1051 
1052   void andps(XMMRegister dst, XMMRegister    src) { Assembler::andps(dst, src); }
1053   void andps(XMMRegister dst, Address        src) { Assembler::andps(dst, src); }
1054   void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1055 
1056   void comiss(XMMRegister dst, XMMRegister    src) { Assembler::comiss(dst, src); }
1057   void comiss(XMMRegister dst, Address        src) { Assembler::comiss(dst, src); }
1058   void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1059 
1060   void comisd(XMMRegister dst, XMMRegister    src) { Assembler::comisd(dst, src); }
1061   void comisd(XMMRegister dst, Address        src) { Assembler::comisd(dst, src); }
1062   void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1063 
1064   void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }
1065 
1066   void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
1067   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1068   void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1069 
1070  private:
1071   void sha256_AVX2_one_round_compute(
1072     Register  reg_old_h,
1073     Register  reg_a,
1074     Register  reg_b,
1075     Register  reg_c,
1076     Register  reg_d,
1077     Register  reg_e,
1078     Register  reg_f,
1079     Register  reg_g,
1080     Register  reg_h,
1081     int iter);
1082   void sha256_AVX2_four_rounds_compute_first(int start);
1083   void sha256_AVX2_four_rounds_compute_last(int start);
1084   void sha256_AVX2_one_round_and_sched(
1085         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1086         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
1087         XMMRegister xmm_2,     /* ymm6 */
1088         XMMRegister xmm_3,     /* ymm7 */
1089         Register    reg_a,      /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
1090         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
1091         Register    reg_c,      /* edi */
1092         Register    reg_d,      /* esi */
1093         Register    reg_e,      /* r8d */
1094         Register    reg_f,      /* r9d */
1095         Register    reg_g,      /* r10d */
1096         Register    reg_h,      /* r11d */
1097         int iter);
1098 
1099   void addm(int disp, Register r1, Register r2);
1100 
1101   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1102                                      Register e, Register f, Register g, Register h, int iteration);
1103 
1104   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1105                                           Register a, Register b, Register c, Register d, Register e, Register f,
1106                                           Register g, Register h, int iteration);
1107 
1108   void addmq(int disp, Register r1, Register r2);
1109  public:
1110   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1111                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1112                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1113                    bool multi_block, XMMRegister shuf_mask);
1114   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1115                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1116                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1117                    XMMRegister shuf_mask);
1118   void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
1119 
1120   void fast_md5(Register buf, Address state, Address ofs, Address limit,
1121                 bool multi_block);
1122 
1123   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1124                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1125                  Register buf, Register state, Register ofs, Register limit, Register rsp,
1126                  bool multi_block);
1127 
1128   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1129                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1130                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1131                    bool multi_block, XMMRegister shuf_mask);
1132 
1133   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1134                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1135                 Register rax, Register rcx, Register rdx, Register tmp);
1136 
1137 private:
1138 
  // these are private because users should be using movflt/movdbl instead
1140 
1141   void movss(Address     dst, XMMRegister    src) { Assembler::movss(dst, src); }
1142   void movss(XMMRegister dst, XMMRegister    src) { Assembler::movss(dst, src); }
1143   void movss(XMMRegister dst, Address        src) { Assembler::movss(dst, src); }
1144   void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1145 
1146   void movlpd(XMMRegister dst, Address        src) {Assembler::movlpd(dst, src); }
1147   void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1148 
1149 public:
1150 
1151   void addsd(XMMRegister dst, XMMRegister    src) { Assembler::addsd(dst, src); }
1152   void addsd(XMMRegister dst, Address        src) { Assembler::addsd(dst, src); }
1153   void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1154 
1155   void addss(XMMRegister dst, XMMRegister    src) { Assembler::addss(dst, src); }
1156   void addss(XMMRegister dst, Address        src) { Assembler::addss(dst, src); }
1157   void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1158 
1159   void addpd(XMMRegister dst, XMMRegister    src) { Assembler::addpd(dst, src); }
1160   void addpd(XMMRegister dst, Address        src) { Assembler::addpd(dst, src); }
1161   void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1162 
1163   using Assembler::vbroadcasti128;
1164   void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1165 
1166   using Assembler::vbroadcastsd;
1167   void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1168 
1169   using Assembler::vbroadcastss;
1170   void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1171 
1172   // Vector float blend
1173   void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1174   void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1175 
1176   void divsd(XMMRegister dst, XMMRegister    src) { Assembler::divsd(dst, src); }
1177   void divsd(XMMRegister dst, Address        src) { Assembler::divsd(dst, src); }
1178   void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1179 
1180   void divss(XMMRegister dst, XMMRegister    src) { Assembler::divss(dst, src); }
1181   void divss(XMMRegister dst, Address        src) { Assembler::divss(dst, src); }
1182   void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1183 
1184   // Move Unaligned Double Quadword
1185   void movdqu(Address     dst, XMMRegister    src);
1186   void movdqu(XMMRegister dst, XMMRegister    src);
1187   void movdqu(XMMRegister dst, Address        src);
1188   void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1189 
1190   void kmovwl(Register  dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1191   void kmovwl(Address   dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1192   void kmovwl(KRegister dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1193   void kmovwl(KRegister dst, Register       src) { Assembler::kmovwl(dst, src); }
1194   void kmovwl(KRegister dst, Address        src) { Assembler::kmovwl(dst, src); }
1195   void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1196 
1197   void kmovql(KRegister dst, KRegister      src) { Assembler::kmovql(dst, src); }
1198   void kmovql(KRegister dst, Register       src) { Assembler::kmovql(dst, src); }
1199   void kmovql(Register  dst, KRegister      src) { Assembler::kmovql(dst, src); }
1200   void kmovql(KRegister dst, Address        src) { Assembler::kmovql(dst, src); }
1201   void kmovql(Address   dst, KRegister      src) { Assembler::kmovql(dst, src); }
1202   void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1203 
  // Safe mask move: lowers to a 16-bit move (kmovwl) on targets that support only the
  // AVX512F feature, and to a 64-bit move (kmovql) on targets that also support AVX512BW.
1206   void kmov(Address  dst, KRegister src);
1207   void kmov(KRegister dst, Address src);
1208   void kmov(KRegister dst, KRegister src);
1209   void kmov(Register dst, KRegister src);
1210   void kmov(KRegister dst, Register src);
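  // A minimal sketch of the expected lowering for the register-to-register form, assuming the
  // usual feature test (the authoritative bodies are in macroAssembler_x86.cpp):
  //
  //   void MacroAssembler::kmov(KRegister dst, KRegister src) {
  //     if (VM_Version::supports_avx512bw()) {
  //       kmovql(dst, src);   // 64-bit mask move requires AVX512BW
  //     } else {
  //       kmovwl(dst, src);   // 16-bit mask move suffices with plain AVX512F
  //     }
  //   }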
1211 
1212   using Assembler::movddup;
1213   void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1214 
1215   using Assembler::vmovddup;
1216   void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1217 
1218   // AVX Unaligned forms
1219   void vmovdqu(Address     dst, XMMRegister    src);
1220   void vmovdqu(XMMRegister dst, Address        src);
1221   void vmovdqu(XMMRegister dst, XMMRegister    src);
1222   void vmovdqu(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1223   void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1224   void vmovdqu(XMMRegister dst, XMMRegister    src, int vector_len);
1225   void vmovdqu(XMMRegister dst, Address        src, int vector_len);
1226   void vmovdqu(Address     dst, XMMRegister    src, int vector_len);
1227 
1228   // AVX Aligned forms
1229   using Assembler::vmovdqa;
1230   void vmovdqa(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1231   void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1232 
1233   // AVX512 Unaligned
1234   void evmovdqu(BasicType type, KRegister kmask, Address     dst, XMMRegister src, bool merge, int vector_len);
1235   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address     src, bool merge, int vector_len);
1236   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1237 
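  // The masked register-to-register wrappers below elide the instruction when it would be a
  // no-op, i.e. when dst and src are the same register and no mask other than k0 is applied.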
1238   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1239   void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1240 
1241   void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1242     if (dst->encoding() != src->encoding() || mask != k0)  {
1243       Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1244     }
1245   }
1246   void evmovdqub(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1247   void evmovdqub(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1248   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1249 
1250   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1251   void evmovdquw(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1252   void evmovdquw(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1253 
1254   void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1255     if (dst->encoding() != src->encoding() || mask != k0) {
1256       Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1257     }
1258   }
1259   void evmovdquw(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1260   void evmovdquw(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1261   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1262 
1263   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1264      if (dst->encoding() != src->encoding()) {
1265        Assembler::evmovdqul(dst, src, vector_len);
1266      }
1267   }
1268   void evmovdqul(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1269   void evmovdqul(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1270 
1271   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1272     if (dst->encoding() != src->encoding() || mask != k0)  {
1273       Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1274     }
1275   }
1276   void evmovdqul(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1277   void evmovdqul(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1278   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1279 
1280   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1281     if (dst->encoding() != src->encoding()) {
1282       Assembler::evmovdquq(dst, src, vector_len);
1283     }
1284   }
1285   void evmovdquq(XMMRegister dst, Address        src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1286   void evmovdquq(Address     dst, XMMRegister    src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1287   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1288   void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1289 
1290   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1291     if (dst->encoding() != src->encoding() || mask != k0) {
1292       Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1293     }
1294   }
1295   void evmovdquq(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1296   void evmovdquq(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1297   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1298   void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1299 
1300   using Assembler::movapd;
1301   void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1302 
1303   // Move Aligned Double Quadword
1304   void movdqa(XMMRegister dst, XMMRegister    src) { Assembler::movdqa(dst, src); }
1305   void movdqa(XMMRegister dst, Address        src) { Assembler::movdqa(dst, src); }
1306   void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1307 
1308   void movsd(Address     dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1309   void movsd(XMMRegister dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1310   void movsd(XMMRegister dst, Address        src) { Assembler::movsd(dst, src); }
1311   void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1312 
1313   void mulpd(XMMRegister dst, XMMRegister    src) { Assembler::mulpd(dst, src); }
1314   void mulpd(XMMRegister dst, Address        src) { Assembler::mulpd(dst, src); }
1315   void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1316 
1317   void mulsd(XMMRegister dst, XMMRegister    src) { Assembler::mulsd(dst, src); }
1318   void mulsd(XMMRegister dst, Address        src) { Assembler::mulsd(dst, src); }
1319   void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1320 
1321   void mulss(XMMRegister dst, XMMRegister    src) { Assembler::mulss(dst, src); }
1322   void mulss(XMMRegister dst, Address        src) { Assembler::mulss(dst, src); }
1323   void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1324 
1325   // Carry-Less Multiplication Quadword
1326   void pclmulldq(XMMRegister dst, XMMRegister src) {
1327     // 0x00 - multiply lower 64 bits [0:63]
1328     Assembler::pclmulqdq(dst, src, 0x00);
1329   }
1330   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1331     // 0x11 - multiply upper 64 bits [64:127]
1332     Assembler::pclmulqdq(dst, src, 0x11);
1333   }
1334 
1335   void pcmpeqb(XMMRegister dst, XMMRegister src);
1336   void pcmpeqw(XMMRegister dst, XMMRegister src);
1337 
1338   void pcmpestri(XMMRegister dst, Address src, int imm8);
1339   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1340 
1341   void pmovzxbw(XMMRegister dst, XMMRegister src);
1342   void pmovzxbw(XMMRegister dst, Address src);
1343 
1344   void pmovmskb(Register dst, XMMRegister src);
1345 
1346   void ptest(XMMRegister dst, XMMRegister src);
1347 
1348   void roundsd(XMMRegister dst, XMMRegister    src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1349   void roundsd(XMMRegister dst, Address        src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1350   void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1351 
1352   void sqrtss(XMMRegister dst, XMMRegister     src) { Assembler::sqrtss(dst, src); }
1353   void sqrtss(XMMRegister dst, Address         src) { Assembler::sqrtss(dst, src); }
1354   void sqrtss(XMMRegister dst, AddressLiteral  src, Register rscratch = noreg);
1355 
1356   void subsd(XMMRegister dst, XMMRegister    src) { Assembler::subsd(dst, src); }
1357   void subsd(XMMRegister dst, Address        src) { Assembler::subsd(dst, src); }
1358   void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1359 
1360   void subss(XMMRegister dst, XMMRegister    src) { Assembler::subss(dst, src); }
1361   void subss(XMMRegister dst, Address        src) { Assembler::subss(dst, src); }
1362   void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1363 
1364   void ucomiss(XMMRegister dst, XMMRegister    src) { Assembler::ucomiss(dst, src); }
1365   void ucomiss(XMMRegister dst, Address        src) { Assembler::ucomiss(dst, src); }
1366   void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1367 
1368   void ucomisd(XMMRegister dst, XMMRegister    src) { Assembler::ucomisd(dst, src); }
1369   void ucomisd(XMMRegister dst, Address        src) { Assembler::ucomisd(dst, src); }
1370   void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1371 
1372   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1373   void xorpd(XMMRegister dst, XMMRegister    src);
1374   void xorpd(XMMRegister dst, Address        src) { Assembler::xorpd(dst, src); }
1375   void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1376 
1377   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1378   void xorps(XMMRegister dst, XMMRegister    src);
1379   void xorps(XMMRegister dst, Address        src) { Assembler::xorps(dst, src); }
1380   void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1381 
1382   // Shuffle Bytes
1383   void pshufb(XMMRegister dst, XMMRegister    src) { Assembler::pshufb(dst, src); }
1384   void pshufb(XMMRegister dst, Address        src) { Assembler::pshufb(dst, src); }
1385   void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1386   // AVX 3-operands instructions
1387 
1388   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddsd(dst, nds, src); }
1389   void vaddsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddsd(dst, nds, src); }
1390   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1391 
1392   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddss(dst, nds, src); }
1393   void vaddss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddss(dst, nds, src); }
1394   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1395 
1396   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1397   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1398 
1399   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len);
1400   void vpaddb(XMMRegister dst, XMMRegister nds, Address        src, int vector_len);
1401   void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1402 
1403   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1404   void vpaddw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1405 
1406   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1407   void vpaddd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1408   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1409 
1410   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1411   void vpand(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1412   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1413 
1414   using Assembler::vpbroadcastd;
1415   void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1416 
1417   using Assembler::vpbroadcastq;
1418   void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1419 
1420   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1421   void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1422 
1423   void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1424   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1425   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1426 
1427   // Vector compares
1428   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1429     Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1430   }
1431   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1432 
1433   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1434     Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1435   }
1436   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1437 
1438   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1439     Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1440   }
1441   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1442 
1443   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1444     Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1445   }
1446   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1447 
1448   void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1449 
1450   // Emit comparison instruction for the specified comparison predicate.
1451   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1452   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1453 
1454   void vpmovzxbw(XMMRegister dst, Address     src, int vector_len);
1455   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1456 
1457   void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1458 
1459   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1460   void vpmullw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1461 
1462   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1463   void vpmulld(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1464   void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1465 
1466   void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1467 
1468   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1469   void vpsubb(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1470 
1471   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1472   void vpsubw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1473 
1474   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1475   void vpsraw(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1476 
1477   void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1478   void evpsrad(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1479 
1480   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1481   void evpsraq(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1482 
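  // For the masked shift wrappers below, 'is_varshift' selects the per-element variable-shift
  // form (e.g. evpsllvw), where each lane is shifted by the corresponding element of 'src';
  // otherwise the uniform-count form (e.g. evpsllw) shifts every lane by the count in 'src'.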
1483   using Assembler::evpsllw;
1484   void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1485     if (!is_varshift) {
1486       Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1487     } else {
1488       Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1489     }
1490   }
1491   void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1492     if (!is_varshift) {
1493       Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1494     } else {
1495       Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1496     }
1497   }
1498   void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1499     if (!is_varshift) {
1500       Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1501     } else {
1502       Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1503     }
1504   }
1505   void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1506     if (!is_varshift) {
1507       Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1508     } else {
1509       Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1510     }
1511   }
1512   void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1513     if (!is_varshift) {
1514       Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1515     } else {
1516       Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1517     }
1518   }
1519 
1520   using Assembler::evpsrlq;
1521   void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1522     if (!is_varshift) {
1523       Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1524     } else {
1525       Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1526     }
1527   }
1528   using Assembler::evpsraw;
1529   void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1530     if (!is_varshift) {
1531       Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1532     } else {
1533       Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1534     }
1535   }
1536   using Assembler::evpsrad;
1537   void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1538     if (!is_varshift) {
1539       Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1540     } else {
1541       Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1542     }
1543   }
1544   using Assembler::evpsraq;
1545   void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1546     if (!is_varshift) {
1547       Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1548     } else {
1549       Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1550     }
1551   }
1552 
1553   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1554   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1555   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1556   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1557 
1558   void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1559   void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1560   void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1561   void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1562 
1563   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1564   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1565 
1566   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1567   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1568 
1569   void vptest(XMMRegister dst, XMMRegister src);
1570   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1571 
1572   void punpcklbw(XMMRegister dst, XMMRegister src);
1573   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1574 
1575   void pshufd(XMMRegister dst, Address src, int mode);
1576   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1577 
1578   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1579   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1580 
1581   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1582   void vandpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1583   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1584 
1585   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1586   void vandps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1587   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1588 
1589   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1590 
1591   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivsd(dst, nds, src); }
1592   void vdivsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivsd(dst, nds, src); }
1593   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1594 
1595   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivss(dst, nds, src); }
1596   void vdivss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivss(dst, nds, src); }
1597   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1598 
1599   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulsd(dst, nds, src); }
1600   void vmulsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulsd(dst, nds, src); }
1601   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1602 
1603   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulss(dst, nds, src); }
1604   void vmulss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulss(dst, nds, src); }
1605   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1606 
1607   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubsd(dst, nds, src); }
1608   void vsubsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubsd(dst, nds, src); }
1609   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1610 
1611   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubss(dst, nds, src); }
1612   void vsubss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubss(dst, nds, src); }
1613   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1614 
1615   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1616   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1617 
1618   // AVX Vector instructions
1619 
1620   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1621   void vxorpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1622   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1623 
1624   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1625   void vxorps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1626   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1627 
1628   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2; 128-bit works on AVX1
1630       Assembler::vpxor(dst, nds, src, vector_len);
1631     else
1632       Assembler::vxorpd(dst, nds, src, vector_len);
1633   }
1634   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2; 128-bit works on AVX1
1636       Assembler::vpxor(dst, nds, src, vector_len);
1637     else
1638       Assembler::vxorpd(dst, nds, src, vector_len);
1639   }
1640   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1641 
1642   // Simple version for AVX2 256bit vectors
1643   void vpxor(XMMRegister dst, XMMRegister src) {
1644     assert(UseAVX >= 2, "Should be at least AVX2");
1645     Assembler::vpxor(dst, dst, src, AVX_256bit);
1646   }
1647   void vpxor(XMMRegister dst, Address src) {
1648     assert(UseAVX >= 2, "Should be at least AVX2");
1649     Assembler::vpxor(dst, dst, src, AVX_256bit);
1650   }
1651 
1652   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1653   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1654 
1655   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1656     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1657       Assembler::vinserti32x4(dst, nds, src, imm8);
1658     } else if (UseAVX > 1) {
1659       // vinserti128 is available only in AVX2
1660       Assembler::vinserti128(dst, nds, src, imm8);
1661     } else {
1662       Assembler::vinsertf128(dst, nds, src, imm8);
1663     }
1664   }
1665 
1666   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1667     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1668       Assembler::vinserti32x4(dst, nds, src, imm8);
1669     } else if (UseAVX > 1) {
1670       // vinserti128 is available only in AVX2
1671       Assembler::vinserti128(dst, nds, src, imm8);
1672     } else {
1673       Assembler::vinsertf128(dst, nds, src, imm8);
1674     }
1675   }
1676 
1677   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1678     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1679       Assembler::vextracti32x4(dst, src, imm8);
1680     } else if (UseAVX > 1) {
1681       // vextracti128 is available only in AVX2
1682       Assembler::vextracti128(dst, src, imm8);
1683     } else {
1684       Assembler::vextractf128(dst, src, imm8);
1685     }
1686   }
1687 
1688   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1689     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1690       Assembler::vextracti32x4(dst, src, imm8);
1691     } else if (UseAVX > 1) {
1692       // vextracti128 is available only in AVX2
1693       Assembler::vextracti128(dst, src, imm8);
1694     } else {
1695       Assembler::vextractf128(dst, src, imm8);
1696     }
1697   }
1698 
1699   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1700   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1701     vinserti128(dst, dst, src, 1);
1702   }
1703   void vinserti128_high(XMMRegister dst, Address src) {
1704     vinserti128(dst, dst, src, 1);
1705   }
1706   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1707     vextracti128(dst, src, 1);
1708   }
1709   void vextracti128_high(Address dst, XMMRegister src) {
1710     vextracti128(dst, src, 1);
1711   }
1712 
1713   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1714     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1715       Assembler::vinsertf32x4(dst, dst, src, 1);
1716     } else {
1717       Assembler::vinsertf128(dst, dst, src, 1);
1718     }
1719   }
1720 
1721   void vinsertf128_high(XMMRegister dst, Address src) {
1722     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1723       Assembler::vinsertf32x4(dst, dst, src, 1);
1724     } else {
1725       Assembler::vinsertf128(dst, dst, src, 1);
1726     }
1727   }
1728 
1729   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1730     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1731       Assembler::vextractf32x4(dst, src, 1);
1732     } else {
1733       Assembler::vextractf128(dst, src, 1);
1734     }
1735   }
1736 
1737   void vextractf128_high(Address dst, XMMRegister src) {
1738     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1739       Assembler::vextractf32x4(dst, src, 1);
1740     } else {
1741       Assembler::vextractf128(dst, src, 1);
1742     }
1743   }
1744 
1745   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1746   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1747     Assembler::vinserti64x4(dst, dst, src, 1);
1748   }
1749   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1750     Assembler::vinsertf64x4(dst, dst, src, 1);
1751   }
1752   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1753     Assembler::vextracti64x4(dst, src, 1);
1754   }
1755   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1756     Assembler::vextractf64x4(dst, src, 1);
1757   }
1758   void vextractf64x4_high(Address dst, XMMRegister src) {
1759     Assembler::vextractf64x4(dst, src, 1);
1760   }
1761   void vinsertf64x4_high(XMMRegister dst, Address src) {
1762     Assembler::vinsertf64x4(dst, dst, src, 1);
1763   }
1764 
1765   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1766   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1767     vinserti128(dst, dst, src, 0);
1768   }
1769   void vinserti128_low(XMMRegister dst, Address src) {
1770     vinserti128(dst, dst, src, 0);
1771   }
1772   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1773     vextracti128(dst, src, 0);
1774   }
1775   void vextracti128_low(Address dst, XMMRegister src) {
1776     vextracti128(dst, src, 0);
1777   }
1778 
1779   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1780     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1781       Assembler::vinsertf32x4(dst, dst, src, 0);
1782     } else {
1783       Assembler::vinsertf128(dst, dst, src, 0);
1784     }
1785   }
1786 
1787   void vinsertf128_low(XMMRegister dst, Address src) {
1788     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1789       Assembler::vinsertf32x4(dst, dst, src, 0);
1790     } else {
1791       Assembler::vinsertf128(dst, dst, src, 0);
1792     }
1793   }
1794 
1795   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1796     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1797       Assembler::vextractf32x4(dst, src, 0);
1798     } else {
1799       Assembler::vextractf128(dst, src, 0);
1800     }
1801   }
1802 
1803   void vextractf128_low(Address dst, XMMRegister src) {
1804     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1805       Assembler::vextractf32x4(dst, src, 0);
1806     } else {
1807       Assembler::vextractf128(dst, src, 0);
1808     }
1809   }
1810 
1811   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1812   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1813     Assembler::vinserti64x4(dst, dst, src, 0);
1814   }
1815   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1816     Assembler::vinsertf64x4(dst, dst, src, 0);
1817   }
1818   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1819     Assembler::vextracti64x4(dst, src, 0);
1820   }
1821   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1822     Assembler::vextractf64x4(dst, src, 0);
1823   }
1824   void vextractf64x4_low(Address dst, XMMRegister src) {
1825     Assembler::vextractf64x4(dst, src, 0);
1826   }
1827   void vinsertf64x4_low(XMMRegister dst, Address src) {
1828     Assembler::vinsertf64x4(dst, dst, src, 0);
1829   }
1830 
1831   // Carry-Less Multiplication Quadword
1832   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1833     // 0x00 - multiply lower 64 bits [0:63]
1834     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1835   }
1836   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1837     // 0x11 - multiply upper 64 bits [64:127]
1838     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1839   }
1840   void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1841     // 0x10 - multiply nds[0:63] and src[64:127]
1842     Assembler::vpclmulqdq(dst, nds, src, 0x10);
1843   }
1844   void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1845     //0x01 - multiply nds[64:127] and src[0:63]
1846     Assembler::vpclmulqdq(dst, nds, src, 0x01);
1847   }
1848 
1849   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1850     // 0x00 - multiply lower 64 bits [0:63]
1851     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1852   }
1853   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1854     // 0x11 - multiply upper 64 bits [64:127]
1855     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1856   }
1857 
1858   // AVX-512 mask operations.
1859   void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1860   void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1861   void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1862   void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1863   void kortest(uint masklen, KRegister src1, KRegister src2);
1864   void ktest(uint masklen, KRegister src1, KRegister src2);
1865 
1866   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1867   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1868 
1869   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1870   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1871 
1872   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1873   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1874 
1875   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1876   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1877 
1878   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1879   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1880   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1881   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1882 
1883   using Assembler::evpandq;
1884   void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1885 
1886   using Assembler::evpaddq;
1887   void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1888 
1889   using Assembler::evporq;
1890   void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1891 
1892   using Assembler::vpshufb;
1893   void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1894 
1895   using Assembler::vpor;
1896   void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1897 
1898   using Assembler::vpternlogq;
1899   void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1900 
1901   void cmov32( Condition cc, Register dst, Address  src);
1902   void cmov32( Condition cc, Register dst, Register src);
1903 
1904   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1905 
1906   void cmovptr(Condition cc, Register dst, Address  src) { cmovq(cc, dst, src); }
1907   void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1908 
1909   void movoop(Register dst, jobject obj);
1910   void movoop(Address  dst, jobject obj, Register rscratch);
1911 
1912   void mov_metadata(Register dst, Metadata* obj);
1913   void mov_metadata(Address  dst, Metadata* obj, Register rscratch);
1914 
1915   void movptr(Register     dst, Register       src);
1916   void movptr(Register     dst, Address        src);
1917   void movptr(Register     dst, AddressLiteral src);
1918   void movptr(Register     dst, ArrayAddress   src);
1919   void movptr(Register     dst, intptr_t       src);
1920   void movptr(Address      dst, Register       src);
1921   void movptr(Address      dst, int32_t        imm);
1922   void movptr(Address      dst, intptr_t       src, Register rscratch);
1923   void movptr(ArrayAddress dst, Register       src, Register rscratch);
1924 
1925   void movptr(Register dst, RegisterOrConstant src) {
1926     if (src.is_constant()) movptr(dst, src.as_constant());
1927     else                   movptr(dst, src.as_register());
1928   }
1929 
1930 
1931   // to avoid hiding movl
1932   void mov32(Register       dst, AddressLiteral src);
1933   void mov32(AddressLiteral dst, Register        src, Register rscratch = noreg);
1934 
1935   // Import other mov() methods from the parent class or else
1936   // they will be hidden by the following overriding declaration.
1937   using Assembler::movdl;
1938   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1939 
1940   using Assembler::movq;
1941   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1942 
1943   // Can push value or effective address
1944   void pushptr(AddressLiteral src, Register rscratch);
1945 
1946   void pushptr(Address src) { pushq(src); }
1947   void popptr(Address src) { popq(src); }
1948 
1949   void pushoop(jobject obj, Register rscratch);
1950   void pushklass(Metadata* obj, Register rscratch);
1951 
  // sign-extend a 32-bit ('l') value to a pointer-sized element
1953   void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1954   void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1955 
1956 
1957  public:
1958   // Inline type specific methods
1959   #include "asm/macroAssembler_common.hpp"
1960 
1961   int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1962   bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1963   bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1964                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1965                             RegState reg_state[]);
1966   bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1967                           VMRegPair* from, int from_count, int& from_index, VMReg to,
1968                           RegState reg_state[], Register val_array);
1969   int extend_stack_for_inline_args(int args_on_stack);
1970   void remove_frame(int initial_framesize, bool needs_stack_repair);
1971   VMReg spill_reg_for(VMReg reg);
1972 
  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
1975   void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
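  // Hypothetical use (register choices are illustrative only, not a required convention):
  //   __ clear_mem(rdi /*base*/, rcx /*cnt in qwords*/, rax /*val*/, xmm0 /*xtmp*/,
  //                false /*is_large*/, false /*word_copy_only*/);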
1976 
  // clear memory of a constant size of 'cnt' qwords, starting at 'base';
1978   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1979 
1980   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1981   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1982 
1983   // Fill primitive arrays
1984   void generate_fill(BasicType t, bool aligned,
1985                      Register to, Register value, Register count,
1986                      Register rtmp, XMMRegister xtmp);
1987 
1988   void encode_iso_array(Register src, Register dst, Register len,
1989                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1990                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1991 
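  // The multiply/square helpers below back the java.math.BigInteger intrinsic stubs
  // (multiplyToLen, squareToLen, mulAdd).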
1992   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1993   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1994                              Register y, Register y_idx, Register z,
1995                              Register carry, Register product,
1996                              Register idx, Register kdx);
1997   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1998                               Register yz_idx, Register idx,
1999                               Register carry, Register product, int offset);
2000   void multiply_128_x_128_bmi2_loop(Register y, Register z,
2001                                     Register carry, Register carry2,
2002                                     Register idx, Register jdx,
2003                                     Register yz_idx1, Register yz_idx2,
2004                                     Register tmp, Register tmp3, Register tmp4);
2005   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
2006                                Register yz_idx, Register idx, Register jdx,
2007                                Register carry, Register product,
2008                                Register carry2);
2009   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
2010                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
2011   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
2012                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2013   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
2014                             Register tmp2);
2015   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
2016                        Register rdxReg, Register raxReg);
2017   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
2018   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2019                        Register tmp3, Register tmp4);
2020   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2021                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2022 
2023   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
2024                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2025                Register raxReg);
2026   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
2027                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2028                Register raxReg);
2029   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
2030                            Register result, Register tmp1, Register tmp2,
2031                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
2032 
2033   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
2034   void update_byte_crc32(Register crc, Register val, Register table);
2035   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
2036 
2037   void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
2038   void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
2039                                 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
2040                                 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
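  // Hypothetical stub-side call (register choices are illustrative only, not a fixed ABI):
  //   __ kernel_crc32(c_rarg0 /*crc*/, c_rarg1 /*buf*/, c_rarg2 /*len*/, r10 /*table*/, r11 /*tmp*/);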
2041 
2042   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on the naming convention:
2044   // Prefix w = register only used on a Westmere+ architecture
2045   // Prefix n = register only used on a Nehalem architecture
2046   void crc32c_ipl_alg4(Register in_out, uint32_t n,
2047                        Register tmp1, Register tmp2, Register tmp3);
2048   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2049                         Register in_out,
2050                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2051                         XMMRegister w_xtmp2,
2052                         Register tmp1,
2053                         Register n_tmp2, Register n_tmp3);
2054   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2055                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2056                        Register tmp1, Register tmp2,
2057                        Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1,
                         uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

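  // Vectorized fill helpers: fill32/fill64 store a full 32- or 64-byte chunk,
  // while the *_masked variants use an opmask register to cover a partial tail.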
  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                         XMMRegister xmm, KRegister mask, Register length,
                         Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                         XMMRegister xmm, KRegister mask, Register length,
                         Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);

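  // Floating-point to integer conversions with Java semantics: NaN converts to 0
  // and out-of-range values saturate to the type's minimum/maximum value.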
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
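  // Round src to an integral value in dst (used for the java.lang.Math.round intrinsics).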
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

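  // Write back the cache line containing the given address; cache_wbsync emits
  // the pre/post ordering fence. Used by the data-cache writeback stubs.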
  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#ifdef COMPILER2_OR_JVMCI
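  // Fill routine using AVX-512 (AVX3) vector stores.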
  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);
#endif // COMPILER2_OR_JVMCI

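  // Set all bits of dst to 1 for the given vector length.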
  void vallones(XMMRegister dst, int vector_len);

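  // Verify that sp (offset by bias) has the expected stack alignment; stops with msg if not.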
  void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

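  // Fast-path monitor lock/unlock for the lightweight locking scheme; jumps to
  // slow for any case that must be handled by the runtime.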
  void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
  void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);

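  // Save/restore the sixteen legacy general-purpose registers (rax through r15).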
  void save_legacy_gprs();
  void restore_legacy_gprs();
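  // Materialize the given condition as 0 or 1 in dst.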
  void setcc(Assembler::Condition comparison, Register dst);
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP