1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/signature.hpp"
  34 #include "runtime/vm_version.hpp"
  35 #include "utilities/checkedCast.hpp"
  36 
  37 class ciInlineKlass;
  38 
  39 // MacroAssembler extends Assembler by frequently used macros.
  40 //
  41 // Instructions for which a 'better' code sequence exists depending
  42 // on arguments should also go in here.
  43 
  44 class MacroAssembler: public Assembler {
  45   friend class LIR_Assembler;
  46   friend class Runtime1;      // as_Address()
  47 
  48  public:
  49   // Support for VM calls
  50   //
  51   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  52   // may customize this version by overriding it for its purposes (e.g., to save/restore
  53   // additional registers when doing a VM call).
  54 
  55   virtual void call_VM_leaf_base(
  56     address entry_point,               // the entry point
  57     int     number_of_arguments        // the number of arguments to pop after the call
  58   );
  59 
  60  protected:
  61   // This is the base routine called by the different versions of call_VM. The interpreter
  62   // may customize this version by overriding it for its purposes (e.g., to save/restore
  63   // additional registers when doing a VM call).
  64   //
  65   // call_VM_base returns the register which contains the thread upon return.
  66   // If no last_java_sp is specified (noreg) than rsp will be used instead.
  67   virtual void call_VM_base(           // returns the register containing the thread upon return
  68     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  69     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  70     address  entry_point,              // the entry point
  71     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  72     bool     check_exceptions          // whether to check for pending exceptions after return
  73   );
  74 
  75   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  76 
  77  public:
  78   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  79 
  80  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  81  // The implementation is only non-empty for the InterpreterMacroAssembler,
  82  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  83  virtual void check_and_handle_popframe();
  84  virtual void check_and_handle_earlyret();
  85 
  86   Address as_Address(AddressLiteral adr);
  87   Address as_Address(ArrayAddress adr, Register rscratch);
  88 
  89   // Support for null-checks
  90   //
  91   // Generates code that causes a null OS exception if the content of reg is null.
  92   // If the accessed location is M[reg + offset] and the offset is known, provide the
  93   // offset. No explicit code generation is needed if the offset is within a certain
  94   // range (0 <= offset <= page_size).
  95 
  96   void null_check(Register reg, int offset = -1);
  97   static bool needs_explicit_null_check(intptr_t offset);
  98   static bool uses_implicit_null_check(void* address);
  99 
 100   // markWord tests, kills markWord reg
 101   void test_markword_is_inline_type(Register markword, Label& is_inline_type);
 102 
 103   // inlineKlass queries, kills temp_reg
 104   void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
 105 
 106   void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
 107   void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
 108   void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
 109   void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
 110 
 111   // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
 112   void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
 113   void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
 114   void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
 115   void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
 116   void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
 117 
 118   // Check array klass layout helper for flat or null-free arrays...
 119   void test_flat_array_layout(Register lh, Label& is_flat_array);
 120   void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
 121 
 122   // Required platform-specific helpers for Label::patch_instructions.
 123   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 124   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 125     unsigned char op = branch[0];
 126     assert(op == 0xE8 /* call */ ||
 127         op == 0xE9 /* jmp */ ||
 128         op == 0xEB /* short jmp */ ||
 129         (op & 0xF0) == 0x70 /* short jcc */ ||
 130         (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
 131         (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
 132         (op == 0x8D) /* lea */,
 133         "Invalid opcode at patch point");
 134 
 135     if (op == 0xEB || (op & 0xF0) == 0x70) {
 136       // short offset operators (jmp and jcc)
 137       char* disp = (char*) &branch[1];
 138       int imm8 = checked_cast<int>(target - (address) &disp[1]);
 139       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 140                 file == nullptr ? "<null>" : file, line);
 141       *disp = (char)imm8;
 142     } else {
 143       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
 144       int imm32 = checked_cast<int>(target - (address) &disp[1]);
 145       *disp = imm32;
 146     }
 147   }
 148 
 149   // The following 4 methods return the offset of the appropriate move instruction
 150 
 151   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 152   int load_unsigned_byte(Register dst, Address src);
 153   int load_unsigned_short(Register dst, Address src);
 154 
 155   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 156   int load_signed_byte(Register dst, Address src);
 157   int load_signed_short(Register dst, Address src);
 158 
 159   // Support for sign-extension (hi:lo = extend_sign(lo))
 160   void extend_sign(Register hi, Register lo);
 161 
 162   // Load and store values by size and signed-ness
 163   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 164   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 165 
 166   // Support for inc/dec with optimal instruction selection depending on value
 167 
 168   void increment(Register reg, int value = 1) { incrementq(reg, value); }
 169   void decrement(Register reg, int value = 1) { decrementq(reg, value); }
 170   void increment(Address dst, int value = 1)  { incrementq(dst, value); }
 171   void decrement(Address dst, int value = 1)  { decrementq(dst, value); }
 172 
 173   void decrementl(Address dst, int value = 1);
 174   void decrementl(Register reg, int value = 1);
 175 
 176   void decrementq(Register reg, int value = 1);
 177   void decrementq(Address dst, int value = 1);
 178 
 179   void incrementl(Address dst, int value = 1);
 180   void incrementl(Register reg, int value = 1);
 181 
 182   void incrementq(Register reg, int value = 1);
 183   void incrementq(Address dst, int value = 1);
 184 
 185   void incrementl(AddressLiteral dst, Register rscratch = noreg);
 186   void incrementl(ArrayAddress   dst, Register rscratch);
 187 
 188   void incrementq(AddressLiteral dst, Register rscratch = noreg);
 189 
 190   // Support optimal SSE move instructions.
 191   void movflt(XMMRegister dst, XMMRegister src) {
 192     if (dst-> encoding() == src->encoding()) return;
 193     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 194     else                       { movss (dst, src); return; }
 195   }
 196   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 197   void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
 198   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 199 
 200   // Move with zero extension
 201   void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
 202 
 203   void movdbl(XMMRegister dst, XMMRegister src) {
 204     if (dst-> encoding() == src->encoding()) return;
 205     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 206     else                       { movsd (dst, src); return; }
 207   }
 208 
 209   void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
 210 
 211   void movdbl(XMMRegister dst, Address src) {
 212     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 213     else                         { movlpd(dst, src); return; }
 214   }
 215   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 216 
 217   void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
 218     // Use separate tmp XMM register because caller may
 219     // requires src XMM register to be unchanged (as in x86.ad).
 220     vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
 221     movdl(dst, tmp);
 222     movswl(dst, dst);
 223   }
 224 
 225   void flt16_to_flt(XMMRegister dst, Register src) {
 226     movdl(dst, src);
 227     vcvtph2ps(dst, dst, Assembler::AVX_128bit);
 228   }
 229 
 230   // Alignment
 231   void align32();
 232   void align64();
 233   void align(uint modulus);
 234   void align(uint modulus, uint target);
 235 
 236   void post_call_nop();
 237 
 238   // Stack frame creation/removal
 239   void enter();
 240   void leave();
 241 
 242   // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information).
 243   // The pointer will be loaded into the thread register. This is a slow version that does native call.
 244   // Normally, JavaThread pointer is available in r15_thread, use that where possible.
 245   void get_thread_slow(Register thread);
 246 
 247   // Support for argument shuffling
 248 
 249   // bias in bytes
 250   void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 251   void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 252   void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 253   void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
 254   void move_ptr(VMRegPair src, VMRegPair dst);
 255   void object_move(OopMap* map,
 256                    int oop_handle_offset,
 257                    int framesize_in_slots,
 258                    VMRegPair src,
 259                    VMRegPair dst,
 260                    bool is_receiver,
 261                    int* receiver_offset);
 262 
 263   // Support for VM calls
 264   //
 265   // It is imperative that all calls into the VM are handled via the call_VM macros.
 266   // They make sure that the stack linkage is setup correctly. call_VM's correspond
 267   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
 268 
 269 
 270   void call_VM(Register oop_result,
 271                address entry_point,
 272                bool check_exceptions = true);
 273   void call_VM(Register oop_result,
 274                address entry_point,
 275                Register arg_1,
 276                bool check_exceptions = true);
 277   void call_VM(Register oop_result,
 278                address entry_point,
 279                Register arg_1, Register arg_2,
 280                bool check_exceptions = true);
 281   void call_VM(Register oop_result,
 282                address entry_point,
 283                Register arg_1, Register arg_2, Register arg_3,
 284                bool check_exceptions = true);
 285 
 286   // Overloadings with last_Java_sp
 287   void call_VM(Register oop_result,
 288                Register last_java_sp,
 289                address entry_point,
 290                int number_of_arguments = 0,
 291                bool check_exceptions = true);
 292   void call_VM(Register oop_result,
 293                Register last_java_sp,
 294                address entry_point,
 295                Register arg_1, bool
 296                check_exceptions = true);
 297   void call_VM(Register oop_result,
 298                Register last_java_sp,
 299                address entry_point,
 300                Register arg_1, Register arg_2,
 301                bool check_exceptions = true);
 302   void call_VM(Register oop_result,
 303                Register last_java_sp,
 304                address entry_point,
 305                Register arg_1, Register arg_2, Register arg_3,
 306                bool check_exceptions = true);
 307 
 308   void get_vm_result_oop(Register oop_result);
 309   void get_vm_result_metadata(Register metadata_result);
 310 
 311   // These always tightly bind to MacroAssembler::call_VM_base
 312   // bypassing the virtual implementation
 313   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 314   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 315   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 316   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 317   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 318 
 319   void call_VM_leaf0(address entry_point);
 320   void call_VM_leaf(address entry_point,
 321                     int number_of_arguments = 0);
 322   void call_VM_leaf(address entry_point,
 323                     Register arg_1);
 324   void call_VM_leaf(address entry_point,
 325                     Register arg_1, Register arg_2);
 326   void call_VM_leaf(address entry_point,
 327                     Register arg_1, Register arg_2, Register arg_3);
 328 
 329   void call_VM_leaf(address entry_point,
 330                     Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 331 
 332   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 333   // bypassing the virtual implementation
 334   void super_call_VM_leaf(address entry_point);
 335   void super_call_VM_leaf(address entry_point, Register arg_1);
 336   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 337   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 338   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 339 
 340   void set_last_Java_frame(Register last_java_sp,
 341                            Register last_java_fp,
 342                            address  last_java_pc,
 343                            Register rscratch);
 344 
 345   void set_last_Java_frame(Register last_java_sp,
 346                            Register last_java_fp,
 347                            Label &last_java_pc,
 348                            Register scratch);
 349 
 350   void reset_last_Java_frame(bool clear_fp);
 351 
 352   // jobjects
 353   void clear_jobject_tag(Register possibly_non_local);
 354   void resolve_jobject(Register value, Register tmp);
 355   void resolve_global_jobject(Register value, Register tmp);
 356 
 357   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 358   void c2bool(Register x);
 359 
 360   // C++ bool manipulation
 361 
 362   void movbool(Register dst, Address src);
 363   void movbool(Address dst, bool boolconst);
 364   void movbool(Address dst, Register src);
 365   void testbool(Register dst);
 366 
 367   void resolve_oop_handle(Register result, Register tmp);
 368   void resolve_weak_handle(Register result, Register tmp);
 369   void load_mirror(Register mirror, Register method, Register tmp);
 370   void load_method_holder_cld(Register rresult, Register rmethod);
 371 
 372   void load_method_holder(Register holder, Register method);
 373 
 374   // oop manipulations
 375 
 376   // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
 377   void load_metadata(Register dst, Register src);
 378   void load_narrow_klass_compact(Register dst, Register src);
 379   void load_klass(Register dst, Register src, Register tmp);
 380   void store_klass(Register dst, Register src, Register tmp);
 381 
 382   // Compares the Klass pointer of an object to a given Klass (which might be narrow,
 383   // depending on UseCompressedClassPointers).
 384   void cmp_klass(Register klass, Register obj, Register tmp);
 385 
 386   // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
 387   // Uses tmp1 and tmp2 as temporary registers.
 388   void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
 389 
 390   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 391                       Register tmp1);
 392   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 393                        Register tmp1, Register tmp2, Register tmp3);
 394 
 395   void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
 396 
 397   // inline type data payload offsets...
 398   void payload_offset(Register inline_klass, Register offset);
 399   void payload_addr(Register oop, Register data, Register inline_klass);
 400   // get data payload ptr a flat value array at index, kills rcx and index
 401   void data_for_value_array_index(Register array, Register array_klass,
 402                                   Register index, Register data);
 403 
 404   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
 405   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
 406   void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
 407                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 408 
 409   // Used for storing null. All other oop constants should be
 410   // stored using routines that take a jobject.
 411   void store_heap_oop_null(Address dst);
 412 
 413   void load_prototype_header(Register dst, Register src, Register tmp);
 414 
 415   void store_klass_gap(Register dst, Register src);
 416 
 417   // This dummy is to prevent a call to store_heap_oop from
 418   // converting a zero (like null) into a Register by giving
 419   // the compiler two choices it can't resolve
 420 
 421   void store_heap_oop(Address dst, void* dummy);
 422 
 423   void encode_heap_oop(Register r);
 424   void decode_heap_oop(Register r);
 425   void encode_heap_oop_not_null(Register r);
 426   void decode_heap_oop_not_null(Register r);
 427   void encode_heap_oop_not_null(Register dst, Register src);
 428   void decode_heap_oop_not_null(Register dst, Register src);
 429 
 430   void set_narrow_oop(Register dst, jobject obj);
 431   void set_narrow_oop(Address dst, jobject obj);
 432   void cmp_narrow_oop(Register dst, jobject obj);
 433   void cmp_narrow_oop(Address dst, jobject obj);
 434 
 435   void encode_klass_not_null(Register r, Register tmp);
 436   void decode_klass_not_null(Register r, Register tmp);
 437   void encode_and_move_klass_not_null(Register dst, Register src);
 438   void decode_and_move_klass_not_null(Register dst, Register src);
 439   void set_narrow_klass(Register dst, Klass* k);
 440   void set_narrow_klass(Address dst, Klass* k);
 441   void cmp_narrow_klass(Register dst, Klass* k);
 442   void cmp_narrow_klass(Address dst, Klass* k);
 443 
 444   // if heap base register is used - reinit it with the correct value
 445   void reinit_heapbase();
 446 
 447   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 448 
 449   // Int division/remainder for Java
 450   // (as idivl, but checks for special case as described in JVM spec.)
 451   // returns idivl instruction offset for implicit exception handling
 452   int corrected_idivl(Register reg);
 453 
 454   // Long division/remainder for Java
 455   // (as idivq, but checks for special case as described in JVM spec.)
 456   // returns idivq instruction offset for implicit exception handling
 457   int corrected_idivq(Register reg);
 458 
 459   void int3();
 460 
 461   // Long operation macros for a 32bit cpu
 462   // Long negation for Java
 463   void lneg(Register hi, Register lo);
 464 
 465   // Long multiplication for Java
 466   // (destroys contents of eax, ebx, ecx and edx)
 467   void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
 468 
 469   // Long shifts for Java
 470   // (semantics as described in JVM spec.)
 471   void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
 472   void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)
 473 
 474   // Long compare for Java
 475   // (semantics as described in JVM spec.)
 476   void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
 477 
 478 
 479   // misc
 480 
 481   // Sign extension
 482   void sign_extend_short(Register reg);
 483   void sign_extend_byte(Register reg);
 484 
 485   // Division by power of 2, rounding towards 0
 486   void division_with_shift(Register reg, int shift_value);
 487 
 488   // dst = c = a * b + c
 489   void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 490   void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 491 
 492   void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 493   void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 494   void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 495   void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 496 
 497 
 498   // same as fcmp2int, but using SSE2
 499   void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 500   void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 501 
 502   void push_IU_state();
 503   void pop_IU_state();
 504 
 505   void push_FPU_state();
 506   void pop_FPU_state();
 507 
 508   void push_CPU_state();
 509   void pop_CPU_state();
 510 
 511   void push_cont_fastpath();
 512   void pop_cont_fastpath();
 513 
 514   DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
 515 
 516   // Round up to a power of two
 517   void round_to(Register reg, int modulus);
 518 
 519 private:
 520   // General purpose and XMM registers potentially clobbered by native code; there
 521   // is no need for FPU or AVX opmask related methods because C1/interpreter
 522   // - we save/restore FPU state as a whole always
 523   // - do not care about AVX-512 opmask
 524   static RegSet call_clobbered_gp_registers();
 525   static XMMRegSet call_clobbered_xmm_registers();
 526 
 527   void push_set(XMMRegSet set, int offset);
 528   void pop_set(XMMRegSet set, int offset);
 529 
 530 public:
 531   void push_set(RegSet set, int offset = -1);
 532   void pop_set(RegSet set, int offset = -1);
 533 
 534   // Push and pop everything that might be clobbered by a native
 535   // runtime call.
 536   // Only save the lower 64 bits of each vector register.
 537   // Additional registers can be excluded in a passed RegSet.
 538   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 539   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 540 
 541   void push_call_clobbered_registers(bool save_fpu = true) {
 542     push_call_clobbered_registers_except(RegSet(), save_fpu);
 543   }
 544   void pop_call_clobbered_registers(bool restore_fpu = true) {
 545     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 546   }
 547 
 548   // allocation
 549 
 550   // Object / value buffer allocation...
 551   // Allocate instance of klass, assumes klass initialized by caller
 552   // new_obj prefers to be rax
 553   // Kills t1 and t2, perserves klass, return allocation in new_obj (rsi on LP64)
 554   void allocate_instance(Register klass, Register new_obj,
 555                          Register t1, Register t2,
 556                          bool clear_fields, Label& alloc_failed);
 557 
 558   void tlab_allocate(
 559     Register obj,                      // result: pointer to object after successful allocation
 560     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 561     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 562     Register t1,                       // temp register
 563     Register t2,                       // temp register
 564     Label&   slow_case                 // continuation point if fast allocation fails
 565   );
 566   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 567 
 568   void inline_layout_info(Register klass, Register index, Register layout_info);
 569 
 570   void population_count(Register dst, Register src, Register scratch1, Register scratch2);
 571 
 572   // interface method calling
 573   void lookup_interface_method(Register recv_klass,
 574                                Register intf_klass,
 575                                RegisterOrConstant itable_index,
 576                                Register method_result,
 577                                Register scan_temp,
 578                                Label& no_such_interface,
 579                                bool return_method = true);
 580 
 581   void lookup_interface_method_stub(Register recv_klass,
 582                                     Register holder_klass,
 583                                     Register resolved_klass,
 584                                     Register method_result,
 585                                     Register scan_temp,
 586                                     Register temp_reg2,
 587                                     Register receiver,
 588                                     int itable_index,
 589                                     Label& L_no_such_interface);
 590 
 591   // virtual method calling
 592   void lookup_virtual_method(Register recv_klass,
 593                              RegisterOrConstant vtable_index,
 594                              Register method_result);
 595 
 596   // Test sub_klass against super_klass, with fast and slow paths.
 597 
 598   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 599   // One of the three labels can be null, meaning take the fall-through.
 600   // If super_check_offset is -1, the value is loaded up from super_klass.
 601   // No registers are killed, except temp_reg.
 602   void check_klass_subtype_fast_path(Register sub_klass,
 603                                      Register super_klass,
 604                                      Register temp_reg,
 605                                      Label* L_success,
 606                                      Label* L_failure,
 607                                      Label* L_slow_path,
 608                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 609 
 610   // The rest of the type check; must be wired to a corresponding fast path.
 611   // It does not repeat the fast path logic, so don't use it standalone.
 612   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 613   // Updates the sub's secondary super cache as necessary.
 614   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 615   void check_klass_subtype_slow_path(Register sub_klass,
 616                                      Register super_klass,
 617                                      Register temp_reg,
 618                                      Register temp2_reg,
 619                                      Label* L_success,
 620                                      Label* L_failure,
 621                                      bool set_cond_codes = false);
 622 
 623   // The 64-bit version, which may do a hashed subclass lookup.
 624   void check_klass_subtype_slow_path(Register sub_klass,
 625                                      Register super_klass,
 626                                      Register temp_reg,
 627                                      Register temp2_reg,
 628                                      Register temp3_reg,
 629                                      Register temp4_reg,
 630                                      Label* L_success,
 631                                      Label* L_failure);
 632 
 633   // Three parts of a hashed subclass lookup: a simple linear search,
 634   // a table lookup, and a fallback that does linear probing in the
 635   // event of a hash collision.
 636   void check_klass_subtype_slow_path_linear(Register sub_klass,
 637                                             Register super_klass,
 638                                             Register temp_reg,
 639                                             Register temp2_reg,
 640                                             Label* L_success,
 641                                             Label* L_failure,
 642                                             bool set_cond_codes = false);
 643   void check_klass_subtype_slow_path_table(Register sub_klass,
 644                                            Register super_klass,
 645                                            Register temp_reg,
 646                                            Register temp2_reg,
 647                                            Register temp3_reg,
 648                                            Register result_reg,
 649                                            Label* L_success,
 650                                            Label* L_failure);
 651   void hashed_check_klass_subtype_slow_path(Register sub_klass,
 652                                             Register super_klass,
 653                                             Register temp_reg,
 654                                             Label* L_success,
 655                                             Label* L_failure);
 656 
 657   // As above, but with a constant super_klass.
 658   // The result is in Register result, not the condition codes.
 659   void lookup_secondary_supers_table_const(Register sub_klass,
 660                                            Register super_klass,
 661                                            Register temp1,
 662                                            Register temp2,
 663                                            Register temp3,
 664                                            Register temp4,
 665                                            Register result,
 666                                            u1 super_klass_slot);
 667 
 668   using Assembler::salq;
 669   void salq(Register dest, Register count);
 670   using Assembler::rorq;
 671   void rorq(Register dest, Register count);
 672   void lookup_secondary_supers_table_var(Register sub_klass,
 673                                          Register super_klass,
 674                                          Register temp1,
 675                                          Register temp2,
 676                                          Register temp3,
 677                                          Register temp4,
 678                                          Register result);
 679 
 680   void lookup_secondary_supers_table_slow_path(Register r_super_klass,
 681                                                Register r_array_base,
 682                                                Register r_array_index,
 683                                                Register r_bitmap,
 684                                                Register temp1,
 685                                                Register temp2,
 686                                                Label* L_success,
 687                                                Label* L_failure = nullptr);
 688 
 689   void verify_secondary_supers_table(Register r_sub_klass,
 690                                      Register r_super_klass,
 691                                      Register expected,
 692                                      Register temp1,
 693                                      Register temp2,
 694                                      Register temp3);
 695 
 696   void repne_scanq(Register addr, Register value, Register count, Register limit,
 697                    Label* L_success,
 698                    Label* L_failure = nullptr);
 699 
 700   // If r is valid, return r.
 701   // If r is invalid, remove a register r2 from available_regs, add r2
 702   // to regs_to_push, then return r2.
 703   Register allocate_if_noreg(const Register r,
 704                              RegSetIterator<Register> &available_regs,
 705                              RegSet &regs_to_push);
 706 
 707   // Simplified, combined version, good for typical uses.
 708   // Falls through on failure.
 709   void check_klass_subtype(Register sub_klass,
 710                            Register super_klass,
 711                            Register temp_reg,
 712                            Label& L_success);
 713 
 714   void clinit_barrier(Register klass,
 715                       Label* L_fast_path = nullptr,
 716                       Label* L_slow_path = nullptr);
 717 
 718   // method handles (JSR 292)
 719   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 720 
 721   void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
 722 
 723   // Debugging
 724 
 725   // only if +VerifyOops
 726   void _verify_oop(Register reg, const char* s, const char* file, int line);
 727   void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
 728 
 729   void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
 730     if (VerifyOops) {
 731       _verify_oop(reg, s, file, line);
 732     }
 733   }
 734   void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
 735     if (VerifyOops) {
 736       _verify_oop_addr(reg, s, file, line);
 737     }
 738   }
 739 
 740   // TODO: verify method and klass metadata (compare against vptr?)
 741   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 742   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 743 
 744 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
 745 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
 746 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
 747 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 748 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 749 
 750   // Verify or restore cpu control state after JNI call
 751   void restore_cpu_control_state_after_jni(Register rscratch);
 752 
 753   // prints msg, dumps registers and stops execution
 754   void stop(const char* msg);
 755 
 756   // prints msg and continues
 757   void warn(const char* msg);
 758 
 759   // dumps registers and other state
 760   void print_state();
 761 
 762   static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
 763   static void debug64(char* msg, int64_t pc, int64_t regs[]);
 764   static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
 765   static void print_state64(int64_t pc, int64_t regs[]);
 766 
 767   void os_breakpoint();
 768 
 769   void untested()                                { stop("untested"); }
 770 
 771   void unimplemented(const char* what = "");
 772 
 773   void should_not_reach_here()                   { stop("should not reach here"); }
 774 
 775   void print_CPU_state();
 776 
 777   // Stack overflow checking
 778   void bang_stack_with_offset(int offset) {
 779     // stack grows down, caller passes positive offset
 780     assert(offset > 0, "must bang with negative offset");
 781     movl(Address(rsp, (-offset)), rax);
 782   }
 783 
 784   // Writes to stack successive pages until offset reached to check for
 785   // stack overflow + shadow pages.  Also, clobbers tmp
 786   void bang_stack_size(Register size, Register tmp);
 787 
 788   // Check for reserved stack access in method being exited (for JIT)
 789   void reserved_stack_check();
 790 
 791   void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);
 792 
 793   void verify_tlab();
 794 
 795   static Condition negate_condition(Condition cond);
 796 
 797   // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
 798   // operands. In general the names are modified to avoid hiding the instruction in Assembler
 799   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 800   // here in MacroAssembler. The major exception to this rule is call
 801 
 802   // Arithmetics
 803 
 804 
 805   void addptr(Address dst, int32_t src) { addq(dst, src); }
 806   void addptr(Address dst, Register src);
 807 
 808   void addptr(Register dst, Address src) { addq(dst, src); }
 809   void addptr(Register dst, int32_t src);
 810   void addptr(Register dst, Register src);
 811   void addptr(Register dst, RegisterOrConstant src) {
 812     if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
 813     else                   addptr(dst, src.as_register());
 814   }
 815 
 816   void andptr(Register dst, int32_t src);
 817   void andptr(Register src1, Register src2) { andq(src1, src2); }
 818   void andptr(Register dst, Address src) { andq(dst, src); }
 819 
 820   using Assembler::andq;
 821   void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
 822 
 823   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 824 
 825   // renamed to drag out the casting of address to int32_t/intptr_t
 826   void cmp32(Register src1, int32_t imm);
 827 
 828   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 829   // compare reg - mem, or reg - &mem
 830   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 831 
 832   void cmp32(Register src1, Address src2);
 833 
 834   void cmpoop(Register src1, Register src2);
 835   void cmpoop(Register src1, Address src2);
 836   void cmpoop(Register dst, jobject obj, Register rscratch);
 837 
 838   // NOTE src2 must be the lval. This is NOT an mem-mem compare
 839   void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
 840 
 841   void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
 842 
 843   void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
 844   void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }
 845 
 846   void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
 847   void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }
 848 
 849   // cmp64 to avoild hiding cmpq
 850   void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
 851 
 852   void cmpxchgptr(Register reg, Address adr);
 853 
 854   void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
 855 
 856   void imulptr(Register dst, Register src) { imulq(dst, src); }
 857   void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }
 858 
 859 
 860   void negptr(Register dst) { negq(dst); }
 861 
 862   void notptr(Register dst) { notq(dst); }
 863 
 864   void shlptr(Register dst, int32_t shift);
 865   void shlptr(Register dst) { shlq(dst); }
 866 
 867   void shrptr(Register dst, int32_t shift);
 868   void shrptr(Register dst) { shrq(dst); }
 869 
 870   void sarptr(Register dst) { sarq(dst); }
 871   void sarptr(Register dst, int32_t src) { sarq(dst, src); }
 872 
 873   void subptr(Address dst, int32_t src) { subq(dst, src); }
 874 
 875   void subptr(Register dst, Address src) { subq(dst, src); }
 876   void subptr(Register dst, int32_t src);
 877   // Force generation of a 4 byte immediate value even if it fits into 8bit
 878   void subptr_imm32(Register dst, int32_t src);
 879   void subptr(Register dst, Register src);
 880   void subptr(Register dst, RegisterOrConstant src) {
 881     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 882     else                   subptr(dst,       src.as_register());
 883   }
 884 
 885   void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
 886   void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }
 887 
 888   void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
 889   void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }
 890 
 891   void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }
 892 
 893 
 894 
 895   // Helper functions for statistics gathering.
 896   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 897   void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
 898   // Unconditional atomic increment.
 899   void atomic_incl(Address counter_addr);
 900   void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
 901   void atomic_incq(Address counter_addr);
 902   void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
 903   void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
 904   void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
 905 
 906   using Assembler::lea;
 907   void lea(Register dst, AddressLiteral adr);
 908   void lea(Address  dst, AddressLiteral adr, Register rscratch);
 909 
 910   void leal32(Register dst, Address src) { leal(dst, src); }
 911 
 912   // Import other testl() methods from the parent class or else
 913   // they will be hidden by the following overriding declaration.
 914   using Assembler::testl;
 915   void testl(Address dst, int32_t imm32);
 916   void testl(Register dst, int32_t imm32);
 917   void testl(Register dst, AddressLiteral src); // requires reachable address
 918   using Assembler::testq;
 919   void testq(Address dst, int32_t imm32);
 920   void testq(Register dst, int32_t imm32);
 921 
 922   void orptr(Register dst, Address src) { orq(dst, src); }
 923   void orptr(Register dst, Register src) { orq(dst, src); }
 924   void orptr(Register dst, int32_t src) { orq(dst, src); }
 925   void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }
 926 
 927   void testptr(Register src, int32_t imm32) { testq(src, imm32); }
 928   void testptr(Register src1, Address src2) { testq(src1, src2); }
 929   void testptr(Address src, int32_t imm32) { testq(src, imm32); }
 930   void testptr(Register src1, Register src2);
 931 
 932   void xorptr(Register dst, Register src) { xorq(dst, src); }
 933   void xorptr(Register dst, Address src) { xorq(dst, src); }
 934 
 935   // Calls
 936 
 937   void call(Label& L, relocInfo::relocType rtype);
 938   void call(Register entry);
 939   void call(Address addr) { Assembler::call(addr); }
 940 
 941   // NOTE: this call transfers to the effective address of entry NOT
 942   // the address contained by entry. This is because this is more natural
 943   // for jumps/calls.
 944   void call(AddressLiteral entry, Register rscratch = rax);
 945 
 946   // Emit the CompiledIC call idiom
 947   void ic_call(address entry, jint method_index = 0);
 948   static int ic_check_size();
 949   int ic_check(int end_alignment);
 950 
 951   void emit_static_call_stub();
 952 
 953   // Jumps
 954 
 955   // NOTE: these jumps transfer to the effective address of dst NOT
 956   // the address contained by dst. This is because this is more natural
 957   // for jumps/calls.
 958   void jump(AddressLiteral dst, Register rscratch = noreg);
 959 
 960   void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
 961 
 962   // 32bit can do a case table jump in one instruction but we no longer allow the base
 963   // to be installed in the Address class. This jump will transfer to the address
 964   // contained in the location described by entry (not the address of entry)
 965   void jump(ArrayAddress entry, Register rscratch);
 966 
 967   // Adding more natural conditional jump instructions
 968   void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
 969   void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
 970   void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
 971   void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
 972   void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
 973   void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
 974   void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
 975   void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
 976   void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
 977   void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
 978   void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
 979   void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
 980   void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
 981   void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
 982   void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
 983   void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
 984   void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
 985   void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
 986   void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
 987   void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
 988   void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
 989   void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
 990   void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
 991   void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
 992   void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
 993   void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
 994   void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
 995   void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
 996   void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
 997   void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
 998   // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
 999   // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1000 
1001   // Short versions of the above
1002   void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
1003   void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
1004   void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
1005   void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
1006   void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
1007   void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
1008   void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
1009   void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
1010   void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
1011   void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
1012   void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
1013   void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1014   void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1015   void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
1016   void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
1017   void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
1018   void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
1019   void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
1020   void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
1021   void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
1022   void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1023   void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1024   void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
1025   void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
1026   void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
1027   void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
1028   void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
1029   void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
1030   void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
1031   void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
1032   // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
1033   // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
1034 
1035   // Floating
1036 
1037   void push_f(XMMRegister r);
1038   void pop_f(XMMRegister r);
1039   void push_d(XMMRegister r);
1040   void pop_d(XMMRegister r);
1041 
1042   void push_ppx(Register src);
1043   void pop_ppx(Register dst);
1044 
1045   void andpd(XMMRegister dst, XMMRegister    src) { Assembler::andpd(dst, src); }
1046   void andpd(XMMRegister dst, Address        src) { Assembler::andpd(dst, src); }
1047   void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1048 
1049   void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }
1050 
1051   void andps(XMMRegister dst, XMMRegister    src) { Assembler::andps(dst, src); }
1052   void andps(XMMRegister dst, Address        src) { Assembler::andps(dst, src); }
1053   void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1054 
1055   void comiss(XMMRegister dst, XMMRegister    src) { Assembler::comiss(dst, src); }
1056   void comiss(XMMRegister dst, Address        src) { Assembler::comiss(dst, src); }
1057   void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1058 
1059   void comisd(XMMRegister dst, XMMRegister    src) { Assembler::comisd(dst, src); }
1060   void comisd(XMMRegister dst, Address        src) { Assembler::comisd(dst, src); }
1061   void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1062 
1063   void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }
1064 
1065   void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
1066   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1067   void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1068 
1069  private:
1070   void sha256_AVX2_one_round_compute(
1071     Register  reg_old_h,
1072     Register  reg_a,
1073     Register  reg_b,
1074     Register  reg_c,
1075     Register  reg_d,
1076     Register  reg_e,
1077     Register  reg_f,
1078     Register  reg_g,
1079     Register  reg_h,
1080     int iter);
1081   void sha256_AVX2_four_rounds_compute_first(int start);
1082   void sha256_AVX2_four_rounds_compute_last(int start);
1083   void sha256_AVX2_one_round_and_sched(
1084         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1085         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
1086         XMMRegister xmm_2,     /* ymm6 */
1087         XMMRegister xmm_3,     /* ymm7 */
1088         Register    reg_a,      /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
1089         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
1090         Register    reg_c,      /* edi */
1091         Register    reg_d,      /* esi */
1092         Register    reg_e,      /* r8d */
1093         Register    reg_f,      /* r9d */
1094         Register    reg_g,      /* r10d */
1095         Register    reg_h,      /* r11d */
1096         int iter);
1097 
1098   void addm(int disp, Register r1, Register r2);
1099 
1100   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1101                                      Register e, Register f, Register g, Register h, int iteration);
1102 
1103   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1104                                           Register a, Register b, Register c, Register d, Register e, Register f,
1105                                           Register g, Register h, int iteration);
1106 
1107   void addmq(int disp, Register r1, Register r2);
1108  public:
1109   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1110                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1111                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1112                    bool multi_block, XMMRegister shuf_mask);
1113   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1114                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1115                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1116                    XMMRegister shuf_mask);
1117   void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
1118 
1119   void fast_md5(Register buf, Address state, Address ofs, Address limit,
1120                 bool multi_block);
1121 
1122   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1123                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1124                  Register buf, Register state, Register ofs, Register limit, Register rsp,
1125                  bool multi_block);
1126 
1127   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1128                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1129                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1130                    bool multi_block, XMMRegister shuf_mask);
1131 
1132   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1133                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1134                 Register rax, Register rcx, Register rdx, Register tmp);
1135 
1136 private:
1137 
1138   // these are private because users should be doing movflt/movdbl
1139 
1140   void movss(Address     dst, XMMRegister    src) { Assembler::movss(dst, src); }
1141   void movss(XMMRegister dst, XMMRegister    src) { Assembler::movss(dst, src); }
1142   void movss(XMMRegister dst, Address        src) { Assembler::movss(dst, src); }
1143   void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1144 
1145   void movlpd(XMMRegister dst, Address        src) {Assembler::movlpd(dst, src); }
1146   void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1147 
1148 public:
1149 
1150   void addsd(XMMRegister dst, XMMRegister    src) { Assembler::addsd(dst, src); }
1151   void addsd(XMMRegister dst, Address        src) { Assembler::addsd(dst, src); }
1152   void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1153 
1154   void addss(XMMRegister dst, XMMRegister    src) { Assembler::addss(dst, src); }
1155   void addss(XMMRegister dst, Address        src) { Assembler::addss(dst, src); }
1156   void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1157 
1158   void addpd(XMMRegister dst, XMMRegister    src) { Assembler::addpd(dst, src); }
1159   void addpd(XMMRegister dst, Address        src) { Assembler::addpd(dst, src); }
1160   void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1161 
1162   using Assembler::vbroadcasti128;
1163   void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1164 
1165   using Assembler::vbroadcastsd;
1166   void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1167 
1168   using Assembler::vbroadcastss;
1169   void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1170 
1171   // Vector float blend
1172   void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1173   void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1174 
1175   void divsd(XMMRegister dst, XMMRegister    src) { Assembler::divsd(dst, src); }
1176   void divsd(XMMRegister dst, Address        src) { Assembler::divsd(dst, src); }
1177   void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1178 
1179   void divss(XMMRegister dst, XMMRegister    src) { Assembler::divss(dst, src); }
1180   void divss(XMMRegister dst, Address        src) { Assembler::divss(dst, src); }
1181   void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1182 
1183   // Move Unaligned Double Quadword
1184   void movdqu(Address     dst, XMMRegister    src);
1185   void movdqu(XMMRegister dst, XMMRegister    src);
1186   void movdqu(XMMRegister dst, Address        src);
1187   void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1188 
1189   void kmovwl(Register  dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1190   void kmovwl(Address   dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1191   void kmovwl(KRegister dst, KRegister      src) { Assembler::kmovwl(dst, src); }
1192   void kmovwl(KRegister dst, Register       src) { Assembler::kmovwl(dst, src); }
1193   void kmovwl(KRegister dst, Address        src) { Assembler::kmovwl(dst, src); }
1194   void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1195 
1196   void kmovql(KRegister dst, KRegister      src) { Assembler::kmovql(dst, src); }
1197   void kmovql(KRegister dst, Register       src) { Assembler::kmovql(dst, src); }
1198   void kmovql(Register  dst, KRegister      src) { Assembler::kmovql(dst, src); }
1199   void kmovql(KRegister dst, Address        src) { Assembler::kmovql(dst, src); }
1200   void kmovql(Address   dst, KRegister      src) { Assembler::kmovql(dst, src); }
1201   void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1202 
1203   // Safe move operation, lowers down to 16bit moves for targets supporting
1204   // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
1205   void kmov(Address  dst, KRegister src);
1206   void kmov(KRegister dst, Address src);
1207   void kmov(KRegister dst, KRegister src);
1208   void kmov(Register dst, KRegister src);
1209   void kmov(KRegister dst, Register src);
1210 
1211   using Assembler::movddup;
1212   void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1213 
1214   using Assembler::vmovddup;
1215   void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1216 
1217   // AVX Unaligned forms
1218   void vmovdqu(Address     dst, XMMRegister    src);
1219   void vmovdqu(XMMRegister dst, Address        src);
1220   void vmovdqu(XMMRegister dst, XMMRegister    src);
1221   void vmovdqu(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1222   void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1223   void vmovdqu(XMMRegister dst, XMMRegister    src, int vector_len);
1224   void vmovdqu(XMMRegister dst, Address        src, int vector_len);
1225   void vmovdqu(Address     dst, XMMRegister    src, int vector_len);
1226 
1227   // AVX Aligned forms
1228   using Assembler::vmovdqa;
1229   void vmovdqa(XMMRegister dst, AddressLiteral src,                 Register rscratch = noreg);
1230   void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1231 
1232   // AVX512 Unaligned
1233   void evmovdqu(BasicType type, KRegister kmask, Address     dst, XMMRegister src, bool merge, int vector_len);
1234   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address     src, bool merge, int vector_len);
1235   void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1236 
1237   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1238   void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1239 
1240   void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1241     if (dst->encoding() != src->encoding() || mask != k0)  {
1242       Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1243     }
1244   }
1245   void evmovdqub(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1246   void evmovdqub(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1247   void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1248 
1249   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1250   void evmovdquw(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1251   void evmovdquw(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1252 
1253   void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1254     if (dst->encoding() != src->encoding() || mask != k0) {
1255       Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1256     }
1257   }
1258   void evmovdquw(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1259   void evmovdquw(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1260   void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1261 
1262   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1263      if (dst->encoding() != src->encoding()) {
1264        Assembler::evmovdqul(dst, src, vector_len);
1265      }
1266   }
1267   void evmovdqul(Address     dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1268   void evmovdqul(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1269 
1270   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1271     if (dst->encoding() != src->encoding() || mask != k0)  {
1272       Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1273     }
1274   }
1275   void evmovdqul(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1276   void evmovdqul(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1277   void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1278 
1279   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1280     if (dst->encoding() != src->encoding()) {
1281       Assembler::evmovdquq(dst, src, vector_len);
1282     }
1283   }
1284   void evmovdquq(XMMRegister dst, Address        src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1285   void evmovdquq(Address     dst, XMMRegister    src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1286   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1287   void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1288 
1289   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1290     if (dst->encoding() != src->encoding() || mask != k0) {
1291       Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1292     }
1293   }
1294   void evmovdquq(Address     dst, KRegister mask, XMMRegister    src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1295   void evmovdquq(XMMRegister dst, KRegister mask, Address        src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1296   void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1297   void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1298 
1299   using Assembler::movapd;
1300   void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1301 
1302   // Move Aligned Double Quadword
1303   void movdqa(XMMRegister dst, XMMRegister    src) { Assembler::movdqa(dst, src); }
1304   void movdqa(XMMRegister dst, Address        src) { Assembler::movdqa(dst, src); }
1305   void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1306 
1307   void movsd(Address     dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1308   void movsd(XMMRegister dst, XMMRegister    src) { Assembler::movsd(dst, src); }
1309   void movsd(XMMRegister dst, Address        src) { Assembler::movsd(dst, src); }
1310   void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1311 
1312   void mulpd(XMMRegister dst, XMMRegister    src) { Assembler::mulpd(dst, src); }
1313   void mulpd(XMMRegister dst, Address        src) { Assembler::mulpd(dst, src); }
1314   void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1315 
1316   void mulsd(XMMRegister dst, XMMRegister    src) { Assembler::mulsd(dst, src); }
1317   void mulsd(XMMRegister dst, Address        src) { Assembler::mulsd(dst, src); }
1318   void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1319 
1320   void mulss(XMMRegister dst, XMMRegister    src) { Assembler::mulss(dst, src); }
1321   void mulss(XMMRegister dst, Address        src) { Assembler::mulss(dst, src); }
1322   void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1323 
1324   // Carry-Less Multiplication Quadword
1325   void pclmulldq(XMMRegister dst, XMMRegister src) {
1326     // 0x00 - multiply lower 64 bits [0:63]
1327     Assembler::pclmulqdq(dst, src, 0x00);
1328   }
1329   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1330     // 0x11 - multiply upper 64 bits [64:127]
1331     Assembler::pclmulqdq(dst, src, 0x11);
1332   }
1333 
1334   void pcmpeqb(XMMRegister dst, XMMRegister src);
1335   void pcmpeqw(XMMRegister dst, XMMRegister src);
1336 
1337   void pcmpestri(XMMRegister dst, Address src, int imm8);
1338   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1339 
1340   void pmovzxbw(XMMRegister dst, XMMRegister src);
1341   void pmovzxbw(XMMRegister dst, Address src);
1342 
1343   void pmovmskb(Register dst, XMMRegister src);
1344 
1345   void ptest(XMMRegister dst, XMMRegister src);
1346 
1347   void roundsd(XMMRegister dst, XMMRegister    src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1348   void roundsd(XMMRegister dst, Address        src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1349   void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1350 
1351   void sqrtss(XMMRegister dst, XMMRegister     src) { Assembler::sqrtss(dst, src); }
1352   void sqrtss(XMMRegister dst, Address         src) { Assembler::sqrtss(dst, src); }
1353   void sqrtss(XMMRegister dst, AddressLiteral  src, Register rscratch = noreg);
1354 
1355   void subsd(XMMRegister dst, XMMRegister    src) { Assembler::subsd(dst, src); }
1356   void subsd(XMMRegister dst, Address        src) { Assembler::subsd(dst, src); }
1357   void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1358 
1359   void subss(XMMRegister dst, XMMRegister    src) { Assembler::subss(dst, src); }
1360   void subss(XMMRegister dst, Address        src) { Assembler::subss(dst, src); }
1361   void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1362 
1363   void ucomiss(XMMRegister dst, XMMRegister    src) { Assembler::ucomiss(dst, src); }
1364   void ucomiss(XMMRegister dst, Address        src) { Assembler::ucomiss(dst, src); }
1365   void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1366 
1367   void ucomisd(XMMRegister dst, XMMRegister    src) { Assembler::ucomisd(dst, src); }
1368   void ucomisd(XMMRegister dst, Address        src) { Assembler::ucomisd(dst, src); }
1369   void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1370 
1371   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1372   void xorpd(XMMRegister dst, XMMRegister    src);
1373   void xorpd(XMMRegister dst, Address        src) { Assembler::xorpd(dst, src); }
1374   void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1375 
1376   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1377   void xorps(XMMRegister dst, XMMRegister    src);
1378   void xorps(XMMRegister dst, Address        src) { Assembler::xorps(dst, src); }
1379   void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1380 
1381   // Shuffle Bytes
1382   void pshufb(XMMRegister dst, XMMRegister    src) { Assembler::pshufb(dst, src); }
1383   void pshufb(XMMRegister dst, Address        src) { Assembler::pshufb(dst, src); }
1384   void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1385   // AVX 3-operands instructions
1386 
1387   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddsd(dst, nds, src); }
1388   void vaddsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddsd(dst, nds, src); }
1389   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1390 
1391   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vaddss(dst, nds, src); }
1392   void vaddss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vaddss(dst, nds, src); }
1393   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1394 
1395   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1396   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1397 
1398   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len);
1399   void vpaddb(XMMRegister dst, XMMRegister nds, Address        src, int vector_len);
1400   void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1401 
1402   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1403   void vpaddw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1404 
1405   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1406   void vpaddd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1407   void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1408 
1409   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1410   void vpand(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1411   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1412 
1413   using Assembler::vpbroadcastd;
1414   void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1415 
1416   using Assembler::vpbroadcastq;
1417   void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1418 
1419   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1420   void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1421 
1422   void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1423   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1424   using Assembler::evpcmpeqd;
1425   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1426 
1427   // Vector compares
1428   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1429     Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1430   }
1431   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1432 
1433   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1434     Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1435   }
1436   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1437 
1438   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1439     Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1440   }
1441   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1442 
1443   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister    src, int comparison, bool is_signed, int vector_len) {
1444     Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1445   }
1446   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1447 
1448   void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1449 
1450   // Emit comparison instruction for the specified comparison predicate.
1451   void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1452   void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1453 
1454   void vpmovzxbw(XMMRegister dst, Address     src, int vector_len);
1455   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1456 
1457   void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1458 
1459   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1460   void vpmullw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1461 
1462   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1463   void vpmulld(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1464   void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1465 
1466   void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1467 
1468   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1469   void vpsubb(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1470 
1471   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1472   void vpsubw(XMMRegister dst, XMMRegister nds, Address     src, int vector_len);
1473 
1474   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1475   void vpsraw(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1476 
1477   void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1478   void evpsrad(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1479 
1480   void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1481   void evpsraq(XMMRegister dst, XMMRegister nds, int         shift, int vector_len);
1482 
1483   using Assembler::evpsllw;
1484   void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1485     if (!is_varshift) {
1486       Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1487     } else {
1488       Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1489     }
1490   }
1491   void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1492     if (!is_varshift) {
1493       Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1494     } else {
1495       Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1496     }
1497   }
1498   void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1499     if (!is_varshift) {
1500       Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1501     } else {
1502       Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1503     }
1504   }
1505   void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1506     if (!is_varshift) {
1507       Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1508     } else {
1509       Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1510     }
1511   }
1512   void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1513     if (!is_varshift) {
1514       Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1515     } else {
1516       Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1517     }
1518   }
1519 
1520   using Assembler::evpsrlq;
1521   void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1522     if (!is_varshift) {
1523       Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1524     } else {
1525       Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1526     }
1527   }
1528   using Assembler::evpsraw;
1529   void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1530     if (!is_varshift) {
1531       Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1532     } else {
1533       Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1534     }
1535   }
1536   using Assembler::evpsrad;
1537   void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1538     if (!is_varshift) {
1539       Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1540     } else {
1541       Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1542     }
1543   }
1544   using Assembler::evpsraq;
1545   void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1546     if (!is_varshift) {
1547       Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1548     } else {
1549       Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1550     }
1551   }
1552 
1553   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1554   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1555   void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1556   void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1557 
1558   void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1559   void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1560   void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1561   void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1562 
1563   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1564   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1565 
1566   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1567   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1568 
1569   void vptest(XMMRegister dst, XMMRegister src);
1570   void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1571 
1572   void punpcklbw(XMMRegister dst, XMMRegister src);
1573   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1574 
1575   void pshufd(XMMRegister dst, Address src, int mode);
1576   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1577 
1578   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1579   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1580 
1581   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1582   void vandpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1583   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1584 
1585   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1586   void vandps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1587   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1588 
1589   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1590 
1591   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivsd(dst, nds, src); }
1592   void vdivsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivsd(dst, nds, src); }
1593   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1594 
1595   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vdivss(dst, nds, src); }
1596   void vdivss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vdivss(dst, nds, src); }
1597   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1598 
1599   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulsd(dst, nds, src); }
1600   void vmulsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulsd(dst, nds, src); }
1601   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1602 
1603   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vmulss(dst, nds, src); }
1604   void vmulss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vmulss(dst, nds, src); }
1605   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1606 
1607   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubsd(dst, nds, src); }
1608   void vsubsd(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubsd(dst, nds, src); }
1609   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1610 
1611   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister    src) { Assembler::vsubss(dst, nds, src); }
1612   void vsubss(XMMRegister dst, XMMRegister nds, Address        src) { Assembler::vsubss(dst, nds, src); }
1613   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1614 
1615   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1616   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1617 
1618   // AVX Vector instructions
1619 
1620   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1621   void vxorpd(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1622   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1623 
1624   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1625   void vxorps(XMMRegister dst, XMMRegister nds, Address        src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1626   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1627 
1628   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1629     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1630       Assembler::vpxor(dst, nds, src, vector_len);
1631     else
1632       Assembler::vxorpd(dst, nds, src, vector_len);
1633   }
1634   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1635     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1636       Assembler::vpxor(dst, nds, src, vector_len);
1637     else
1638       Assembler::vxorpd(dst, nds, src, vector_len);
1639   }
1640   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1641 
1642   // Simple version for AVX2 256bit vectors
1643   void vpxor(XMMRegister dst, XMMRegister src) {
1644     assert(UseAVX >= 2, "Should be at least AVX2");
1645     Assembler::vpxor(dst, dst, src, AVX_256bit);
1646   }
1647   void vpxor(XMMRegister dst, Address src) {
1648     assert(UseAVX >= 2, "Should be at least AVX2");
1649     Assembler::vpxor(dst, dst, src, AVX_256bit);
1650   }
1651 
1652   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister    src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1653   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1654 
1655   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1656     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1657       Assembler::vinserti32x4(dst, nds, src, imm8);
1658     } else if (UseAVX > 1) {
1659       // vinserti128 is available only in AVX2
1660       Assembler::vinserti128(dst, nds, src, imm8);
1661     } else {
1662       Assembler::vinsertf128(dst, nds, src, imm8);
1663     }
1664   }
1665 
1666   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1667     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1668       Assembler::vinserti32x4(dst, nds, src, imm8);
1669     } else if (UseAVX > 1) {
1670       // vinserti128 is available only in AVX2
1671       Assembler::vinserti128(dst, nds, src, imm8);
1672     } else {
1673       Assembler::vinsertf128(dst, nds, src, imm8);
1674     }
1675   }
1676 
1677   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1678     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1679       Assembler::vextracti32x4(dst, src, imm8);
1680     } else if (UseAVX > 1) {
1681       // vextracti128 is available only in AVX2
1682       Assembler::vextracti128(dst, src, imm8);
1683     } else {
1684       Assembler::vextractf128(dst, src, imm8);
1685     }
1686   }
1687 
1688   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1689     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1690       Assembler::vextracti32x4(dst, src, imm8);
1691     } else if (UseAVX > 1) {
1692       // vextracti128 is available only in AVX2
1693       Assembler::vextracti128(dst, src, imm8);
1694     } else {
1695       Assembler::vextractf128(dst, src, imm8);
1696     }
1697   }
1698 
1699   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1700   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1701     vinserti128(dst, dst, src, 1);
1702   }
1703   void vinserti128_high(XMMRegister dst, Address src) {
1704     vinserti128(dst, dst, src, 1);
1705   }
1706   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1707     vextracti128(dst, src, 1);
1708   }
1709   void vextracti128_high(Address dst, XMMRegister src) {
1710     vextracti128(dst, src, 1);
1711   }
1712 
1713   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1714     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1715       Assembler::vinsertf32x4(dst, dst, src, 1);
1716     } else {
1717       Assembler::vinsertf128(dst, dst, src, 1);
1718     }
1719   }
1720 
1721   void vinsertf128_high(XMMRegister dst, Address src) {
1722     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1723       Assembler::vinsertf32x4(dst, dst, src, 1);
1724     } else {
1725       Assembler::vinsertf128(dst, dst, src, 1);
1726     }
1727   }
1728 
1729   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1730     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1731       Assembler::vextractf32x4(dst, src, 1);
1732     } else {
1733       Assembler::vextractf128(dst, src, 1);
1734     }
1735   }
1736 
1737   void vextractf128_high(Address dst, XMMRegister src) {
1738     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1739       Assembler::vextractf32x4(dst, src, 1);
1740     } else {
1741       Assembler::vextractf128(dst, src, 1);
1742     }
1743   }
1744 
1745   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1746   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1747     Assembler::vinserti64x4(dst, dst, src, 1);
1748   }
1749   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1750     Assembler::vinsertf64x4(dst, dst, src, 1);
1751   }
1752   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1753     Assembler::vextracti64x4(dst, src, 1);
1754   }
1755   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1756     Assembler::vextractf64x4(dst, src, 1);
1757   }
1758   void vextractf64x4_high(Address dst, XMMRegister src) {
1759     Assembler::vextractf64x4(dst, src, 1);
1760   }
1761   void vinsertf64x4_high(XMMRegister dst, Address src) {
1762     Assembler::vinsertf64x4(dst, dst, src, 1);
1763   }
1764 
1765   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1766   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1767     vinserti128(dst, dst, src, 0);
1768   }
1769   void vinserti128_low(XMMRegister dst, Address src) {
1770     vinserti128(dst, dst, src, 0);
1771   }
1772   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1773     vextracti128(dst, src, 0);
1774   }
1775   void vextracti128_low(Address dst, XMMRegister src) {
1776     vextracti128(dst, src, 0);
1777   }
1778 
1779   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1780     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1781       Assembler::vinsertf32x4(dst, dst, src, 0);
1782     } else {
1783       Assembler::vinsertf128(dst, dst, src, 0);
1784     }
1785   }
1786 
1787   void vinsertf128_low(XMMRegister dst, Address src) {
1788     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1789       Assembler::vinsertf32x4(dst, dst, src, 0);
1790     } else {
1791       Assembler::vinsertf128(dst, dst, src, 0);
1792     }
1793   }
1794 
1795   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1796     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1797       Assembler::vextractf32x4(dst, src, 0);
1798     } else {
1799       Assembler::vextractf128(dst, src, 0);
1800     }
1801   }
1802 
1803   void vextractf128_low(Address dst, XMMRegister src) {
1804     if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1805       Assembler::vextractf32x4(dst, src, 0);
1806     } else {
1807       Assembler::vextractf128(dst, src, 0);
1808     }
1809   }
1810 
1811   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1812   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1813     Assembler::vinserti64x4(dst, dst, src, 0);
1814   }
1815   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1816     Assembler::vinsertf64x4(dst, dst, src, 0);
1817   }
1818   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1819     Assembler::vextracti64x4(dst, src, 0);
1820   }
1821   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1822     Assembler::vextractf64x4(dst, src, 0);
1823   }
1824   void vextractf64x4_low(Address dst, XMMRegister src) {
1825     Assembler::vextractf64x4(dst, src, 0);
1826   }
1827   void vinsertf64x4_low(XMMRegister dst, Address src) {
1828     Assembler::vinsertf64x4(dst, dst, src, 0);
1829   }
1830 
1831   // Carry-Less Multiplication Quadword
1832   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1833     // 0x00 - multiply lower 64 bits [0:63]
1834     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1835   }
1836   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1837     // 0x11 - multiply upper 64 bits [64:127]
1838     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1839   }
1840   void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1841     // 0x10 - multiply nds[0:63] and src[64:127]
1842     Assembler::vpclmulqdq(dst, nds, src, 0x10);
1843   }
1844   void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1845     //0x01 - multiply nds[64:127] and src[0:63]
1846     Assembler::vpclmulqdq(dst, nds, src, 0x01);
1847   }
1848 
1849   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1850     // 0x00 - multiply lower 64 bits [0:63]
1851     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1852   }
1853   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1854     // 0x11 - multiply upper 64 bits [64:127]
1855     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1856   }
1857 
1858   // AVX-512 mask operations.
1859   void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1860   void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1861   void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1862   void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1863   void kortest(uint masklen, KRegister src1, KRegister src2);
1864   void ktest(uint masklen, KRegister src1, KRegister src2);
1865 
1866   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1867   void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1868 
1869   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1870   void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1871 
1872   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1873   void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1874 
1875   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1876   void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1877 
1878   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1879   void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1880   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1881   void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1882 
1883   using Assembler::evpandq;
1884   void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1885 
1886   using Assembler::evpaddq;
1887   void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1888 
1889   using Assembler::evporq;
1890   void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1891 
1892   using Assembler::vpshufb;
1893   void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1894 
1895   using Assembler::vpor;
1896   void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1897 
1898   using Assembler::vpternlogq;
1899   void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1900 
1901   void cmov32( Condition cc, Register dst, Address  src);
1902   void cmov32( Condition cc, Register dst, Register src);
1903 
1904   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1905 
1906   void cmovptr(Condition cc, Register dst, Address  src) { cmovq(cc, dst, src); }
1907   void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1908 
1909   void movoop(Register dst, jobject obj);
1910   void movoop(Address  dst, jobject obj, Register rscratch);
1911 
1912   void mov_metadata(Register dst, Metadata* obj);
1913   void mov_metadata(Address  dst, Metadata* obj, Register rscratch);
1914 
1915   void movptr(Register     dst, Register       src);
1916   void movptr(Register     dst, Address        src);
1917   void movptr(Register     dst, AddressLiteral src);
1918   void movptr(Register     dst, ArrayAddress   src);
1919   void movptr(Register     dst, intptr_t       src);
1920   void movptr(Address      dst, Register       src);
1921   void movptr(Address      dst, int32_t        imm);
1922   void movptr(Address      dst, intptr_t       src, Register rscratch);
1923   void movptr(ArrayAddress dst, Register       src, Register rscratch);
1924 
1925   void movptr(Register dst, RegisterOrConstant src) {
1926     if (src.is_constant()) movptr(dst, src.as_constant());
1927     else                   movptr(dst, src.as_register());
1928   }
1929 
1930 
1931   // to avoid hiding movl
1932   void mov32(Register       dst, AddressLiteral src);
1933   void mov32(AddressLiteral dst, Register        src, Register rscratch = noreg);
1934 
1935   // Import other mov() methods from the parent class or else
1936   // they will be hidden by the following overriding declaration.
1937   using Assembler::movdl;
1938   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1939 
1940   using Assembler::movq;
1941   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1942 
1943   // Can push value or effective address
1944   void pushptr(AddressLiteral src, Register rscratch);
1945 
1946   void pushptr(Address src) { pushq(src); }
1947   void popptr(Address src) { popq(src); }
1948 
1949   void pushoop(jobject obj, Register rscratch);
1950   void pushklass(Metadata* obj, Register rscratch);
1951 
1952   // sign extend as need a l to ptr sized element
1953   void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1954   void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1955 
1956 
1957  public:
1958   // Inline type specific methods
1959   #include "asm/macroAssembler_common.hpp"
1960 
1961   int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1962   bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1963   bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1964                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1965                             RegState reg_state[]);
1966   bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1967                           VMRegPair* from, int from_count, int& from_index, VMReg to,
1968                           RegState reg_state[], Register val_array);
1969   int extend_stack_for_inline_args(int args_on_stack);
1970   void remove_frame(int initial_framesize, bool needs_stack_repair);
1971   VMReg spill_reg_for(VMReg reg);
1972 
1973   // clear memory of size 'cnt' qwords, starting at 'base';
1974   // if 'is_large' is set, do not try to produce short loop
1975   void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1976 
1977   // clear memory initialization sequence for constant size;
1978   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1979 
1980   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1981   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1982 
1983   // Fill primitive arrays
1984   void generate_fill(BasicType t, bool aligned,
1985                      Register to, Register value, Register count,
1986                      Register rtmp, XMMRegister xtmp);
1987 
1988   void encode_iso_array(Register src, Register dst, Register len,
1989                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1990                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1991 
1992   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1993   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1994                              Register y, Register y_idx, Register z,
1995                              Register carry, Register product,
1996                              Register idx, Register kdx);
1997   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1998                               Register yz_idx, Register idx,
1999                               Register carry, Register product, int offset);
2000   void multiply_128_x_128_bmi2_loop(Register y, Register z,
2001                                     Register carry, Register carry2,
2002                                     Register idx, Register jdx,
2003                                     Register yz_idx1, Register yz_idx2,
2004                                     Register tmp, Register tmp3, Register tmp4);
2005   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
2006                                Register yz_idx, Register idx, Register jdx,
2007                                Register carry, Register product,
2008                                Register carry2);
2009   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
2010                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
2011   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
2012                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2013   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
2014                             Register tmp2);
2015   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
2016                        Register rdxReg, Register raxReg);
2017   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
2018   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2019                        Register tmp3, Register tmp4);
2020   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2021                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2022 
2023   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
2024                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2025                Register raxReg);
2026   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
2027                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2028                Register raxReg);
2029   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
2030                            Register result, Register tmp1, Register tmp2,
2031                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
2032 
2033   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
2034   void update_byte_crc32(Register crc, Register val, Register table);
2035   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
2036 
2037   void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
2038   void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
2039                                 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
2040                                 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
2041 
2042   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
2043   // Note on a naming convention:
2044   // Prefix w = register only used on a Westmere+ architecture
2045   // Prefix n = register only used on a Nehalem architecture
2046   void crc32c_ipl_alg4(Register in_out, uint32_t n,
2047                        Register tmp1, Register tmp2, Register tmp3);
2048   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2049                         Register in_out,
2050                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2051                         XMMRegister w_xtmp2,
2052                         Register tmp1,
2053                         Register n_tmp2, Register n_tmp3);
2054   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2055                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2056                        Register tmp1, Register tmp2,
2057                        Register n_tmp3);
2058   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
2059                          Register in_out1, Register in_out2, Register in_out3,
2060                          Register tmp1, Register tmp2, Register tmp3,
2061                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2062                          Register tmp4, Register tmp5,
2063                          Register n_tmp6);
2064   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
2065                             Register tmp1, Register tmp2, Register tmp3,
2066                             Register tmp4, Register tmp5, Register tmp6,
2067                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2068                             bool is_pclmulqdq_supported);
2069   // Fold 128-bit data chunk
2070   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2071   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
2072   // Fold 512-bit data chunk
2073   void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2074   // Fold 8-bit data
2075   void fold_8bit_crc32(Register crc, Register table, Register tmp);
2076   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
2077 
2078   // Compress char[] array to byte[].
2079   void char_array_compress(Register src, Register dst, Register len,
2080                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2081                            XMMRegister tmp4, Register tmp5, Register result,
2082                            KRegister mask1 = knoreg, KRegister mask2 = knoreg);
2083 
2084   // Inflate byte[] array to char[].
2085   void byte_array_inflate(Register src, Register dst, Register len,
2086                           XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
2087 
2088   void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2089                    Register length, Register temp, int vec_enc);
2090 
2091   void fill64_masked(uint shift, Register dst, int disp,
2092                          XMMRegister xmm, KRegister mask, Register length,
2093                          Register temp, bool use64byteVector = false);
2094 
2095   void fill32_masked(uint shift, Register dst, int disp,
2096                          XMMRegister xmm, KRegister mask, Register length,
2097                          Register temp);
2098 
2099   void fill32(Address dst, XMMRegister xmm);
2100 
2101   void fill32(Register dst, int disp, XMMRegister xmm);
2102 
2103   void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2104 
2105   void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);
2106 
2107   void convert_f2i(Register dst, XMMRegister src);
2108   void convert_d2i(Register dst, XMMRegister src);
2109   void convert_f2l(Register dst, XMMRegister src);
2110   void convert_d2l(Register dst, XMMRegister src);
2111   void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2112   void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2113 
2114   void cache_wb(Address line);
2115   void cache_wbsync(bool is_pre);
2116 
2117 #ifdef COMPILER2_OR_JVMCI
2118   void generate_fill_avx3(BasicType type, Register to, Register value,
2119                           Register count, Register rtmp, XMMRegister xtmp);
2120 #endif // COMPILER2_OR_JVMCI
2121 
2122   void vallones(XMMRegister dst, int vector_len);
2123 
2124   void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2125 
2126   void fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
2127   void fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
2128 
2129   void save_legacy_gprs();
2130   void restore_legacy_gprs();
2131   void setcc(Assembler::Condition comparison, Register dst);
2132 };
2133 
2134 #endif // CPU_X86_MACROASSEMBLER_X86_HPP