/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "ci/ciInlineKlass.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

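// Table mapping each x86 condition code to its logical negation
// (e.g. overflow <-> noOverflow, less <-> greaterEqual), indexed by Assembler::Condition.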
static const Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf, */

};


// Implementation of MacroAssembler

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements, others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());

}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp,  frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

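  // rsp is not 16-byte aligned here: pad with 8 bytes around the call so the
  // x86-64 ABI stack-alignment requirement holds at the call instruction.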
  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271.  The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
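  // (cmpq cannot encode a 64-bit immediate, so min_long is compared via memory)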
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

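// The increment/decrement helpers below handle value == min_jint first because
// negating min_jint would overflow a 32-bit int.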
void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

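// For an lval the literal's address itself is wanted; otherwise load the value
// stored at that address, either directly (if reachable) or via lea + load.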
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

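// Move an argument into the corresponding C calling-convention register,
// unless it is already there.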
static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

#ifdef _WIN64
  // Windows always allocates space for its register args
  subq(rsp,  frame::arg_reg_save_area_bytes);
#endif
  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
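  // The 16 slots in regs[] come from pusha(): rax is pushed first and r15 last,
  // so rax ends up at regs[15] and r15 at regs[0] (regs[11] is the unsaved rsp slot).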
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg; may need to move between a stack slot and an XMM register
// (stack-to-stack goes through an integer tmp register)
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or a pair of adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we will store integer-like items to the stack as
// 64 bit items (x86_32/64 abi) even though Java would only store
// 32 bits for a parameter. On 32 bit it would simply be 32 bits.
// So this routine does 32->32 on 32 bit and 32->64 on 64 bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
void MacroAssembler::object_move(OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if the oop is null; if it is, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop handles, and pass a handle if the oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If the arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  addq(dst, imm32);
}

void MacroAssembler::addptr(Register dst, Register src) {
  addq(dst, src);
}

void MacroAssembler::addptr(Address dst, Register src) {
  addq(dst, src);
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459.  Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 2 &&
      (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
      (dst->encoding() >= 16)) {
    vpand(dst, dst, src, AVX_512bit, rscratch);
  } else if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  andq(dst, imm32);
}

void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}

// Writes to successive stack pages until the given offset is reached, to check
// for stack overflow plus shadow pages.  Clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move, but since it can serve as a debugging crumb,
    // the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
  jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
  call(AddressLiteral(entry, rh));
}

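// Number of bytes emitted by ic_check() below; with compact object headers the
// klass load (load_narrow_klass_compact) is a longer instruction sequence, hence
// the larger size.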
int MacroAssembler::ic_check_size() {
  return UseCompactObjectHeaders ? 17 : 14;
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rax;
  Register temp = rscratch1;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(temp, receiver);
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);

  return uep_offset;
}

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

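// The variants below call a local label so that a return address is left on the
// stack just below the original rsp; as the comment in call_VM_helper explains,
// this lets last_Java_pc be recovered from last_Java_sp[-1].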
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  assert_different_registers(arg_1, c_rarg2);

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  Register java_thread = r15_thread;

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, r15_thread);

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

#ifdef ASSERT
  // Check that thread register is not clobbered.
  guarantee(java_thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread_slow(rax);
    cmpptr(java_thread, rax);
    jcc(Assembler::equal, L);
    STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
    bind(L);
  }
  pop(rax);
#endif

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

   // C++ interp handles this in the interpreter
  check_and_handle_popframe();
  check_and_handle_earlyret();

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculating the value for last_Java_sp is somewhat subtle.
  // call_VM does an intermediate call which places a return address on
  // the stack just under the stack pointer as the user finished with it.
  // This allows us to retrieve last_Java_pc from last_Java_sp[-1].

  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));

  call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when the MacroAssembler version of call_VM_leaf_base() should be called from the Interpreter.
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 4);
}

void MacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result_oop(Register oop_result) {
  movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
  movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
  movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
  movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
}

void MacroAssembler::check_and_handle_earlyret() {
}

void MacroAssembler::check_and_handle_popframe() {
}

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpl(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    cmpl(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}

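// Compare two FP values and materialize -1, 0, or +1 in dst, matching the Java
// fcmpl/fcmpg (dcmpl/dcmpg) semantics; 'unordered_is_less' selects whether an
// unordered (NaN) result is treated as less than or greater than.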
void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}


void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpb(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (src2.is_lval()) {
    movptr(rscratch, src2);
    Assembler::cmpq(src1, rscratch);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
  assert(src2.is_lval(), "not a mem-mem compare");
  // moves src2's literal address
  movptr(rscratch, src2);
  Assembler::cmpq(src1, rscratch);
}

void MacroAssembler::cmpoop(Register src1, Register src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, Address src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
  movoop(rscratch, src2);
  cmpptr(src1, rscratch);
}

void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(adr), "missing");

  if (reachable(adr)) {
    lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch, adr);
    lock();
    cmpxchgptr(reg, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  cmpxchgq(reg, adr);
}

void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comisd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comisd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comiss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comiss(dst, Address(rscratch, 0));
  }
}


void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  pushf(); // Preserve flags
  atomic_incl(counter_addr, rscratch);
  popf();
  bind(L);
}

 1520 int MacroAssembler::corrected_idivl(Register reg) {
 1521   // Full implementation of Java idiv and irem; checks for
 1522   // special case as described in JVM spec., p.243 & p.271.
 1523   // The function returns the (pc) offset of the idivl
 1524   // instruction - may be needed for implicit exceptions.
 1525   //
 1526   //         normal case                           special case
 1527   //
 1528   // input : rax: dividend                          min_int
 1529   //         reg: divisor   (may not be rax/rdx)    -1
 1530   //
 1531   // output: rax: quotient  (= rax idiv reg)        min_int
 1532   //         rdx: remainder (= rax irem reg)        0
 1533   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
 1534   const int min_int = 0x80000000;
 1535   Label normal_case, special_case;
 1536 
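        // On x86, min_int idiv -1 raises #DE because the quotient (2^31) is not representable
        // in 32 bits, so that combination is diverted to the special-case path below and the
        // Java-defined result (quotient = min_int, remainder = 0) is produced instead.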
 1537   // check for special case
 1538   cmpl(rax, min_int);
 1539   jcc(Assembler::notEqual, normal_case);
 1540   xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
 1541   cmpl(reg, -1);
 1542   jcc(Assembler::equal, special_case);
 1543 
 1544   // handle normal case
 1545   bind(normal_case);
 1546   cdql();
 1547   int idivl_offset = offset();
 1548   idivl(reg);
 1549 
 1550   // normal and special case exit
 1551   bind(special_case);
 1552 
 1553   return idivl_offset;
 1554 }
 1555 
 1556 
 1557 
 1558 void MacroAssembler::decrementl(Register reg, int value) {
 1559   if (value == min_jint) {subl(reg, value) ; return; }
 1560   if (value <  0) { incrementl(reg, -value); return; }
 1561   if (value == 0) {                        ; return; }
 1562   if (value == 1 && UseIncDec) { decl(reg) ; return; }
 1563   /* else */      { subl(reg, value)       ; return; }
 1564 }
 1565 
 1566 void MacroAssembler::decrementl(Address dst, int value) {
 1567   if (value == min_jint) {subl(dst, value) ; return; }
 1568   if (value <  0) { incrementl(dst, -value); return; }
 1569   if (value == 0) {                        ; return; }
 1570   if (value == 1 && UseIncDec) { decl(dst) ; return; }
 1571   /* else */      { subl(dst, value)       ; return; }
 1572 }
 1573 
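      // Signed division of reg by 2^shift_value: negative dividends are biased by
      // (2^shift_value - 1) before the arithmetic shift so the quotient rounds
      // toward zero, as Java integer division requires.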
 1574 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
 1575   assert(shift_value > 0, "illegal shift value");
 1576   Label _is_positive;
 1577   testl (reg, reg);
 1578   jcc (Assembler::positive, _is_positive);
 1579   int offset = (1 << shift_value) - 1 ;
 1580 
 1581   if (offset == 1) {
 1582     incrementl(reg);
 1583   } else {
 1584     addl(reg, offset);
 1585   }
 1586 
 1587   bind (_is_positive);
 1588   sarl(reg, shift_value);
 1589 }
 1590 
 1591 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1592   assert(rscratch != noreg || always_reachable(src), "missing");
 1593 
 1594   if (reachable(src)) {
 1595     Assembler::divsd(dst, as_Address(src));
 1596   } else {
 1597     lea(rscratch, src);
 1598     Assembler::divsd(dst, Address(rscratch, 0));
 1599   }
 1600 }
 1601 
 1602 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1603   assert(rscratch != noreg || always_reachable(src), "missing");
 1604 
 1605   if (reachable(src)) {
 1606     Assembler::divss(dst, as_Address(src));
 1607   } else {
 1608     lea(rscratch, src);
 1609     Assembler::divss(dst, Address(rscratch, 0));
 1610   }
 1611 }
 1612 
 1613 void MacroAssembler::enter() {
 1614   push(rbp);
 1615   mov(rbp, rsp);
 1616 }
 1617 
 1618 void MacroAssembler::post_call_nop() {
 1619   if (!Continuations::enabled()) {
 1620     return;
 1621   }
 1622   InstructionMark im(this);
 1623   relocate(post_call_nop_Relocation::spec());
 1624   InlineSkippedInstructionsCounter skipCounter(this);
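        // Emit an 8-byte NOP: 0F 1F 84 00 + 4-byte displacement, i.e. nopl 0x0(%rax,%rax,1).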
 1625   emit_int8((uint8_t)0x0f);
 1626   emit_int8((uint8_t)0x1f);
 1627   emit_int8((uint8_t)0x84);
 1628   emit_int8((uint8_t)0x00);
 1629   emit_int32(0x00);
 1630 }
 1631 
 1632 // A 5 byte nop that is safe for patching (see patch_verified_entry)
 1633 void MacroAssembler::fat_nop() {
 1634   if (UseAddressNop) {
 1635     addr_nop_5();
 1636   } else {
 1637     emit_int8((uint8_t)0x26); // es:
 1638     emit_int8((uint8_t)0x2e); // cs:
 1639     emit_int8((uint8_t)0x64); // fs:
 1640     emit_int8((uint8_t)0x65); // gs:
 1641     emit_int8((uint8_t)0x90);
 1642   }
 1643 }
 1644 
 1645 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1646   assert(rscratch != noreg || always_reachable(src), "missing");
 1647   if (reachable(src)) {
 1648     Assembler::mulpd(dst, as_Address(src));
 1649   } else {
 1650     lea(rscratch, src);
 1651     Assembler::mulpd(dst, Address(rscratch, 0));
 1652   }
 1653 }
 1654 
 1655 // dst = c = a * b + c
 1656 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 1657   Assembler::vfmadd231sd(c, a, b);
 1658   if (dst != c) {
 1659     movdbl(dst, c);
 1660   }
 1661 }
 1662 
 1663 // dst = c = a * b + c
 1664 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 1665   Assembler::vfmadd231ss(c, a, b);
 1666   if (dst != c) {
 1667     movflt(dst, c);
 1668   }
 1669 }
 1670 
 1671 // dst = c = a * b + c
 1672 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 1673   Assembler::vfmadd231pd(c, a, b, vector_len);
 1674   if (dst != c) {
 1675     vmovdqu(dst, c);
 1676   }
 1677 }
 1678 
 1679 // dst = c = a * b + c
 1680 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 1681   Assembler::vfmadd231ps(c, a, b, vector_len);
 1682   if (dst != c) {
 1683     vmovdqu(dst, c);
 1684   }
 1685 }
 1686 
 1687 // dst = c = a * b + c
 1688 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 1689   Assembler::vfmadd231pd(c, a, b, vector_len);
 1690   if (dst != c) {
 1691     vmovdqu(dst, c);
 1692   }
 1693 }
 1694 
 1695 // dst = c = a * b + c
 1696 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 1697   Assembler::vfmadd231ps(c, a, b, vector_len);
 1698   if (dst != c) {
 1699     vmovdqu(dst, c);
 1700   }
 1701 }
 1702 
 1703 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
 1704   assert(rscratch != noreg || always_reachable(dst), "missing");
 1705 
 1706   if (reachable(dst)) {
 1707     incrementl(as_Address(dst));
 1708   } else {
 1709     lea(rscratch, dst);
 1710     incrementl(Address(rscratch, 0));
 1711   }
 1712 }
 1713 
 1714 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
 1715   incrementl(as_Address(dst, rscratch));
 1716 }
 1717 
 1718 void MacroAssembler::incrementl(Register reg, int value) {
 1719   if (value == min_jint) {addl(reg, value) ; return; }
 1720   if (value <  0) { decrementl(reg, -value); return; }
 1721   if (value == 0) {                        ; return; }
 1722   if (value == 1 && UseIncDec) { incl(reg) ; return; }
 1723   /* else */      { addl(reg, value)       ; return; }
 1724 }
 1725 
 1726 void MacroAssembler::incrementl(Address dst, int value) {
 1727   if (value == min_jint) {addl(dst, value) ; return; }
 1728   if (value <  0) { decrementl(dst, -value); return; }
 1729   if (value == 0) {                        ; return; }
 1730   if (value == 1 && UseIncDec) { incl(dst) ; return; }
 1731   /* else */      { addl(dst, value)       ; return; }
 1732 }
 1733 
 1734 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
 1735   assert(rscratch != noreg || always_reachable(dst), "missing");
 1736   assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
 1737   if (reachable(dst)) {
 1738     jmp_literal(dst.target(), dst.rspec());
 1739   } else {
 1740     lea(rscratch, dst);
 1741     jmp(rscratch);
 1742   }
 1743 }
 1744 
 1745 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
 1746   assert(rscratch != noreg || always_reachable(dst), "missing");
 1747   assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
 1748   if (reachable(dst)) {
 1749     InstructionMark im(this);
 1750     relocate(dst.reloc());
 1751     const int short_size = 2;
 1752     const int long_size = 6;
 1753     int offs = (intptr_t)dst.target() - ((intptr_t)pc());
 1754     if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
 1755       // 0111 tttn #8-bit disp
 1756       emit_int8(0x70 | cc);
 1757       emit_int8((offs - short_size) & 0xFF);
 1758     } else {
 1759       // 0000 1111 1000 tttn #32-bit disp
 1760       emit_int8(0x0F);
 1761       emit_int8((unsigned char)(0x80 | cc));
 1762       emit_int32(offs - long_size);
 1763     }
 1764   } else {
 1765 #ifdef ASSERT
 1766     warning("reversing conditional branch");
 1767 #endif /* ASSERT */
 1768     Label skip;
 1769     jccb(reverse[cc], skip);
 1770     lea(rscratch, dst);
 1771     Assembler::jmp(rscratch);
 1772     bind(skip);
 1773   }
 1774 }
 1775 
 1776 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
 1777   ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
 1778   assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
 1779 
 1780   stmxcsr(mxcsr_save);
 1781   movl(tmp, mxcsr_save);
 1782   if (EnableX86ECoreOpts) {
 1783     // The mxcsr_std has status bits set for performance on ECore
 1784     orl(tmp, 0x003f);
 1785   } else {
 1786     // Mask out status bits (only check control and mask bits)
 1787     andl(tmp, 0xFFC0);
 1788   }
 1789   cmp32(tmp, mxcsr_std, rscratch);
 1790 }
 1791 
 1792 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
 1793   assert(rscratch != noreg || always_reachable(src), "missing");
 1794 
 1795   if (reachable(src)) {
 1796     Assembler::ldmxcsr(as_Address(src));
 1797   } else {
 1798     lea(rscratch, src);
 1799     Assembler::ldmxcsr(Address(rscratch, 0));
 1800   }
 1801 }
 1802 
 1803 int MacroAssembler::load_signed_byte(Register dst, Address src) {
 1804   int off = offset();
 1805   movsbl(dst, src); // movsxb
 1806   return off;
 1807 }
 1808 
 1809 // Note: load_signed_short used to be called load_signed_word.
 1810 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
 1811 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
 1812 // The term "word" in HotSpot means a 32- or 64-bit machine word.
 1813 int MacroAssembler::load_signed_short(Register dst, Address src) {
 1814   // A signed 16 => 64 bit extension would also be safe, but the 64-bit port has
 1815   // always extended only to 32 bits, which implies that callers rely on no more
 1816   // than the low 32 bits of the result.
 1817   int off = offset();
 1818   movswl(dst, src); // movsxw
 1819   return off;
 1820 }
 1821 
 1822 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
 1823   // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
 1824   // and "3.9 Partial Register Penalties", p.22.
 1825   int off = offset();
 1826   movzbl(dst, src); // movzxb
 1827   return off;
 1828 }
 1829 
 1830 // Note: load_unsigned_short used to be called load_unsigned_word.
 1831 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
 1832   // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
 1833   // and "3.9 Partial Register Penalties", p.22.
 1834   int off = offset();
 1835   movzwl(dst, src); // movzxw
 1836   return off;
 1837 }
 1838 
 1839 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
 1840   switch (size_in_bytes) {
 1841   case  8:  movq(dst, src); break;
 1842   case  4:  movl(dst, src); break;
 1843   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
 1844   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
 1845   default:  ShouldNotReachHere();
 1846   }
 1847 }
 1848 
 1849 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
 1850   switch (size_in_bytes) {
 1851   case  8:  movq(dst, src); break;
 1852   case  4:  movl(dst, src); break;
 1853   case  2:  movw(dst, src); break;
 1854   case  1:  movb(dst, src); break;
 1855   default:  ShouldNotReachHere();
 1856   }
 1857 }
 1858 
 1859 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
 1860   assert(rscratch != noreg || always_reachable(dst), "missing");
 1861 
 1862   if (reachable(dst)) {
 1863     movl(as_Address(dst), src);
 1864   } else {
 1865     lea(rscratch, dst);
 1866     movl(Address(rscratch, 0), src);
 1867   }
 1868 }
 1869 
 1870 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
 1871   if (reachable(src)) {
 1872     movl(dst, as_Address(src));
 1873   } else {
 1874     lea(dst, src);
 1875     movl(dst, Address(dst, 0));
 1876   }
 1877 }
 1878 
 1879 // C++ bool manipulation
 1880 
 1881 void MacroAssembler::movbool(Register dst, Address src) {
 1882   if(sizeof(bool) == 1)
 1883     movb(dst, src);
 1884   else if(sizeof(bool) == 2)
 1885     movw(dst, src);
 1886   else if(sizeof(bool) == 4)
 1887     movl(dst, src);
 1888   else
 1889     // unsupported
 1890     ShouldNotReachHere();
 1891 }
 1892 
 1893 void MacroAssembler::movbool(Address dst, bool boolconst) {
 1894   if(sizeof(bool) == 1)
 1895     movb(dst, (int) boolconst);
 1896   else if(sizeof(bool) == 2)
 1897     movw(dst, (int) boolconst);
 1898   else if(sizeof(bool) == 4)
 1899     movl(dst, (int) boolconst);
 1900   else
 1901     // unsupported
 1902     ShouldNotReachHere();
 1903 }
 1904 
 1905 void MacroAssembler::movbool(Address dst, Register src) {
 1906   if(sizeof(bool) == 1)
 1907     movb(dst, src);
 1908   else if(sizeof(bool) == 2)
 1909     movw(dst, src);
 1910   else if(sizeof(bool) == 4)
 1911     movl(dst, src);
 1912   else
 1913     // unsupported
 1914     ShouldNotReachHere();
 1915 }
 1916 
 1917 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1918   assert(rscratch != noreg || always_reachable(src), "missing");
 1919 
 1920   if (reachable(src)) {
 1921     movdl(dst, as_Address(src));
 1922   } else {
 1923     lea(rscratch, src);
 1924     movdl(dst, Address(rscratch, 0));
 1925   }
 1926 }
 1927 
 1928 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1929   assert(rscratch != noreg || always_reachable(src), "missing");
 1930 
 1931   if (reachable(src)) {
 1932     movq(dst, as_Address(src));
 1933   } else {
 1934     lea(rscratch, src);
 1935     movq(dst, Address(rscratch, 0));
 1936   }
 1937 }
 1938 
 1939 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1940   assert(rscratch != noreg || always_reachable(src), "missing");
 1941 
 1942   if (reachable(src)) {
 1943     if (UseXmmLoadAndClearUpper) {
 1944       movsd (dst, as_Address(src));
 1945     } else {
 1946       movlpd(dst, as_Address(src));
 1947     }
 1948   } else {
 1949     lea(rscratch, src);
 1950     if (UseXmmLoadAndClearUpper) {
 1951       movsd (dst, Address(rscratch, 0));
 1952     } else {
 1953       movlpd(dst, Address(rscratch, 0));
 1954     }
 1955   }
 1956 }
 1957 
 1958 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1959   assert(rscratch != noreg || always_reachable(src), "missing");
 1960 
 1961   if (reachable(src)) {
 1962     movss(dst, as_Address(src));
 1963   } else {
 1964     lea(rscratch, src);
 1965     movss(dst, Address(rscratch, 0));
 1966   }
 1967 }
 1968 
 1969 void MacroAssembler::movptr(Register dst, Register src) {
 1970   movq(dst, src);
 1971 }
 1972 
 1973 void MacroAssembler::movptr(Register dst, Address src) {
 1974   movq(dst, src);
 1975 }
 1976 
 1977 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
 1978 void MacroAssembler::movptr(Register dst, intptr_t src) {
 1979   if (is_uimm32(src)) {
 1980     movl(dst, checked_cast<uint32_t>(src));
 1981   } else if (is_simm32(src)) {
 1982     movq(dst, checked_cast<int32_t>(src));
 1983   } else {
 1984     mov64(dst, src);
 1985   }
 1986 }
 1987 
 1988 void MacroAssembler::movptr(Address dst, Register src) {
 1989   movq(dst, src);
 1990 }
 1991 
 1992 void MacroAssembler::movptr(Address dst, int32_t src) {
 1993   movslq(dst, src);
 1994 }
 1995 
 1996 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
 1997   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 1998   Assembler::movdqu(dst, src);
 1999 }
 2000 
 2001 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
 2002   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2003   Assembler::movdqu(dst, src);
 2004 }
 2005 
 2006 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
 2007   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2008   Assembler::movdqu(dst, src);
 2009 }
 2010 
 2011 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2012   assert(rscratch != noreg || always_reachable(src), "missing");
 2013 
 2014   if (reachable(src)) {
 2015     movdqu(dst, as_Address(src));
 2016   } else {
 2017     lea(rscratch, src);
 2018     movdqu(dst, Address(rscratch, 0));
 2019   }
 2020 }
 2021 
 2022 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
 2023   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2024   Assembler::vmovdqu(dst, src);
 2025 }
 2026 
 2027 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
 2028   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2029   Assembler::vmovdqu(dst, src);
 2030 }
 2031 
 2032 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
 2033   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2034   Assembler::vmovdqu(dst, src);
 2035 }
 2036 
 2037 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2038   assert(rscratch != noreg || always_reachable(src), "missing");
 2039 
 2040   if (reachable(src)) {
 2041     vmovdqu(dst, as_Address(src));
 2042   } else {
 2044     lea(rscratch, src);
 2045     vmovdqu(dst, Address(rscratch, 0));
 2046   }
 2047 }
 2048 
 2049 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2050   assert(rscratch != noreg || always_reachable(src), "missing");
 2051 
 2052   if (vector_len == AVX_512bit) {
 2053     evmovdquq(dst, src, AVX_512bit, rscratch);
 2054   } else if (vector_len == AVX_256bit) {
 2055     vmovdqu(dst, src, rscratch);
 2056   } else {
 2057     movdqu(dst, src, rscratch);
 2058   }
 2059 }
 2060 
 2061 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
 2062   if (vector_len == AVX_512bit) {
 2063     evmovdquq(dst, src, AVX_512bit);
 2064   } else if (vector_len == AVX_256bit) {
 2065     vmovdqu(dst, src);
 2066   } else {
 2067     movdqu(dst, src);
 2068   }
 2069 }
 2070 
 2071 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
 2072   if (vector_len == AVX_512bit) {
 2073     evmovdquq(dst, src, AVX_512bit);
 2074   } else if (vector_len == AVX_256bit) {
 2075     vmovdqu(dst, src);
 2076   } else {
 2077     movdqu(dst, src);
 2078   }
 2079 }
 2080 
 2081 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
 2082   if (vector_len == AVX_512bit) {
 2083     evmovdquq(dst, src, AVX_512bit);
 2084   } else if (vector_len == AVX_256bit) {
 2085     vmovdqu(dst, src);
 2086   } else {
 2087     movdqu(dst, src);
 2088   }
 2089 }
 2090 
 2091 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2092   assert(rscratch != noreg || always_reachable(src), "missing");
 2093 
 2094   if (reachable(src)) {
 2095     vmovdqa(dst, as_Address(src));
 2096   } else {
 2098     lea(rscratch, src);
 2099     vmovdqa(dst, Address(rscratch, 0));
 2100   }
 2101 }
 2102 
 2103 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2104   assert(rscratch != noreg || always_reachable(src), "missing");
 2105 
 2106   if (vector_len == AVX_512bit) {
 2107     evmovdqaq(dst, src, AVX_512bit, rscratch);
 2108   } else if (vector_len == AVX_256bit) {
 2109     vmovdqa(dst, src, rscratch);
 2110   } else {
 2111     movdqa(dst, src, rscratch);
 2112   }
 2113 }
 2114 
 2115 void MacroAssembler::kmov(KRegister dst, Address src) {
 2116   if (VM_Version::supports_avx512bw()) {
 2117     kmovql(dst, src);
 2118   } else {
 2119     assert(VM_Version::supports_evex(), "");
 2120     kmovwl(dst, src);
 2121   }
 2122 }
 2123 
 2124 void MacroAssembler::kmov(Address dst, KRegister src) {
 2125   if (VM_Version::supports_avx512bw()) {
 2126     kmovql(dst, src);
 2127   } else {
 2128     assert(VM_Version::supports_evex(), "");
 2129     kmovwl(dst, src);
 2130   }
 2131 }
 2132 
 2133 void MacroAssembler::kmov(KRegister dst, KRegister src) {
 2134   if (VM_Version::supports_avx512bw()) {
 2135     kmovql(dst, src);
 2136   } else {
 2137     assert(VM_Version::supports_evex(), "");
 2138     kmovwl(dst, src);
 2139   }
 2140 }
 2141 
 2142 void MacroAssembler::kmov(Register dst, KRegister src) {
 2143   if (VM_Version::supports_avx512bw()) {
 2144     kmovql(dst, src);
 2145   } else {
 2146     assert(VM_Version::supports_evex(), "");
 2147     kmovwl(dst, src);
 2148   }
 2149 }
 2150 
 2151 void MacroAssembler::kmov(KRegister dst, Register src) {
 2152   if (VM_Version::supports_avx512bw()) {
 2153     kmovql(dst, src);
 2154   } else {
 2155     assert(VM_Version::supports_evex(), "");
 2156     kmovwl(dst, src);
 2157   }
 2158 }
 2159 
 2160 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
 2161   assert(rscratch != noreg || always_reachable(src), "missing");
 2162 
 2163   if (reachable(src)) {
 2164     kmovql(dst, as_Address(src));
 2165   } else {
 2166     lea(rscratch, src);
 2167     kmovql(dst, Address(rscratch, 0));
 2168   }
 2169 }
 2170 
 2171 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
 2172   assert(rscratch != noreg || always_reachable(src), "missing");
 2173 
 2174   if (reachable(src)) {
 2175     kmovwl(dst, as_Address(src));
 2176   } else {
 2177     lea(rscratch, src);
 2178     kmovwl(dst, Address(rscratch, 0));
 2179   }
 2180 }
 2181 
 2182 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2183                                int vector_len, Register rscratch) {
 2184   assert(rscratch != noreg || always_reachable(src), "missing");
 2185 
 2186   if (reachable(src)) {
 2187     Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
 2188   } else {
 2189     lea(rscratch, src);
 2190     Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
 2191   }
 2192 }
 2193 
 2194 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2195                                int vector_len, Register rscratch) {
 2196   assert(rscratch != noreg || always_reachable(src), "missing");
 2197 
 2198   if (reachable(src)) {
 2199     Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
 2200   } else {
 2201     lea(rscratch, src);
 2202     Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
 2203   }
 2204 }
 2205 
 2206 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2207   assert(rscratch != noreg || always_reachable(src), "missing");
 2208 
 2209   if (reachable(src)) {
 2210     Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
 2211   } else {
 2212     lea(rscratch, src);
 2213     Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
 2214   }
 2215 }
 2216 
 2217 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2218   assert(rscratch != noreg || always_reachable(src), "missing");
 2219 
 2220   if (reachable(src)) {
 2221     Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
 2222   } else {
 2223     lea(rscratch, src);
 2224     Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2225   }
 2226 }
 2227 
 2228 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2229   assert(rscratch != noreg || always_reachable(src), "missing");
 2230 
 2231   if (reachable(src)) {
 2232     Assembler::evmovdquq(dst, as_Address(src), vector_len);
 2233   } else {
 2234     lea(rscratch, src);
 2235     Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
 2236   }
 2237 }
 2238 
 2239 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2240   assert(rscratch != noreg || always_reachable(src), "missing");
 2241 
 2242   if (reachable(src)) {
 2243     Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
 2244   } else {
 2245     lea(rscratch, src);
 2246     Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2247   }
 2248 }
 2249 
 2250 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2251   assert(rscratch != noreg || always_reachable(src), "missing");
 2252 
 2253   if (reachable(src)) {
 2254     Assembler::evmovdqaq(dst, as_Address(src), vector_len);
 2255   } else {
 2256     lea(rscratch, src);
 2257     Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
 2258   }
 2259 }
 2260 
 2261 
 2262 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2263   assert(rscratch != noreg || always_reachable(src), "missing");
 2264 
 2265   if (reachable(src)) {
 2266     Assembler::movdqa(dst, as_Address(src));
 2267   } else {
 2268     lea(rscratch, src);
 2269     Assembler::movdqa(dst, Address(rscratch, 0));
 2270   }
 2271 }
 2272 
 2273 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2274   assert(rscratch != noreg || always_reachable(src), "missing");
 2275 
 2276   if (reachable(src)) {
 2277     Assembler::movsd(dst, as_Address(src));
 2278   } else {
 2279     lea(rscratch, src);
 2280     Assembler::movsd(dst, Address(rscratch, 0));
 2281   }
 2282 }
 2283 
 2284 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2285   assert(rscratch != noreg || always_reachable(src), "missing");
 2286 
 2287   if (reachable(src)) {
 2288     Assembler::movss(dst, as_Address(src));
 2289   } else {
 2290     lea(rscratch, src);
 2291     Assembler::movss(dst, Address(rscratch, 0));
 2292   }
 2293 }
 2294 
 2295 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2296   assert(rscratch != noreg || always_reachable(src), "missing");
 2297 
 2298   if (reachable(src)) {
 2299     Assembler::movddup(dst, as_Address(src));
 2300   } else {
 2301     lea(rscratch, src);
 2302     Assembler::movddup(dst, Address(rscratch, 0));
 2303   }
 2304 }
 2305 
 2306 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2307   assert(rscratch != noreg || always_reachable(src), "missing");
 2308 
 2309   if (reachable(src)) {
 2310     Assembler::vmovddup(dst, as_Address(src), vector_len);
 2311   } else {
 2312     lea(rscratch, src);
 2313     Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
 2314   }
 2315 }
 2316 
 2317 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2318   assert(rscratch != noreg || always_reachable(src), "missing");
 2319 
 2320   if (reachable(src)) {
 2321     Assembler::mulsd(dst, as_Address(src));
 2322   } else {
 2323     lea(rscratch, src);
 2324     Assembler::mulsd(dst, Address(rscratch, 0));
 2325   }
 2326 }
 2327 
 2328 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2329   assert(rscratch != noreg || always_reachable(src), "missing");
 2330 
 2331   if (reachable(src)) {
 2332     Assembler::mulss(dst, as_Address(src));
 2333   } else {
 2334     lea(rscratch, src);
 2335     Assembler::mulss(dst, Address(rscratch, 0));
 2336   }
 2337 }
 2338 
 2339 void MacroAssembler::null_check(Register reg, int offset) {
 2340   if (needs_explicit_null_check(offset)) {
 2341     // provoke OS null exception if reg is null by
 2342     // accessing M[reg] w/o changing any (non-CC) registers
 2343     // NOTE: cmpl is plenty here to provoke a segv
 2344     cmpptr(rax, Address(reg, 0));
 2345     // Note: should probably use testl(rax, Address(reg, 0));
 2346     //       may be shorter code (however, this version of
 2347     //       testl needs to be implemented first)
 2348   } else {
 2349     // nothing to do, (later) access of M[reg + offset]
 2350     // will provoke OS null exception if reg is null
 2351   }
 2352 }
 2353 
 2354 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2355   andptr(markword, markWord::inline_type_mask_in_place);
 2356   cmpptr(markword, markWord::inline_type_pattern);
 2357   jcc(Assembler::equal, is_inline_type);
 2358 }
 2359 
 2360 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
 2361   load_unsigned_short(temp_reg, Address(klass, Klass::access_flags_offset()));
 2362   testl(temp_reg, JVM_ACC_IDENTITY);
 2363   jcc(Assembler::zero, is_inline_type);
 2364 }
 2365 
 2366 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
 2367   testptr(object, object);
 2368   jcc(Assembler::zero, not_inline_type);
 2369   const int is_inline_type_mask = markWord::inline_type_pattern;
 2370   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2371   andptr(tmp, is_inline_type_mask);
 2372   cmpptr(tmp, is_inline_type_mask);
 2373   jcc(Assembler::notEqual, not_inline_type);
 2374 }
 2375 
 2376 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2377   movl(temp_reg, flags);
 2378   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2379   jcc(Assembler::notEqual, is_null_free_inline_type);
 2380 }
 2381 
 2382 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2383   movl(temp_reg, flags);
 2384   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2385   jcc(Assembler::equal, not_null_free_inline_type);
 2386 }
 2387 
 2388 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2389   movl(temp_reg, flags);
 2390   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 2391   jcc(Assembler::notEqual, is_flat);
 2392 }
 2393 
 2394 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 2395   movl(temp_reg, flags);
 2396   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 2397   jcc(Assembler::notEqual, has_null_marker);
 2398 }
 2399 
 2400 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2401   Label test_mark_word;
 2402   // load mark word
 2403   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2404   // if the mark word is unlocked it already contains the prototype bits
 2405   testl(temp_reg, markWord::unlocked_value);
 2406   jccb(Assembler::notZero, test_mark_word);
 2407   // slow path: mark word is displaced, use the klass prototype header instead
 2408   push(rscratch1);
 2409   load_prototype_header(temp_reg, oop, rscratch1);
 2410   pop(rscratch1);
 2411 
 2412   bind(test_mark_word);
 2413   testl(temp_reg, test_bit);
 2414   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2415 }
 2416 
 2417 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2418                                          Label& is_flat_array) {
 2419 #ifdef _LP64
 2420   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2421 #else
 2422   load_klass(temp_reg, oop, noreg);
 2423   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2424   test_flat_array_layout(temp_reg, is_flat_array);
 2425 #endif
 2426 }
 2427 
 2428 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 2429                                              Label& is_non_flat_array) {
 2430 #ifdef _LP64
 2431   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 2432 #else
 2433   load_klass(temp_reg, oop, noreg);
 2434   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2435   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 2436 #endif
 2437 }
 2438 
 2439 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 2440 #ifdef _LP64
 2441   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2442 #else
 2443   Unimplemented();
 2444 #endif
 2445 }
 2446 
 2447 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 2448 #ifdef _LP64
 2449   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2450 #else
 2451   Unimplemented();
 2452 #endif
 2453 }
 2454 
 2455 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 2456   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2457   jcc(Assembler::notZero, is_flat_array);
 2458 }
 2459 
 2460 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 2461   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2462   jcc(Assembler::zero, is_non_flat_array);
 2463 }
 2464 
 2465 void MacroAssembler::os_breakpoint() {
 2466   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2467   // (e.g., MSVC can't call ps() otherwise)
 2468   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2469 }
 2470 
 2471 void MacroAssembler::unimplemented(const char* what) {
 2472   const char* buf = nullptr;
 2473   {
 2474     ResourceMark rm;
 2475     stringStream ss;
 2476     ss.print("unimplemented: %s", what);
 2477     buf = code_string(ss.as_string());
 2478   }
 2479   stop(buf);
 2480 }
 2481 
 2482 #define XSTATE_BV 0x200
 2483 
 2484 void MacroAssembler::pop_CPU_state() {
 2485   pop_FPU_state();
 2486   pop_IU_state();
 2487 }
 2488 
 2489 void MacroAssembler::pop_FPU_state() {
 2490   fxrstor(Address(rsp, 0));
 2491   addptr(rsp, FPUStateSizeInWords * wordSize);
 2492 }
 2493 
 2494 void MacroAssembler::pop_IU_state() {
 2495   popa();
 2496   addq(rsp, 8);
 2497   popf();
 2498 }
 2499 
 2500 // Save Integer and Float state
 2501 // Warning: Stack must be 16 byte aligned (64bit)
 2502 void MacroAssembler::push_CPU_state() {
 2503   push_IU_state();
 2504   push_FPU_state();
 2505 }
 2506 
 2507 void MacroAssembler::push_FPU_state() {
 2508   subptr(rsp, FPUStateSizeInWords * wordSize);
 2509   fxsave(Address(rsp, 0));
 2510 }
 2511 
 2512 void MacroAssembler::push_IU_state() {
 2513   // Push flags first because pusha kills them
 2514   pushf();
 2515   // Make sure rsp stays 16-byte aligned
 2516   subq(rsp, 8);
 2517   pusha();
 2518 }
 2519 
 2520 void MacroAssembler::push_cont_fastpath() {
 2521   if (!Continuations::enabled()) return;
 2522 
 2523   Label L_done;
 2524   cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
 2525   jccb(Assembler::belowEqual, L_done);
 2526   movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
 2527   bind(L_done);
 2528 }
 2529 
 2530 void MacroAssembler::pop_cont_fastpath() {
 2531   if (!Continuations::enabled()) return;
 2532 
 2533   Label L_done;
 2534   cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
 2535   jccb(Assembler::below, L_done);
 2536   movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
 2537   bind(L_done);
 2538 }
 2539 
 2540 void MacroAssembler::inc_held_monitor_count() {
 2541   incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 2542 }
 2543 
 2544 void MacroAssembler::dec_held_monitor_count() {
 2545   decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 2546 }
 2547 
 2548 #ifdef ASSERT
 2549 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
 2550   Label no_cont;
 2551   movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
 2552   testl(cont, cont);
 2553   jcc(Assembler::zero, no_cont);
 2554   stop(name);
 2555   bind(no_cont);
 2556 }
 2557 #endif
 2558 
 2559 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
 2560   // we must set sp to zero to clear frame
 2561   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
 2562   // must clear fp, so that compiled frames are not confused; it is
 2563   // possible that we need it only for debugging
 2564   if (clear_fp) {
 2565     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
 2566   }
 2567   // Always clear the pc because it could have been set by make_walkable()
 2568   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 2569   vzeroupper();
 2570 }
 2571 
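      // Round reg up to the next multiple of modulus (modulus must be a power of two).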
 2572 void MacroAssembler::round_to(Register reg, int modulus) {
 2573   addptr(reg, modulus - 1);
 2574   andptr(reg, -modulus);
 2575 }
 2576 
 2577 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
 2578   if (at_return) {
 2579     // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
 2580     // we may safely use rsp instead to perform the stack watermark check.
 2581     cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
 2582     jcc(Assembler::above, slow_path);
 2583     return;
 2584   }
 2585   testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
 2586   jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
 2587 }
 2588 
 2589 // Calls to C land
 2590 //
 2591 // When entering C land, the rbp & rsp of the last Java frame have to be recorded
 2592 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
 2593 // has to be reset to 0. This is required to allow proper stack traversal.
 2594 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 2595                                          Register last_java_fp,
 2596                                          address  last_java_pc,
 2597                                          Register rscratch) {
 2598   vzeroupper();
 2599   // determine last_java_sp register
 2600   if (!last_java_sp->is_valid()) {
 2601     last_java_sp = rsp;
 2602   }
 2603   // last_java_fp is optional
 2604   if (last_java_fp->is_valid()) {
 2605     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
 2606   }
 2607   // last_java_pc is optional
 2608   if (last_java_pc != nullptr) {
 2609     Address java_pc(r15_thread,
 2610                     JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
 2611     lea(java_pc, InternalAddress(last_java_pc), rscratch);
 2612   }
 2613   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
 2614 }
 2615 
 2616 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 2617                                          Register last_java_fp,
 2618                                          Label &L,
 2619                                          Register scratch) {
 2620   lea(scratch, L);
 2621   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
 2622   set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
 2623 }
 2624 
 2625 void MacroAssembler::shlptr(Register dst, int imm8) {
 2626   shlq(dst, imm8);
 2627 }
 2628 
 2629 void MacroAssembler::shrptr(Register dst, int imm8) {
 2630   shrq(dst, imm8);
 2631 }
 2632 
 2633 void MacroAssembler::sign_extend_byte(Register reg) {
 2634   movsbl(reg, reg); // movsxb
 2635 }
 2636 
 2637 void MacroAssembler::sign_extend_short(Register reg) {
 2638   movswl(reg, reg); // movsxw
 2639 }
 2640 
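      // Testing a mask in the range 0..127 touches only the low byte, so a byte-sized
      // test yields the same flags with a shorter encoding.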
 2641 void MacroAssembler::testl(Address dst, int32_t imm32) {
 2642   if (imm32 >= 0 && is8bit(imm32)) {
 2643     testb(dst, imm32);
 2644   } else {
 2645     Assembler::testl(dst, imm32);
 2646   }
 2647 }
 2648 
 2649 void MacroAssembler::testl(Register dst, int32_t imm32) {
 2650   if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
 2651     testb(dst, imm32);
 2652   } else {
 2653     Assembler::testl(dst, imm32);
 2654   }
 2655 }
 2656 
 2657 void MacroAssembler::testl(Register dst, AddressLiteral src) {
 2658   assert(always_reachable(src), "Address should be reachable");
 2659   testl(dst, as_Address(src));
 2660 }
 2661 
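      // For a non-negative imm32 the sign-extended 64-bit mask has a zero upper half,
      // so the shorter 32-bit test sets identical flags; otherwise fall back to testq.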
 2662 void MacroAssembler::testq(Address dst, int32_t imm32) {
 2663   if (imm32 >= 0) {
 2664     testl(dst, imm32);
 2665   } else {
 2666     Assembler::testq(dst, imm32);
 2667   }
 2668 }
 2669 
 2670 void MacroAssembler::testq(Register dst, int32_t imm32) {
 2671   if (imm32 >= 0) {
 2672     testl(dst, imm32);
 2673   } else {
 2674     Assembler::testq(dst, imm32);
 2675   }
 2676 }
 2677 
 2678 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
 2679   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2680   Assembler::pcmpeqb(dst, src);
 2681 }
 2682 
 2683 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
 2684   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2685   Assembler::pcmpeqw(dst, src);
 2686 }
 2687 
 2688 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
 2689   assert((dst->encoding() < 16),"XMM register should be 0-15");
 2690   Assembler::pcmpestri(dst, src, imm8);
 2691 }
 2692 
 2693 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
 2694   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 2695   Assembler::pcmpestri(dst, src, imm8);
 2696 }
 2697 
 2698 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
 2699   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2700   Assembler::pmovzxbw(dst, src);
 2701 }
 2702 
 2703 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
 2704   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2705   Assembler::pmovzxbw(dst, src);
 2706 }
 2707 
 2708 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
 2709   assert((src->encoding() < 16),"XMM register should be 0-15");
 2710   Assembler::pmovmskb(dst, src);
 2711 }
 2712 
 2713 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
 2714   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 2715   Assembler::ptest(dst, src);
 2716 }
 2717 
 2718 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2719   assert(rscratch != noreg || always_reachable(src), "missing");
 2720 
 2721   if (reachable(src)) {
 2722     Assembler::sqrtss(dst, as_Address(src));
 2723   } else {
 2724     lea(rscratch, src);
 2725     Assembler::sqrtss(dst, Address(rscratch, 0));
 2726   }
 2727 }
 2728 
 2729 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2730   assert(rscratch != noreg || always_reachable(src), "missing");
 2731 
 2732   if (reachable(src)) {
 2733     Assembler::subsd(dst, as_Address(src));
 2734   } else {
 2735     lea(rscratch, src);
 2736     Assembler::subsd(dst, Address(rscratch, 0));
 2737   }
 2738 }
 2739 
 2740 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
 2741   assert(rscratch != noreg || always_reachable(src), "missing");
 2742 
 2743   if (reachable(src)) {
 2744     Assembler::roundsd(dst, as_Address(src), rmode);
 2745   } else {
 2746     lea(rscratch, src);
 2747     Assembler::roundsd(dst, Address(rscratch, 0), rmode);
 2748   }
 2749 }
 2750 
 2751 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2752   assert(rscratch != noreg || always_reachable(src), "missing");
 2753 
 2754   if (reachable(src)) {
 2755     Assembler::subss(dst, as_Address(src));
 2756   } else {
 2757     lea(rscratch, src);
 2758     Assembler::subss(dst, Address(rscratch, 0));
 2759   }
 2760 }
 2761 
 2762 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2763   assert(rscratch != noreg || always_reachable(src), "missing");
 2764 
 2765   if (reachable(src)) {
 2766     Assembler::ucomisd(dst, as_Address(src));
 2767   } else {
 2768     lea(rscratch, src);
 2769     Assembler::ucomisd(dst, Address(rscratch, 0));
 2770   }
 2771 }
 2772 
 2773 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2774   assert(rscratch != noreg || always_reachable(src), "missing");
 2775 
 2776   if (reachable(src)) {
 2777     Assembler::ucomiss(dst, as_Address(src));
 2778   } else {
 2779     lea(rscratch, src);
 2780     Assembler::ucomiss(dst, Address(rscratch, 0));
 2781   }
 2782 }
 2783 
 2784 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2785   assert(rscratch != noreg || always_reachable(src), "missing");
 2786 
 2787   // Used in sign-bit flipping with aligned address.
 2788   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 2789 
 2790   if (UseAVX > 2 &&
 2791       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2792       (dst->encoding() >= 16)) {
 2793     vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
 2794   } else if (reachable(src)) {
 2795     Assembler::xorpd(dst, as_Address(src));
 2796   } else {
 2797     lea(rscratch, src);
 2798     Assembler::xorpd(dst, Address(rscratch, 0));
 2799   }
 2800 }
 2801 
 2802 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
 2803   if (UseAVX > 2 &&
 2804       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2805       ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
 2806     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 2807   } else {
 2808     Assembler::xorpd(dst, src);
 2809   }
 2810 }
 2811 
 2812 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
 2813   if (UseAVX > 2 &&
 2814       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2815       ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
 2816     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 2817   } else {
 2818     Assembler::xorps(dst, src);
 2819   }
 2820 }
 2821 
 2822 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2823   assert(rscratch != noreg || always_reachable(src), "missing");
 2824 
 2825   // Used in sign-bit flipping with aligned address.
 2826   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 2827 
 2828   if (UseAVX > 2 &&
 2829       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2830       (dst->encoding() >= 16)) {
 2831     vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
 2832   } else if (reachable(src)) {
 2833     Assembler::xorps(dst, as_Address(src));
 2834   } else {
 2835     lea(rscratch, src);
 2836     Assembler::xorps(dst, Address(rscratch, 0));
 2837   }
 2838 }
 2839 
 2840 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2841   assert(rscratch != noreg || always_reachable(src), "missing");
 2842 
 2843   // Used in sign-bit flipping with aligned address.
 2844   bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
 2845   assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
 2846   if (reachable(src)) {
 2847     Assembler::pshufb(dst, as_Address(src));
 2848   } else {
 2849     lea(rscratch, src);
 2850     Assembler::pshufb(dst, Address(rscratch, 0));
 2851   }
 2852 }
 2853 
 2854 // AVX 3-operands instructions
 2855 
 2856 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 2857   assert(rscratch != noreg || always_reachable(src), "missing");
 2858 
 2859   if (reachable(src)) {
 2860     vaddsd(dst, nds, as_Address(src));
 2861   } else {
 2862     lea(rscratch, src);
 2863     vaddsd(dst, nds, Address(rscratch, 0));
 2864   }
 2865 }
 2866 
 2867 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 2868   assert(rscratch != noreg || always_reachable(src), "missing");
 2869 
 2870   if (reachable(src)) {
 2871     vaddss(dst, nds, as_Address(src));
 2872   } else {
 2873     lea(rscratch, src);
 2874     vaddss(dst, nds, Address(rscratch, 0));
 2875   }
 2876 }
 2877 
 2878 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2879   assert(UseAVX > 0, "requires some form of AVX");
 2880   assert(rscratch != noreg || always_reachable(src), "missing");
 2881 
 2882   if (reachable(src)) {
 2883     Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
 2884   } else {
 2885     lea(rscratch, src);
 2886     Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
 2887   }
 2888 }
 2889 
 2890 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2891   assert(UseAVX > 0, "requires some form of AVX");
 2892   assert(rscratch != noreg || always_reachable(src), "missing");
 2893 
 2894   if (reachable(src)) {
 2895     Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
 2896   } else {
 2897     lea(rscratch, src);
 2898     Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
 2899   }
 2900 }
 2901 
 2902 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 2903   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 2904   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 2905 
 2906   vandps(dst, nds, negate_field, vector_len, rscratch);
 2907 }
 2908 
 2909 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 2910   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 2911   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 2912 
 2913   vandpd(dst, nds, negate_field, vector_len, rscratch);
 2914 }
 2915 
 2916 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2917   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2918   Assembler::vpaddb(dst, nds, src, vector_len);
 2919 }
 2920 
 2921 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2922   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2923   Assembler::vpaddb(dst, nds, src, vector_len);
 2924 }
 2925 
 2926 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2927   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2928   Assembler::vpaddw(dst, nds, src, vector_len);
 2929 }
 2930 
 2931 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2932   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2933   Assembler::vpaddw(dst, nds, src, vector_len);
 2934 }
 2935 
 2936 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2937   assert(rscratch != noreg || always_reachable(src), "missing");
 2938 
 2939   if (reachable(src)) {
 2940     Assembler::vpand(dst, nds, as_Address(src), vector_len);
 2941   } else {
 2942     lea(rscratch, src);
 2943     Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
 2944   }
 2945 }
 2946 
 2947 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2948   assert(rscratch != noreg || always_reachable(src), "missing");
 2949 
 2950   if (reachable(src)) {
 2951     Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
 2952   } else {
 2953     lea(rscratch, src);
 2954     Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
 2955   }
 2956 }
 2957 
 2958 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2959   assert(rscratch != noreg || always_reachable(src), "missing");
 2960 
 2961   if (reachable(src)) {
 2962     Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
 2963   } else {
 2964     lea(rscratch, src);
 2965     Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
 2966   }
 2967 }
 2968 
 2969 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2970   assert(rscratch != noreg || always_reachable(src), "missing");
 2971 
 2972   if (reachable(src)) {
 2973     Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
 2974   } else {
 2975     lea(rscratch, src);
 2976     Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
 2977   }
 2978 }
 2979 
 2980 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2981   assert(rscratch != noreg || always_reachable(src), "missing");
 2982 
 2983   if (reachable(src)) {
 2984     Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
 2985   } else {
 2986     lea(rscratch, src);
 2987     Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
 2988   }
 2989 }
 2990 
 2991 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2992   assert(rscratch != noreg || always_reachable(src), "missing");
 2993 
 2994   if (reachable(src)) {
 2995     Assembler::vbroadcastss(dst, as_Address(src), vector_len);
 2996   } else {
 2997     lea(rscratch, src);
 2998     Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
 2999   }
 3000 }
 3001 
 3002 // Vector float blend
 3003 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
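      // When EnableX86ECoreOpts is set (and suitable scratch/destination registers
      // are available), the blend is emulated with plain logic ops instead of
      // VBLENDVPS:  dst = (mask & src2) | (~mask & src1).  If compute_mask is true
      // the per-lane mask is first built by vpsrad(mask, 32), which replicates each
      // element's sign bit across the whole 32-bit lane.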
 3004 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
 3005   // WARN: Allow dst == (src1|src2), mask == scratch
 3006   bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
 3007   bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
 3008   bool dst_available = dst != mask && (dst != src1 || dst != src2);
 3009   if (blend_emulation && scratch_available && dst_available) {
 3010     if (compute_mask) {
 3011       vpsrad(scratch, mask, 32, vector_len);
 3012       mask = scratch;
 3013     }
 3014     if (dst == src1) {
 3015       vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
 3016       vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
 3017     } else {
 3018       vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
 3019       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
 3020     }
 3021     vpor(dst, dst, scratch, vector_len);
 3022   } else {
 3023     Assembler::vblendvps(dst, src1, src2, mask, vector_len);
 3024   }
 3025 }
 3026 
 3027 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
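      // Same emulation as vblendvps above, but for 64-bit lanes.  A 64-bit
      // arithmetic right shift (VPSRAQ) is an AVX-512 instruction, so when
      // compute_mask is true the lane mask is built with vpcmpgtq(0, mask)
      // instead, which yields all-ones exactly where the sign bit is set; that
      // needs a zeroed scratch register, hence the extra "scratch != mask" term
      // in scratch_available.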
 3028 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
 3029   // WARN: Allow dst == (src1|src2), mask == scratch
 3030   bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
 3031   bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
 3032   bool dst_available = dst != mask && (dst != src1 || dst != src2);
 3033   if (blend_emulation && scratch_available && dst_available) {
 3034     if (compute_mask) {
 3035       vpxor(scratch, scratch, scratch, vector_len);
 3036       vpcmpgtq(scratch, scratch, mask, vector_len);
 3037       mask = scratch;
 3038     }
 3039     if (dst == src1) {
 3040       vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
 3041       vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
 3042     } else {
 3043       vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
 3044       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
 3045     }
 3046     vpor(dst, dst, scratch, vector_len);
 3047   } else {
 3048     Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
 3049   }
 3050 }
 3051 
 3052 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3053   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3054   Assembler::vpcmpeqb(dst, nds, src, vector_len);
 3055 }
 3056 
 3057 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
 3058   assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3059   Assembler::vpcmpeqb(dst, src1, src2, vector_len);
 3060 }
 3061 
 3062 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3063   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3064   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 3065 }
 3066 
 3067 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3068   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3069   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 3070 }
 3071 
 3072 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3073   assert(rscratch != noreg || always_reachable(src), "missing");
 3074 
 3075   if (reachable(src)) {
 3076     Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
 3077   } else {
 3078     lea(rscratch, src);
 3079     Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
 3080   }
 3081 }
 3082 
 3083 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3084                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3085   assert(rscratch != noreg || always_reachable(src), "missing");
 3086 
 3087   if (reachable(src)) {
 3088     Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3089   } else {
 3090     lea(rscratch, src);
 3091     Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3092   }
 3093 }
 3094 
 3095 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3096                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3097   assert(rscratch != noreg || always_reachable(src), "missing");
 3098 
 3099   if (reachable(src)) {
 3100     Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3101   } else {
 3102     lea(rscratch, src);
 3103     Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3104   }
 3105 }
 3106 
 3107 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3108                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3109   assert(rscratch != noreg || always_reachable(src), "missing");
 3110 
 3111   if (reachable(src)) {
 3112     Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3113   } else {
 3114     lea(rscratch, src);
 3115     Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3116   }
 3117 }
 3118 
 3119 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3120                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3121   assert(rscratch != noreg || always_reachable(src), "missing");
 3122 
 3123   if (reachable(src)) {
 3124     Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3125   } else {
 3126     lea(rscratch, src);
 3127     Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3128   }
 3129 }
 3130 
 3131 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
 3132   if (width == Assembler::Q) {
 3133     Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
 3134   } else {
 3135     Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
 3136   }
 3137 }
 3138 
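      // vpcmpCCW synthesizes the remaining signed comparisons from the two
      // predicates the ISA provides (pcmpeq: 0x74 + width, or 0x29 for quadwords;
      // pcmpgt: 0x64 + width, or 0x37 for quadwords): nle uses gt directly, lt and
      // nlt swap the operands of gt, and neq, le and nlt additionally invert the
      // result by XOR-ing with an all-ones vector (vallones).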
 3139 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
 3140   int eq_cond_enc = 0x29;
 3141   int gt_cond_enc = 0x37;
 3142   if (width != Assembler::Q) {
 3143     eq_cond_enc = 0x74 + width;
 3144     gt_cond_enc = 0x64 + width;
 3145   }
 3146   switch (cond) {
 3147   case eq:
 3148     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3149     break;
 3150   case neq:
 3151     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3152     vallones(xtmp, vector_len);
 3153     vpxor(dst, xtmp, dst, vector_len);
 3154     break;
 3155   case le:
 3156     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3157     vallones(xtmp, vector_len);
 3158     vpxor(dst, xtmp, dst, vector_len);
 3159     break;
 3160   case nlt:
 3161     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3162     vallones(xtmp, vector_len);
 3163     vpxor(dst, xtmp, dst, vector_len);
 3164     break;
 3165   case lt:
 3166     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3167     break;
 3168   case nle:
 3169     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3170     break;
 3171   default:
 3172     assert(false, "Should not reach here");
 3173   }
 3174 }
 3175 
 3176 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
 3177   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3178   Assembler::vpmovzxbw(dst, src, vector_len);
 3179 }
 3180 
 3181 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
 3182   assert((src->encoding() < 16),"XMM register should be 0-15");
 3183   Assembler::vpmovmskb(dst, src, vector_len);
 3184 }
 3185 
 3186 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3187   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3188   Assembler::vpmullw(dst, nds, src, vector_len);
 3189 }
 3190 
 3191 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3192   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3193   Assembler::vpmullw(dst, nds, src, vector_len);
 3194 }
 3195 
 3196 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3197   assert((UseAVX > 0), "AVX support is needed");
 3198   assert(rscratch != noreg || always_reachable(src), "missing");
 3199 
 3200   if (reachable(src)) {
 3201     Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
 3202   } else {
 3203     lea(rscratch, src);
 3204     Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
 3205   }
 3206 }
 3207 
 3208 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3209   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3210   Assembler::vpsubb(dst, nds, src, vector_len);
 3211 }
 3212 
 3213 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3214   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3215   Assembler::vpsubb(dst, nds, src, vector_len);
 3216 }
 3217 
 3218 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3219   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3220   Assembler::vpsubw(dst, nds, src, vector_len);
 3221 }
 3222 
 3223 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3224   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3225   Assembler::vpsubw(dst, nds, src, vector_len);
 3226 }
 3227 
 3228 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3229   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3230   Assembler::vpsraw(dst, nds, shift, vector_len);
 3231 }
 3232 
 3233 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3234   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3235   Assembler::vpsraw(dst, nds, shift, vector_len);
 3236 }
 3237 
 3238 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3239   assert(UseAVX > 2,"");
 3240   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3241      vector_len = 2;
 3242   }
 3243   Assembler::evpsraq(dst, nds, shift, vector_len);
 3244 }
 3245 
 3246 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3247   assert(UseAVX > 2,"");
 3248   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3249      vector_len = 2;
 3250   }
 3251   Assembler::evpsraq(dst, nds, shift, vector_len);
 3252 }
 3253 
 3254 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3255   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3256   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3257 }
 3258 
 3259 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3260   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3261   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3262 }
 3263 
 3264 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3265   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3266   Assembler::vpsllw(dst, nds, shift, vector_len);
 3267 }
 3268 
 3269 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3270   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3271   Assembler::vpsllw(dst, nds, shift, vector_len);
 3272 }
 3273 
 3274 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
 3275   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 3276   Assembler::vptest(dst, src);
 3277 }
 3278 
 3279 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
 3280   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3281   Assembler::punpcklbw(dst, src);
 3282 }
 3283 
 3284 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
 3285   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 3286   Assembler::pshufd(dst, src, mode);
 3287 }
 3288 
 3289 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
 3290   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3291   Assembler::pshuflw(dst, src, mode);
 3292 }
 3293 
 3294 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3295   assert(rscratch != noreg || always_reachable(src), "missing");
 3296 
 3297   if (reachable(src)) {
 3298     vandpd(dst, nds, as_Address(src), vector_len);
 3299   } else {
 3300     lea(rscratch, src);
 3301     vandpd(dst, nds, Address(rscratch, 0), vector_len);
 3302   }
 3303 }
 3304 
 3305 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3306   assert(rscratch != noreg || always_reachable(src), "missing");
 3307 
 3308   if (reachable(src)) {
 3309     vandps(dst, nds, as_Address(src), vector_len);
 3310   } else {
 3311     lea(rscratch, src);
 3312     vandps(dst, nds, Address(rscratch, 0), vector_len);
 3313   }
 3314 }
 3315 
 3316 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3317                             bool merge, int vector_len, Register rscratch) {
 3318   assert(rscratch != noreg || always_reachable(src), "missing");
 3319 
 3320   if (reachable(src)) {
 3321     Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
 3322   } else {
 3323     lea(rscratch, src);
 3324     Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 3325   }
 3326 }
 3327 
 3328 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3329   assert(rscratch != noreg || always_reachable(src), "missing");
 3330 
 3331   if (reachable(src)) {
 3332     vdivsd(dst, nds, as_Address(src));
 3333   } else {
 3334     lea(rscratch, src);
 3335     vdivsd(dst, nds, Address(rscratch, 0));
 3336   }
 3337 }
 3338 
 3339 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3340   assert(rscratch != noreg || always_reachable(src), "missing");
 3341 
 3342   if (reachable(src)) {
 3343     vdivss(dst, nds, as_Address(src));
 3344   } else {
 3345     lea(rscratch, src);
 3346     vdivss(dst, nds, Address(rscratch, 0));
 3347   }
 3348 }
 3349 
 3350 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3351   assert(rscratch != noreg || always_reachable(src), "missing");
 3352 
 3353   if (reachable(src)) {
 3354     vmulsd(dst, nds, as_Address(src));
 3355   } else {
 3356     lea(rscratch, src);
 3357     vmulsd(dst, nds, Address(rscratch, 0));
 3358   }
 3359 }
 3360 
 3361 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3362   assert(rscratch != noreg || always_reachable(src), "missing");
 3363 
 3364   if (reachable(src)) {
 3365     vmulss(dst, nds, as_Address(src));
 3366   } else {
 3367     lea(rscratch, src);
 3368     vmulss(dst, nds, Address(rscratch, 0));
 3369   }
 3370 }
 3371 
 3372 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3373   assert(rscratch != noreg || always_reachable(src), "missing");
 3374 
 3375   if (reachable(src)) {
 3376     vsubsd(dst, nds, as_Address(src));
 3377   } else {
 3378     lea(rscratch, src);
 3379     vsubsd(dst, nds, Address(rscratch, 0));
 3380   }
 3381 }
 3382 
 3383 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3384   assert(rscratch != noreg || always_reachable(src), "missing");
 3385 
 3386   if (reachable(src)) {
 3387     vsubss(dst, nds, as_Address(src));
 3388   } else {
 3389     lea(rscratch, src);
 3390     vsubss(dst, nds, Address(rscratch, 0));
 3391   }
 3392 }
 3393 
 3394 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3395   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3396   assert(rscratch != noreg || always_reachable(src), "missing");
 3397 
 3398   vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3399 }
 3400 
 3401 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3402   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3403   assert(rscratch != noreg || always_reachable(src), "missing");
 3404 
 3405   vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3406 }
 3407 
 3408 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3409   assert(rscratch != noreg || always_reachable(src), "missing");
 3410 
 3411   if (reachable(src)) {
 3412     vxorpd(dst, nds, as_Address(src), vector_len);
 3413   } else {
 3414     lea(rscratch, src);
 3415     vxorpd(dst, nds, Address(rscratch, 0), vector_len);
 3416   }
 3417 }
 3418 
 3419 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3420   assert(rscratch != noreg || always_reachable(src), "missing");
 3421 
 3422   if (reachable(src)) {
 3423     vxorps(dst, nds, as_Address(src), vector_len);
 3424   } else {
 3425     lea(rscratch, src);
 3426     vxorps(dst, nds, Address(rscratch, 0), vector_len);
 3427   }
 3428 }
 3429 
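      // 256-bit integer VPXOR requires AVX2; on AVX1-only hardware the bitwise
      // equivalent floating-point VXORPD (available for 256-bit vectors since
      // AVX1) is emitted instead.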
 3430 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3431   assert(rscratch != noreg || always_reachable(src), "missing");
 3432 
 3433   if (UseAVX > 1 || (vector_len < 1)) {
 3434     if (reachable(src)) {
 3435       Assembler::vpxor(dst, nds, as_Address(src), vector_len);
 3436     } else {
 3437       lea(rscratch, src);
 3438       Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
 3439     }
 3440   } else {
 3441     MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
 3442   }
 3443 }
 3444 
 3445 void MacroAssembler::vpermd(XMMRegister dst,  XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3446   assert(rscratch != noreg || always_reachable(src), "missing");
 3447 
 3448   if (reachable(src)) {
 3449     Assembler::vpermd(dst, nds, as_Address(src), vector_len);
 3450   } else {
 3451     lea(rscratch, src);
 3452     Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
 3453   }
 3454 }
 3455 
 3456 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
 3457   const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
 3458   STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
 3459   // The inverted mask is sign-extended
 3460   andptr(possibly_non_local, inverted_mask);
 3461 }
 3462 
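      // resolve_jobject converts a JNI handle into the oop it refers to.  The
      // low bits of the handle encode its kind: local handles are untagged and
      // are read with a raw load, while global and weak-global handles carry a
      // type tag that is subtracted out before the decorated load.  Weak handles
      // additionally use ON_PHANTOM_OOP_REF so the GC treats the referent as
      // weakly reachable.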
 3463 void MacroAssembler::resolve_jobject(Register value,
 3464                                      Register tmp) {
 3465   Register thread = r15_thread;
 3466   assert_different_registers(value, thread, tmp);
 3467   Label done, tagged, weak_tagged;
 3468   testptr(value, value);
 3469   jcc(Assembler::zero, done);           // Use null as-is.
 3470   testptr(value, JNIHandles::tag_mask); // Test for tag.
 3471   jcc(Assembler::notZero, tagged);
 3472 
 3473   // Resolve local handle
 3474   access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
 3475   verify_oop(value);
 3476   jmp(done);
 3477 
 3478   bind(tagged);
 3479   testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
 3480   jcc(Assembler::notZero, weak_tagged);
 3481 
 3482   // Resolve global handle
 3483   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
 3484   verify_oop(value);
 3485   jmp(done);
 3486 
 3487   bind(weak_tagged);
 3488   // Resolve jweak.
 3489   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 3490                  value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
 3491   verify_oop(value);
 3492 
 3493   bind(done);
 3494 }
 3495 
 3496 void MacroAssembler::resolve_global_jobject(Register value,
 3497                                             Register tmp) {
 3498   Register thread = r15_thread;
 3499   assert_different_registers(value, thread, tmp);
 3500   Label done;
 3501 
 3502   testptr(value, value);
 3503   jcc(Assembler::zero, done);           // Use null as-is.
 3504 
 3505 #ifdef ASSERT
 3506   {
 3507     Label valid_global_tag;
 3508     testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
 3509     jcc(Assembler::notZero, valid_global_tag);
 3510     stop("non global jobject using resolve_global_jobject");
 3511     bind(valid_global_tag);
 3512   }
 3513 #endif
 3514 
 3515   // Resolve global handle
 3516   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
 3517   verify_oop(value);
 3518 
 3519   bind(done);
 3520 }
 3521 
 3522 void MacroAssembler::subptr(Register dst, int32_t imm32) {
 3523   subq(dst, imm32);
 3524 }
 3525 
 3526 // Force generation of a 4-byte immediate value even if it fits into 8 bits
 3527 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
 3528   subq_imm32(dst, imm32);
 3529 }
 3530 
 3531 void MacroAssembler::subptr(Register dst, Register src) {
 3532   subq(dst, src);
 3533 }
 3534 
 3535 // C++ bool manipulation
 3536 void MacroAssembler::testbool(Register dst) {
 3537   if (sizeof(bool) == 1)
 3538     testb(dst, 0xff);
 3539   else if (sizeof(bool) == 2) {
 3540     // testw implementation needed for two byte bools
 3541     ShouldNotReachHere();
 3542   } else if (sizeof(bool) == 4)
 3543     testl(dst, dst);
 3544   else
 3545     // unsupported
 3546     ShouldNotReachHere();
 3547 }
 3548 
 3549 void MacroAssembler::testptr(Register dst, Register src) {
 3550   testq(dst, src);
 3551 }
 3552 
 3553 // Object / value buffer allocation...
 3554 //
 3555 // Kills klass and rsi on LP64
 3556 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 3557                                        Register t1, Register t2,
 3558                                        bool clear_fields, Label& alloc_failed)
 3559 {
 3560   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 3561   Register layout_size = t1;
 3562   assert(new_obj == rax, "needs to be rax");
 3563   assert_different_registers(klass, new_obj, t1, t2);
 3564 
 3565   // get instance_size in InstanceKlass (scaled to a count of bytes)
 3566   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 3567   // test to see if it is malformed in some way
 3568   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 3569   jcc(Assembler::notZero, slow_case_no_pop);
 3570 
 3571   // Allocate the instance:
 3572   //  If TLAB is enabled:
 3573   //    Try to allocate in the TLAB.
 3574   //    If that fails, go to the slow path.
 3575   //  Else:
 3576   //    Go to the slow path (inline contiguous eden allocation is not
 3577   //    attempted here).
 3578   //
 3579   //  If the TLAB allocation succeeds:
 3580   //    Initialize the allocation.
 3581   //    Exit.
 3582   //
 3583   //  Otherwise, take the slow path.
 3584 
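        // klass is saved across the TLAB allocation because the register is later
        // reused as the zero source while clearing fields and must be restored
        // (by the normal path and by the slow path) before the header is set up.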
 3585   push(klass);
 3586   if (UseTLAB) {
 3587     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
 3588     if (ZeroTLAB || (!clear_fields)) {
 3589       // the fields have been already cleared
 3590       jmp(initialize_header);
 3591     } else {
 3592       // initialize both the header and fields
 3593       jmp(initialize_object);
 3594     }
 3595   } else {
 3596     jmp(slow_case);
 3597   }
 3598 
 3599   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
 3600   // Otherwise, the code above has already jumped to the slow path.
 3601   if (UseTLAB) {
 3602     if (clear_fields) {
 3603       // The object's fields are initialized before its header.  If no space is
 3604       // left after subtracting the header size, go directly to the header initialization.
 3605       bind(initialize_object);
 3606       if (UseCompactObjectHeaders) {
 3607         assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
 3608         decrement(layout_size, oopDesc::base_offset_in_bytes());
 3609       } else {
 3610         decrement(layout_size, sizeof(oopDesc));
 3611       }
 3612       jcc(Assembler::zero, initialize_header);
 3613 
 3614       // Prepare a zero register and convert the remaining size into a count of
 3615       // 8-byte words; the shift also sets the carry flag checked by the assert below.
 3616       Register zero = klass;
 3617       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 3618       shrl(layout_size, LogBytesPerLong); // divide by BytesPerLong (8) and set carry flag if odd
 3619 
 3620   #ifdef ASSERT
 3621       // make sure instance_size was multiple of 8
 3622       Label L;
 3623       // Ignore partial flag stall after shrl() since it is debug VM
 3624       jcc(Assembler::carryClear, L);
 3625       stop("object size is not multiple of 2 - adjust this code");
 3626       bind(L);
 3627       // must be > 0, no extra check needed here
 3628   #endif
 3629 
 3630       // initialize remaining object fields: instance_size was a multiple of 8
 3631       {
 3632         Label loop;
 3633         bind(loop);
 3634         int header_size_bytes = oopDesc::header_size() * HeapWordSize;
 3635         assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
 3636         movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
 3637         decrement(layout_size);
 3638         jcc(Assembler::notZero, loop);
 3639       }
 3640     } // clear_fields
 3641 
 3642     // initialize object header only.
 3643     bind(initialize_header);
 3644     if (UseCompactObjectHeaders || EnableValhalla) {
 3645       pop(klass);
 3646       Register mark_word = t2;
 3647       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 3648       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 3649     } else {
 3650       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
 3651              (intptr_t)markWord::prototype().value()); // header
 3652       pop(klass);   // get saved klass back in the register.
 3653     }
 3654     if (!UseCompactObjectHeaders) {
 3655       xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 3656       store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 3657       movptr(t2, klass);         // preserve klass
 3658       store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 3659     }
 3660     jmp(done);
 3661   }
 3662 
 3663   bind(slow_case);
 3664   pop(klass);
 3665   bind(slow_case_no_pop);
 3666   jmp(alloc_failed);
 3667 
 3668   bind(done);
 3669 }
 3670 
 3671 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3672 void MacroAssembler::tlab_allocate(Register obj,
 3673                                    Register var_size_in_bytes,
 3674                                    int con_size_in_bytes,
 3675                                    Register t1,
 3676                                    Register t2,
 3677                                    Label& slow_case) {
 3678   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3679   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3680 }
 3681 
 3682 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3683   RegSet regs;
 3684   regs += RegSet::of(rax, rcx, rdx);
 3685 #ifndef _WINDOWS
 3686   regs += RegSet::of(rsi, rdi);
 3687 #endif
 3688   regs += RegSet::range(r8, r11);
 3689   if (UseAPX) {
 3690     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
 3691   }
 3692   return regs;
 3693 }
 3694 
 3695 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
 3696   int num_xmm_registers = XMMRegister::available_xmm_registers();
 3697 #if defined(_WINDOWS)
 3698   XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
 3699   if (num_xmm_registers > 16) {
 3700      result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
 3701   }
 3702   return result;
 3703 #else
 3704   return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
 3705 #endif
 3706 }
 3707 
 3708 // C1 only ever uses the first double/float of the XMM register.
 3709 static int xmm_save_size() { return sizeof(double); }
 3710 
 3711 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 3712   masm->movdbl(Address(rsp, offset), reg);
 3713 }
 3714 
 3715 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 3716   masm->movdbl(reg, Address(rsp, offset));
 3717 }
 3718 
 3719 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
 3720                                   bool save_fpu, int& gp_area_size, int& xmm_area_size) {
 3721 
 3722   gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
 3723                          StackAlignmentInBytes);
 3724   xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
 3725 
 3726   return gp_area_size + xmm_area_size;
 3727 }
 3728 
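      // push_call_clobbered_registers_except / pop_call_clobbered_registers_except
      // reserve a single stack-aligned save area: the GP registers go at
      // [rsp + 0, gp_area_size) and, if save_fpu is set, the XMM registers (one
      // double slot each, matching xmm_save_size()) follow at rsp + gp_area_size.
      // The pop variant restores in the opposite order and ends with vzeroupper().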
 3729 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
 3730   block_comment("push_call_clobbered_registers start");
 3731   // Regular registers
 3732   RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
 3733 
 3734   int gp_area_size;
 3735   int xmm_area_size;
 3736   int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
 3737                                                gp_area_size, xmm_area_size);
 3738   subptr(rsp, total_save_size);
 3739 
 3740   push_set(gp_registers_to_push, 0);
 3741 
 3742   if (save_fpu) {
 3743     push_set(call_clobbered_xmm_registers(), gp_area_size);
 3744   }
 3745 
 3746   block_comment("push_call_clobbered_registers end");
 3747 }
 3748 
 3749 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
 3750   block_comment("pop_call_clobbered_registers start");
 3751 
 3752   RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
 3753 
 3754   int gp_area_size;
 3755   int xmm_area_size;
 3756   int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
 3757                                                gp_area_size, xmm_area_size);
 3758 
 3759   if (restore_fpu) {
 3760     pop_set(call_clobbered_xmm_registers(), gp_area_size);
 3761   }
 3762 
 3763   pop_set(gp_registers_to_pop, 0);
 3764 
 3765   addptr(rsp, total_save_size);
 3766 
 3767   vzeroupper();
 3768 
 3769   block_comment("pop_call_clobbered_registers end");
 3770 }
 3771 
 3772 void MacroAssembler::push_set(XMMRegSet set, int offset) {
 3773   assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
 3774   int spill_offset = offset;
 3775 
 3776   for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
 3777     save_xmm_register(this, spill_offset, *it);
 3778     spill_offset += xmm_save_size();
 3779   }
 3780 }
 3781 
 3782 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
 3783   int restore_size = set.size() * xmm_save_size();
 3784   assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
 3785 
 3786   int restore_offset = offset + restore_size - xmm_save_size();
 3787 
 3788   for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
 3789     restore_xmm_register(this, restore_offset, *it);
 3790     restore_offset -= xmm_save_size();
 3791   }
 3792 }
 3793 
 3794 void MacroAssembler::push_set(RegSet set, int offset) {
 3795   int spill_offset;
 3796   if (offset == -1) {
 3797     int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3798     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 3799     subptr(rsp, aligned_size);
 3800     spill_offset = 0;
 3801   } else {
 3802     spill_offset = offset;
 3803   }
 3804 
 3805   for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
 3806     movptr(Address(rsp, spill_offset), *it);
 3807     spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3808   }
 3809 }
 3810 
 3811 void MacroAssembler::pop_set(RegSet set, int offset) {
 3812 
 3813   int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3814   int restore_size = set.size() * gp_reg_size;
 3815   int aligned_size = align_up(restore_size, StackAlignmentInBytes);
 3816 
 3817   int restore_offset;
 3818   if (offset == -1) {
 3819     restore_offset = restore_size - gp_reg_size;
 3820   } else {
 3821     restore_offset = offset + restore_size - gp_reg_size;
 3822   }
 3823   for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
 3824     movptr(*it, Address(rsp, restore_offset));
 3825     restore_offset -= gp_reg_size;
 3826   }
 3827 
 3828   if (offset == -1) {
 3829     addptr(rsp, aligned_size);
 3830   }
 3831 }
 3832 
 3833 // Preserves the contents of address, destroys the contents length_in_bytes and temp.
 3834 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
 3835   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
 3836   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
 3837   Label done;
 3838 
 3839   testptr(length_in_bytes, length_in_bytes);
 3840   jcc(Assembler::zero, done);
 3841 
 3842   // convert length_in_bytes into a count of words to clear
 3843   // note: for the remaining code to work, the length must be a multiple of BytesPerWord
 3844 #ifdef ASSERT
 3845   {
 3846     Label L;
 3847     testptr(length_in_bytes, BytesPerWord - 1);
 3848     jcc(Assembler::zero, L);
 3849     stop("length must be a multiple of BytesPerWord");
 3850     bind(L);
 3851   }
 3852 #endif
 3853   Register index = length_in_bytes;
 3854   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3855   if (UseIncDec) {
 3856     shrptr(index, 3);  // divide by BytesPerWord (8) and set carry flag if bit 2 was set
 3857   } else {
 3858     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3859     shrptr(index, 1);
 3860   }
 3861 
 3862   // clear the remaining words, walking down from the highest offset
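        // In pseudo code:
        //   do {
        //     index--;
        //     *(intptr_t*)(address + offset_in_bytes + index * BytesPerWord) = 0;
        //   } while (index != 0);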
 3863   {
 3864     Label loop;
 3865     bind(loop);
 3866     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3867     decrement(index);
 3868     jcc(Assembler::notZero, loop);
 3869   }
 3870 
 3871   bind(done);
 3872 }
 3873 
 3874 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
 3875   inline_layout_info(holder_klass, index, inline_klass);
 3876   movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
 3877 }
 3878 
 3879 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
 3880   movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
 3881 #ifdef ASSERT
 3882   {
 3883     Label done;
 3884     cmpptr(layout_info, 0);
 3885     jcc(Assembler::notEqual, done);
 3886     stop("inline_layout_info_array is null");
 3887     bind(done);
 3888   }
 3889 #endif
 3890 
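        // The resulting layout_info points at the index-th element of the holder's
        // InlineLayoutInfo array: the array data base plus index scaled by the
        // element size, which is computed below from two adjacent array elements.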
 3891   InlineLayoutInfo array[2];
 3892   int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
 3893   if (is_power_of_2(size)) {
 3894     shll(index, log2i_exact(size)); // Scale index by power of 2
 3895   } else {
 3896     imull(index, index, size); // Scale the index to be the entry index * array_element_size
 3897   }
 3898   lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
 3899 }
 3900 
 3901 // Look up the method for a megamorphic invokeinterface call.
 3902 // The target method is determined by <intf_klass, itable_index>.
 3903 // The receiver klass is in recv_klass.
 3904 // On success, the result will be in method_result, and execution falls through.
 3905 // On failure, execution transfers to the given label.
 3906 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3907                                              Register intf_klass,
 3908                                              RegisterOrConstant itable_index,
 3909                                              Register method_result,
 3910                                              Register scan_temp,
 3911                                              Label& L_no_such_interface,
 3912                                              bool return_method) {
 3913   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3914   assert_different_registers(method_result, intf_klass, scan_temp);
 3915   assert(recv_klass != method_result || !return_method,
 3916          "recv_klass can be destroyed when method isn't needed");
 3917 
 3918   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3919          "caller must use same register for non-constant itable index as for method");
 3920 
 3921   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
 3922   int vtable_base = in_bytes(Klass::vtable_start_offset());
 3923   int itentry_off = in_bytes(itableMethodEntry::method_offset());
 3924   int scan_step   = itableOffsetEntry::size() * wordSize;
 3925   int vte_size    = vtableEntry::size_in_bytes();
 3926   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 3927   assert(vte_size == wordSize, "else adjust times_vte_scale");
 3928 
 3929   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 3930 
 3931   // Could store the aligned, prescaled offset in the klass.
 3932   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
 3933 
 3934   if (return_method) {
 3935     // Adjust recv_klass by scaled itable_index, so we can free itable_index.
 3936     assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 3937     lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
 3938   }
 3939 
 3940   // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
 3941   //   if (scan->interface() == intf) {
 3942   //     result = (klass + scan->offset() + itable_index);
 3943   //   }
 3944   // }
 3945   Label search, found_method;
 3946 
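        // This code-generation loop runs twice to emit a manually peeled scan: the
        // first copy (peel == 1) checks the first itable entry and branches forward
        // to found_method on a hit; the second copy inverts the test so that a hit
        // falls through to found_method while a miss loops back to search.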
 3947   for (int peel = 1; peel >= 0; peel--) {
 3948     movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
 3949     cmpptr(intf_klass, method_result);
 3950 
 3951     if (peel) {
 3952       jccb(Assembler::equal, found_method);
 3953     } else {
 3954       jccb(Assembler::notEqual, search);
 3955       // (invert the test to fall through to found_method...)
 3956     }
 3957 
 3958     if (!peel)  break;
 3959 
 3960     bind(search);
 3961 
 3962     // Check that the previous entry is non-null.  A null entry means that
 3963     // the receiver class doesn't implement the interface, and wasn't the
 3964     // same as when the caller was compiled.
 3965     testptr(method_result, method_result);
 3966     jcc(Assembler::zero, L_no_such_interface);
 3967     addptr(scan_temp, scan_step);
 3968   }
 3969 
 3970   bind(found_method);
 3971 
 3972   if (return_method) {
 3973     // Got a hit.
 3974     movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
 3975     movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
 3976   }
 3977 }
 3978 
 3979 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
 3980 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
 3981 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
 3982 // The target method is determined by <holder_klass, itable_index>.
 3983 // The receiver klass is in recv_klass.
 3984 // On success, the result will be in method_result, and execution falls through.
 3985 // On failure, execution transfers to the given label.
 3986 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
 3987                                                   Register holder_klass,
 3988                                                   Register resolved_klass,
 3989                                                   Register method_result,
 3990                                                   Register scan_temp,
 3991                                                   Register temp_reg2,
 3992                                                   Register receiver,
 3993                                                   int itable_index,
 3994                                                   Label& L_no_such_interface) {
 3995   assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
 3996   Register temp_itbl_klass = method_result;
 3997   Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl
 3998 
 3999   int vtable_base = in_bytes(Klass::vtable_start_offset());
 4000   int itentry_off = in_bytes(itableMethodEntry::method_offset());
 4001   int scan_step = itableOffsetEntry::size() * wordSize;
 4002   int vte_size = vtableEntry::size_in_bytes();
 4003   int ioffset = in_bytes(itableOffsetEntry::interface_offset());
 4004   int ooffset = in_bytes(itableOffsetEntry::offset_offset());
 4005   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 4006   assert(vte_size == wordSize, "adjust times_vte_scale");
 4007 
 4008   Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
 4009 
 4010   // temp_itbl_klass = recv_klass.itable[0]
 4011   // scan_temp = &recv_klass.itable[0] + step
 4012   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 4013   movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
 4014   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
 4015   xorptr(temp_reg, temp_reg);
 4016 
 4017   // Initial checks:
 4018   //   - if (holder_klass != resolved_klass), go to "scan for resolved"
 4019   //   - if (itable[0] == 0), no such interface
 4020   //   - if (itable[0] == holder_klass), shortcut to "holder found"
 4021   cmpptr(holder_klass, resolved_klass);
 4022   jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
 4023   testptr(temp_itbl_klass, temp_itbl_klass);
 4024   jccb(Assembler::zero, L_no_such_interface);
 4025   cmpptr(holder_klass, temp_itbl_klass);
 4026   jccb(Assembler::equal, L_holder_found);
 4027 
 4028   // Loop: Look for holder_klass record in itable
 4029   //   do {
 4030   //     tmp = itable[index];
 4031   //     index += step;
 4032   //     if (tmp == holder_klass) {
 4033   //       goto L_holder_found; // Found!
 4034   //     }
 4035   //   } while (tmp != 0);
 4036   //   goto L_no_such_interface // Not found.
 4037   Label L_scan_holder;
 4038   bind(L_scan_holder);
 4039     movptr(temp_itbl_klass, Address(scan_temp, 0));
 4040     addptr(scan_temp, scan_step);
 4041     cmpptr(holder_klass, temp_itbl_klass);
 4042     jccb(Assembler::equal, L_holder_found);
 4043     testptr(temp_itbl_klass, temp_itbl_klass);
 4044     jccb(Assembler::notZero, L_scan_holder);
 4045 
 4046   jmpb(L_no_such_interface);
 4047 
 4048   // Loop: Look for resolved_class record in itable
 4049   //   do {
 4050   //     tmp = itable[index];
 4051   //     index += step;
 4052   //     if (tmp == holder_klass) {
 4053   //        // Also check if we have met a holder klass
 4054   //        holder_tmp = itable[index-step-ioffset];
 4055   //     }
 4056   //     if (tmp == resolved_klass) {
 4057   //        goto L_resolved_found;  // Found!
 4058   //     }
 4059   //   } while (tmp != 0);
 4060   //   goto L_no_such_interface // Not found.
 4061   //
 4062   Label L_loop_scan_resolved;
 4063   bind(L_loop_scan_resolved);
 4064     movptr(temp_itbl_klass, Address(scan_temp, 0));
 4065     addptr(scan_temp, scan_step);
 4066     bind(L_loop_scan_resolved_entry);
 4067     cmpptr(holder_klass, temp_itbl_klass);
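          // Branchlessly latch the holder's vtable offset when this entry is the
          // holder klass: scan_temp has already been advanced by scan_step, so the
          // (ooffset - ioffset - scan_step) displacement reaches back into the
          // offset field of the entry that was just compared.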
 4068     cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
 4069     cmpptr(resolved_klass, temp_itbl_klass);
 4070     jccb(Assembler::equal, L_resolved_found);
 4071     testptr(temp_itbl_klass, temp_itbl_klass);
 4072     jccb(Assembler::notZero, L_loop_scan_resolved);
 4073 
 4074   jmpb(L_no_such_interface);
 4075 
 4076   Label L_ready;
 4077 
 4078   // See if we already have a holder klass. If not, go and scan for it.
 4079   bind(L_resolved_found);
 4080   testptr(temp_reg, temp_reg);
 4081   jccb(Assembler::zero, L_scan_holder);
 4082   jmpb(L_ready);
 4083 
 4084   bind(L_holder_found);
 4085   movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
 4086 
 4087   // Finally, temp_reg contains holder_klass vtable offset
 4088   bind(L_ready);
 4089   assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 4090   if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl
 4091     load_klass(scan_temp, receiver, noreg);
 4092     movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
 4093   } else {
 4094     movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
 4095   }
 4096 }
 4097 
 4098 
 4099 // virtual method calling
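      // In effect: method_result = recv_klass's vtable entry [vtable_index],
      // fetched with a single scaled-index load from
      //   recv_klass + vtable_start_offset + vtable_index * wordSize + method_offset.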
 4100 void MacroAssembler::lookup_virtual_method(Register recv_klass,
 4101                                            RegisterOrConstant vtable_index,
 4102                                            Register method_result) {
 4103   const ByteSize base = Klass::vtable_start_offset();
 4104   assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
 4105   Address vtable_entry_addr(recv_klass,
 4106                             vtable_index, Address::times_ptr,
 4107                             base + vtableEntry::method_offset());
 4108   movptr(method_result, vtable_entry_addr);
 4109 }
 4110 
 4111 
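      // check_klass_subtype: the fast path either settles the question or falls
      // through into the linear slow path; both branch to L_success on a hit, so
      // control that reaches the end has failed and simply falls out through the
      // locally bound L_failure.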
 4112 void MacroAssembler::check_klass_subtype(Register sub_klass,
 4113                            Register super_klass,
 4114                            Register temp_reg,
 4115                            Label& L_success) {
 4116   Label L_failure;
 4117   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, nullptr);
 4118   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
 4119   bind(L_failure);
 4120 }
 4121 
 4122 
 4123 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
 4124                                                    Register super_klass,
 4125                                                    Register temp_reg,
 4126                                                    Label* L_success,
 4127                                                    Label* L_failure,
 4128                                                    Label* L_slow_path,
 4129                                         RegisterOrConstant super_check_offset) {
 4130   assert_different_registers(sub_klass, super_klass, temp_reg);
 4131   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
 4132   if (super_check_offset.is_register()) {
 4133     assert_different_registers(sub_klass, super_klass,
 4134                                super_check_offset.as_register());
 4135   } else if (must_load_sco) {
 4136     assert(temp_reg != noreg, "supply either a temp or a register offset");
 4137   }
 4138 
 4139   Label L_fallthrough;
 4140   int label_nulls = 0;
 4141   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4142   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4143   if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
 4144   assert(label_nulls <= 1, "at most one null in the batch");
 4145 
 4146   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4147   int sco_offset = in_bytes(Klass::super_check_offset_offset());
 4148   Address super_check_offset_addr(super_klass, sco_offset);
 4149 
 4150   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
 4151   // range of a jccb.  If this routine grows larger, reconsider at
 4152   // least some of these.
 4153 #define local_jcc(assembler_cond, label)                                \
 4154   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
 4155   else                             jcc( assembler_cond, label) /*omit semi*/
 4156 
 4157   // Hacked jmp, which may only be used just before L_fallthrough.
 4158 #define final_jmp(label)                                                \
 4159   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
 4160   else                            jmp(label)                /*omit semi*/
 4161 
 4162   // If the pointers are equal, we are done (e.g., String[] elements).
 4163   // This self-check enables sharing of secondary supertype arrays among
 4164   // non-primary types such as array-of-interface.  Otherwise, each such
 4165   // type would need its own customized secondary supertype array.
 4166   // We move this check to the front of the fast path because many
 4167   // type checks are in fact trivially successful in this manner,
 4168   // so we get a nicely predicted branch right at the start of the check.
 4169   cmpptr(sub_klass, super_klass);
 4170   local_jcc(Assembler::equal, *L_success);
 4171 
 4172   // Check the supertype display:
 4173   if (must_load_sco) {
 4174     // Positive movl does right thing on LP64.
 4175     movl(temp_reg, super_check_offset_addr);
 4176     super_check_offset = RegisterOrConstant(temp_reg);
 4177   }
 4178   Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
 4179   cmpptr(super_klass, super_check_addr); // load displayed supertype
 4180 
 4181   // This check has worked decisively for primary supers.
 4182   // Secondary supers are sought in the super_cache ('super_cache_addr').
 4183   // (Secondary supers are interfaces and very deeply nested subtypes.)
 4184   // This works in the same check above because of a tricky aliasing
 4185   // between the super_cache and the primary super display elements.
 4186   // (The 'super_check_addr' can address either, as the case requires.)
 4187   // Note that the cache is updated below if it does not help us find
 4188   // what we need immediately.
 4189   // So if it was a primary super, we can just fail immediately.
 4190   // Otherwise, it's the slow path for us (no success at this point).
 4191 
 4192   if (super_check_offset.is_register()) {
 4193     local_jcc(Assembler::equal, *L_success);
 4194     cmpl(super_check_offset.as_register(), sc_offset);
 4195     if (L_failure == &L_fallthrough) {
 4196       local_jcc(Assembler::equal, *L_slow_path);
 4197     } else {
 4198       local_jcc(Assembler::notEqual, *L_failure);
 4199       final_jmp(*L_slow_path);
 4200     }
 4201   } else if (super_check_offset.as_constant() == sc_offset) {
 4202     // Need a slow path; fast failure is impossible.
 4203     if (L_slow_path == &L_fallthrough) {
 4204       local_jcc(Assembler::equal, *L_success);
 4205     } else {
 4206       local_jcc(Assembler::notEqual, *L_slow_path);
 4207       final_jmp(*L_success);
 4208     }
 4209   } else {
 4210     // No slow path; it's a fast decision.
 4211     if (L_failure == &L_fallthrough) {
 4212       local_jcc(Assembler::equal, *L_success);
 4213     } else {
 4214       local_jcc(Assembler::notEqual, *L_failure);
 4215       final_jmp(*L_success);
 4216     }
 4217   }
 4218 
 4219   bind(L_fallthrough);
 4220 
 4221 #undef local_jcc
 4222 #undef final_jmp
 4223 }
 4224 
 4225 
 4226 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
 4227                                                           Register super_klass,
 4228                                                           Register temp_reg,
 4229                                                           Register temp2_reg,
 4230                                                           Label* L_success,
 4231                                                           Label* L_failure,
 4232                                                           bool set_cond_codes) {
 4233   assert_different_registers(sub_klass, super_klass, temp_reg);
 4234   if (temp2_reg != noreg)
 4235     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
 4236 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
 4237 
 4238   Label L_fallthrough;
 4239   int label_nulls = 0;
 4240   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4241   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4242   assert(label_nulls <= 1, "at most one null in the batch");
 4243 
 4244   // a couple of useful fields in sub_klass:
 4245   int ss_offset = in_bytes(Klass::secondary_supers_offset());
 4246   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4247   Address secondary_supers_addr(sub_klass, ss_offset);
 4248   Address super_cache_addr(     sub_klass, sc_offset);
 4249 
 4250   // Do a linear scan of the secondary super-klass chain.
 4251   // This code is rarely used, so simplicity is a virtue here.
 4252   // The repne_scan instruction uses fixed registers, which we must spill.
 4253   // Don't worry too much about pre-existing connections with the input regs.
 4254 
 4255   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
 4256   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
 4257 
 4258   // Get super_klass value into rax (even if it was in rdi or rcx).
 4259   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
 4260   if (super_klass != rax) {
 4261     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
 4262     mov(rax, super_klass);
 4263   }
 4264   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
 4265   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
 4266 
 4267 #ifndef PRODUCT
 4268   uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
 4269   ExternalAddress pst_counter_addr((address) pst_counter);
 4270   lea(rcx, pst_counter_addr);
 4271   incrementl(Address(rcx, 0));
 4272 #endif //PRODUCT
 4273 
 4274   // We will consult the secondary-super array.
 4275   movptr(rdi, secondary_supers_addr);
  // Load the array length.  (The length is non-negative, so a 32-bit movl,
  // which zero-extends on LP64, does the right thing.)
 4277   movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
 4278   // Skip to start of data.
 4279   addptr(rdi, Array<Klass*>::base_offset_in_bytes());
 4280 
 4281   // Scan RCX words at [RDI] for an occurrence of RAX.
 4282   // Set NZ/Z based on last compare.
  // 'repne' itself does not change flags; only the repeated scas does.  So if
  // RCX == 0 the scan body never runs and the Z flag is left untouched.
  // Clear Z ("not equal") before the scan so an empty array reads as "not found".
 4286 
  testptr(rax, rax); // Set Z = 0
  repne_scan();
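  // repne scasq is roughly (a sketch):
  //   while (rcx != 0) { rcx--; ZF = (rax == *(Klass**)rdi); rdi += 8; if (ZF) break; }
  // so Z is set exactly when super_klass was found in the array.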
 4289 
 4290   // Unspill the temp. registers:
 4291   if (pushed_rdi)  pop(rdi);
 4292   if (pushed_rcx)  pop(rcx);
 4293   if (pushed_rax)  pop(rax);
 4294 
 4295   if (set_cond_codes) {
 4296     // Special hack for the AD files:  rdi is guaranteed non-zero.
 4297     assert(!pushed_rdi, "rdi must be left non-null");
    // Also, the condition codes are properly set Z/NZ on success/failure.
 4299   }
 4300 
  if (L_failure == &L_fallthrough)
    jccb(Assembler::notEqual, *L_failure);
  else
    jcc(Assembler::notEqual, *L_failure);
 4304 
 4305   // Success.  Cache the super we found and proceed in triumph.
 4306   movptr(super_cache_addr, super_klass);
 4307 
 4308   if (L_success != &L_fallthrough) {
 4309     jmp(*L_success);
 4310   }
 4311 
 4312 #undef IS_A_TEMP
 4313 
 4314   bind(L_fallthrough);
 4315 }
 4316 
 4317 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4318                                                    Register super_klass,
 4319                                                    Register temp_reg,
 4320                                                    Register temp2_reg,
 4321                                                    Label* L_success,
 4322                                                    Label* L_failure,
 4323                                                    bool set_cond_codes) {
 4324   assert(set_cond_codes == false, "must be false on 64-bit x86");
 4325   check_klass_subtype_slow_path
 4326     (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
 4327      L_success, L_failure);
 4328 }
 4329 
 4330 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4331                                                    Register super_klass,
 4332                                                    Register temp_reg,
 4333                                                    Register temp2_reg,
 4334                                                    Register temp3_reg,
 4335                                                    Register temp4_reg,
 4336                                                    Label* L_success,
 4337                                                    Label* L_failure) {
 4338   if (UseSecondarySupersTable) {
 4339     check_klass_subtype_slow_path_table
 4340       (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
 4341        L_success, L_failure);
 4342   } else {
 4343     check_klass_subtype_slow_path_linear
 4344       (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
 4345   }
 4346 }
 4347 
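// If r is noreg, take the next register from available_regs and record it in
// regs_to_push so the caller knows to save and restore it around its use.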
Register MacroAssembler::allocate_if_noreg(Register r,
                                           RegSetIterator<Register> &available_regs,
                                           RegSet &regs_to_push) {
 4351   if (!r->is_valid()) {
 4352     r = *available_regs++;
 4353     regs_to_push += r;
 4354   }
 4355   return r;
 4356 }
 4357 
 4358 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
 4359                                                          Register super_klass,
 4360                                                          Register temp_reg,
 4361                                                          Register temp2_reg,
 4362                                                          Register temp3_reg,
 4363                                                          Register result_reg,
 4364                                                          Label* L_success,
 4365                                                          Label* L_failure) {
 4366   // NB! Callers may assume that, when temp2_reg is a valid register,
 4367   // this code sets it to a nonzero value.
 4368   bool temp2_reg_was_valid = temp2_reg->is_valid();
 4369 
 4370   RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
 4371 
 4372   Label L_fallthrough;
 4373   int label_nulls = 0;
 4374   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4375   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4376   assert(label_nulls <= 1, "at most one null in the batch");
 4377 
 4378   BLOCK_COMMENT("check_klass_subtype_slow_path_table");
 4379 
 4380   RegSetIterator<Register> available_regs
 4381     = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
 4382 
 4383   RegSet pushed_regs;
 4384 
 4385   temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
 4386   temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
 4387   temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
 4388   result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
 4389   Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
 4390 
 4391   assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
 4392 
 4393   {
 4394 
 4395     int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 4396     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 4397     subptr(rsp, aligned_size);
 4398     push_set(pushed_regs, 0);
 4399 
 4400     lookup_secondary_supers_table_var(sub_klass,
 4401                                       super_klass,
 4402                                       temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
 4403     cmpq(result_reg, 0);
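    // The flags set by this compare must survive until the jcc below; both the
    // register restore (pop_set) and the lea that rebalances rsp leave RFLAGS
    // untouched.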
 4404 
 4405     // Unspill the temp. registers:
 4406     pop_set(pushed_regs, 0);
 4407     // Increment SP but do not clobber flags.
 4408     lea(rsp, Address(rsp, aligned_size));
 4409   }
 4410 
 4411   if (temp2_reg_was_valid) {
 4412     movq(temp2_reg, 1);
 4413   }
 4414 
 4415   jcc(Assembler::notEqual, *L_failure);
 4416 
 4417   if (L_success != &L_fallthrough) {
 4418     jmp(*L_success);
 4419   }
 4420 
 4421   bind(L_fallthrough);
 4422 }
 4423 
 4424 // population_count variant for running without the POPCNT
 4425 // instruction, which was introduced with SSE4.2 in 2008.
 4426 void MacroAssembler::population_count(Register dst, Register src,
 4427                                       Register scratch1, Register scratch2) {
 4428   assert_different_registers(src, scratch1, scratch2);
 4429   if (UsePopCountInstruction) {
 4430     Assembler::popcntq(dst, src);
  } else {
    assert_different_registers(dst, scratch1, scratch2);
 4434     Label loop, done;
 4435 
 4436     mov(scratch1, src);
 4437     // dst = 0;
 4438     // while(scratch1 != 0) {
 4439     //   dst++;
 4440     //   scratch1 &= (scratch1 - 1);
 4441     // }
 4442     xorl(dst, dst);
 4443     testq(scratch1, scratch1);
 4444     jccb(Assembler::equal, done);
 4445     {
 4446       bind(loop);
 4447       incq(dst);
 4448       movq(scratch2, scratch1);
 4449       decq(scratch2);
 4450       andq(scratch1, scratch2);
 4451       jccb(Assembler::notEqual, loop);
 4452     }
 4453     bind(done);
 4454   }
 4455 #ifdef ASSERT
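  // Debug-only: fill the scratch registers with a recognizable poison value so
  // callers cannot silently rely on their contents.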
 4456   mov64(scratch1, 0xCafeBabeDeadBeef);
 4457   movq(scratch2, scratch1);
 4458 #endif
 4459 }
 4460 
 4461 // Ensure that the inline code and the stub are using the same registers.
 4462 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
 4463 do {                                                                 \
 4464   assert(r_super_klass  == rax, "mismatch");                         \
 4465   assert(r_array_base   == rbx, "mismatch");                         \
 4466   assert(r_array_length == rcx, "mismatch");                         \
 4467   assert(r_array_index  == rdx, "mismatch");                         \
 4468   assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
 4469   assert(r_bitmap       == r11 || r_bitmap    == noreg, "mismatch"); \
 4470   assert(result         == rdi || result      == noreg, "mismatch"); \
 4471 } while(0)
 4472 
 4473 // Versions of salq and rorq that don't need count to be in rcx
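//
// Each variant swaps the count into rcx with xchgq, performs the shift or
// rotate, then swaps back, so both rcx and the count register keep their
// original values (xchg itself leaves RFLAGS unchanged).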
 4474 
 4475 void MacroAssembler::salq(Register dest, Register count) {
 4476   if (count == rcx) {
 4477     Assembler::salq(dest);
 4478   } else {
 4479     assert_different_registers(rcx, dest);
 4480     xchgq(rcx, count);
 4481     Assembler::salq(dest);
 4482     xchgq(rcx, count);
 4483   }
 4484 }
 4485 
 4486 void MacroAssembler::rorq(Register dest, Register count) {
 4487   if (count == rcx) {
 4488     Assembler::rorq(dest);
 4489   } else {
 4490     assert_different_registers(rcx, dest);
 4491     xchgq(rcx, count);
 4492     Assembler::rorq(dest);
 4493     xchgq(rcx, count);
 4494   }
 4495 }
 4496 
 4497 // Return true: we succeeded in generating this code
 4498 //
 4499 // At runtime, return 0 in result if r_super_klass is a superclass of
 4500 // r_sub_klass, otherwise return nonzero. Use this if you know the
 4501 // super_klass_slot of the class you're looking for. This is always
 4502 // the case for instanceof and checkcast.
 4503 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
 4504                                                          Register r_super_klass,
 4505                                                          Register temp1,
 4506                                                          Register temp2,
 4507                                                          Register temp3,
 4508                                                          Register temp4,
 4509                                                          Register result,
 4510                                                          u1 super_klass_slot) {
 4511   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
 4512 
 4513   Label L_fallthrough, L_success, L_failure;
 4514 
 4515   BLOCK_COMMENT("lookup_secondary_supers_table {");
 4516 
 4517   const Register
 4518     r_array_index  = temp1,
 4519     r_array_length = temp2,
 4520     r_array_base   = temp3,
 4521     r_bitmap       = temp4;
 4522 
 4523   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
 4524 
 4525   xorq(result, result); // = 0
 4526 
 4527   movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
 4528   movq(r_array_index, r_bitmap);
 4529 
 4530   // First check the bitmap to see if super_klass might be present. If
 4531   // the bit is zero, we are certain that super_klass is not one of
 4532   // the secondary supers.
 4533   u1 bit = super_klass_slot;
 4534   {
    // NB: If the count in an x86 shift instruction is 0, the flags are
 4536     // not affected, so we do a testq instead.
 4537     int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
 4538     if (shift_count != 0) {
 4539       salq(r_array_index, shift_count);
 4540     } else {
 4541       testq(r_array_index, r_array_index);
 4542     }
 4543   }
 4544   // We test the MSB of r_array_index, i.e. its sign bit
 4545   jcc(Assembler::positive, L_failure);
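  // Example (a sketch, assuming a 64-bit bitmap): for bit == 5 the shift count
  // is 63 - 5 = 58, which moves bitmap bit 5 into the sign bit, so a
  // non-negative result means the bit was clear and the lookup fails at once.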
 4546 
 4547   // Get the first array index that can contain super_klass into r_array_index.
 4548   if (bit != 0) {
 4549     population_count(r_array_index, r_array_index, temp2, temp3);
 4550   } else {
 4551     movl(r_array_index, 1);
 4552   }
 4553   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
 4554 
 4555   // We will consult the secondary-super array.
 4556   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4557 
 4558   // We're asserting that the first word in an Array<Klass*> is the
 4559   // length, and the second word is the first word of the data. If
 4560   // that ever changes, r_array_base will have to be adjusted here.
 4561   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
 4562   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
 4563 
 4564   cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4565   jccb(Assembler::equal, L_success);
 4566 
 4567   // Is there another entry to check? Consult the bitmap.
 4568   btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
 4569   jccb(Assembler::carryClear, L_failure);
 4570 
 4571   // Linear probe. Rotate the bitmap so that the next bit to test is
 4572   // in Bit 1.
 4573   if (bit != 0) {
 4574     rorq(r_bitmap, bit);
 4575   }
 4576 
 4577   // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
 4578   // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
 4579   // Kills: r_array_length.
 4580   // Returns: result.
 4581   call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
 4582   // Result (0/1) is in rdi
 4583   jmpb(L_fallthrough);
 4584 
 4585   bind(L_failure);
 4586   incq(result); // 0 => 1
 4587 
 4588   bind(L_success);
 4589   // result = 0;
 4590 
 4591   bind(L_fallthrough);
 4592   BLOCK_COMMENT("} lookup_secondary_supers_table");
 4593 
 4594   if (VerifySecondarySupers) {
 4595     verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
 4596                                   temp1, temp2, temp3);
 4597   }
 4598 }
 4599 
 4600 // At runtime, return 0 in result if r_super_klass is a superclass of
 4601 // r_sub_klass, otherwise return nonzero. Use this version of
 4602 // lookup_secondary_supers_table() if you don't know ahead of time
 4603 // which superclass will be searched for. Used by interpreter and
 4604 // runtime stubs. It is larger and has somewhat greater latency than
 4605 // the version above, which takes a constant super_klass_slot.
 4606 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
 4607                                                        Register r_super_klass,
 4608                                                        Register temp1,
 4609                                                        Register temp2,
 4610                                                        Register temp3,
 4611                                                        Register temp4,
 4612                                                        Register result) {
 4613   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
 4614   assert_different_registers(r_sub_klass, r_super_klass, rcx);
 4615   RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
 4616 
 4617   Label L_fallthrough, L_success, L_failure;
 4618 
 4619   BLOCK_COMMENT("lookup_secondary_supers_table {");
 4620 
 4621   RegSetIterator<Register> available_regs = (temps - rcx).begin();
 4622 
 4623   // FIXME. Once we are sure that all paths reaching this point really
 4624   // do pass rcx as one of our temps we can get rid of the following
 4625   // workaround.
 4626   assert(temps.contains(rcx), "fix this code");
 4627 
 4628   // We prefer to have our shift count in rcx. If rcx is one of our
 4629   // temps, use it for slot. If not, pick any of our temps.
 4630   Register slot;
 4631   if (!temps.contains(rcx)) {
 4632     slot = *available_regs++;
 4633   } else {
 4634     slot = rcx;
 4635   }
 4636 
 4637   const Register r_array_index = *available_regs++;
 4638   const Register r_bitmap      = *available_regs++;
 4639 
 4640   // The logic above guarantees this property, but we state it here.
 4641   assert_different_registers(r_array_index, r_bitmap, rcx);
 4642 
 4643   movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
 4644   movq(r_array_index, r_bitmap);
 4645 
 4646   // First check the bitmap to see if super_klass might be present. If
 4647   // the bit is zero, we are certain that super_klass is not one of
 4648   // the secondary supers.
 4649   movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
 4650   xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
 4651   salq(r_array_index, slot);
 4652 
 4653   testq(r_array_index, r_array_index);
 4654   // We test the MSB of r_array_index, i.e. its sign bit
 4655   jcc(Assembler::positive, L_failure);
 4656 
 4657   const Register r_array_base = *available_regs++;
 4658 
 4659   // Get the first array index that can contain super_klass into r_array_index.
 4660   // Note: Clobbers r_array_base and slot.
 4661   population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
 4662 
 4663   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
 4664 
 4665   // We will consult the secondary-super array.
 4666   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4667 
 4668   // We're asserting that the first word in an Array<Klass*> is the
 4669   // length, and the second word is the first word of the data. If
 4670   // that ever changes, r_array_base will have to be adjusted here.
 4671   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
 4672   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
 4673 
 4674   cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4675   jccb(Assembler::equal, L_success);
 4676 
 4677   // Restore slot to its true value
 4678   movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
 4679 
 4680   // Linear probe. Rotate the bitmap so that the next bit to test is
 4681   // in Bit 1.
 4682   rorq(r_bitmap, slot);
 4683 
 4684   // Is there another entry to check? Consult the bitmap.
 4685   btq(r_bitmap, 1);
 4686   jccb(Assembler::carryClear, L_failure);
 4687 
 4688   // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
 4689   // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
 4690   // Kills: r_array_length.
 4691   // Returns: result.
 4692   lookup_secondary_supers_table_slow_path(r_super_klass,
 4693                                           r_array_base,
 4694                                           r_array_index,
 4695                                           r_bitmap,
 4696                                           /*temp1*/result,
 4697                                           /*temp2*/slot,
 4698                                           &L_success,
 4699                                           nullptr);
 4700 
 4701   bind(L_failure);
 4702   movq(result, 1);
 4703   jmpb(L_fallthrough);
 4704 
 4705   bind(L_success);
 4706   xorq(result, result); // = 0
 4707 
 4708   bind(L_fallthrough);
 4709   BLOCK_COMMENT("} lookup_secondary_supers_table");
 4710 
 4711   if (VerifySecondarySupers) {
 4712     verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
 4713                                   temp1, temp2, temp3);
 4714   }
 4715 }
 4716 
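// Compare 'value' against the 8-byte entries addr[count] .. addr[limit - 1],
// branching to L_success on the first match; if no entry matches, branch to
// L_failure (or fall through when L_failure is null).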
 4717 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
 4718                                  Label* L_success, Label* L_failure) {
 4719   Label L_loop, L_fallthrough;
 4720   {
 4721     int label_nulls = 0;
 4722     if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
 4723     if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
 4724     assert(label_nulls <= 1, "at most one null in the batch");
 4725   }
 4726   bind(L_loop);
 4727   cmpq(value, Address(addr, count, Address::times_8));
 4728   jcc(Assembler::equal, *L_success);
 4729   addl(count, 1);
 4730   cmpl(count, limit);
 4731   jcc(Assembler::less, L_loop);
 4732 
 4733   if (&L_fallthrough != L_failure) {
 4734     jmp(*L_failure);
 4735   }
 4736   bind(L_fallthrough);
 4737 }
 4738 
 4739 // Called by code generated by check_klass_subtype_slow_path
 4740 // above. This is called when there is a collision in the hashed
 4741 // lookup in the secondary supers array.
 4742 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
 4743                                                              Register r_array_base,
 4744                                                              Register r_array_index,
 4745                                                              Register r_bitmap,
 4746                                                              Register temp1,
 4747                                                              Register temp2,
 4748                                                              Label* L_success,
 4749                                                              Label* L_failure) {
 4750   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
 4751 
 4752   const Register
 4753     r_array_length = temp1,
 4754     r_sub_klass    = noreg,
 4755     result         = noreg;
 4756 
 4757   Label L_fallthrough;
 4758   int label_nulls = 0;
 4759   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4760   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4761   assert(label_nulls <= 1, "at most one null in the batch");
 4762 
 4763   // Load the array length.
 4764   movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
 4765   // And adjust the array base to point to the data.
 4766   // NB! Effectively increments current slot index by 1.
 4767   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
 4768   addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
 4769 
 4770   // Linear probe
 4771   Label L_huge;
 4772 
 4773   // The bitmap is full to bursting.
 4774   // Implicit invariant: BITMAP_FULL implies (length > 0)
 4775   cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
 4776   jcc(Assembler::greater, L_huge);
 4777 
 4778   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
 4779   // current slot (at secondary_supers[r_array_index]) has not yet
 4780   // been inspected, and r_array_index may be out of bounds if we
 4781   // wrapped around the end of the array.
 4782 
 4783   { // This is conventional linear probing, but instead of terminating
 4784     // when a null entry is found in the table, we maintain a bitmap
 4785     // in which a 0 indicates missing entries.
 4786     // The check above guarantees there are 0s in the bitmap, so the loop
 4787     // eventually terminates.
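    //
    // Roughly (a sketch of the loop below):
    //   for (;;) {
    //     if (index >= length) index = 0;            // wrap around
    //     if (table[index] == super) goto success;
    //     if (!test_bit(bitmap, 2)) goto failure;    // next slot is empty
    //     bitmap = rotate_right(bitmap, 1); index++;
    //   }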
 4788 
 4789     xorl(temp2, temp2); // = 0;
 4790 
 4791     Label L_again;
 4792     bind(L_again);
 4793 
 4794     // Check for array wraparound.
 4795     cmpl(r_array_index, r_array_length);
 4796     cmovl(Assembler::greaterEqual, r_array_index, temp2);
 4797 
 4798     cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4799     jcc(Assembler::equal, *L_success);
 4800 
 4801     // If the next bit in bitmap is zero, we're done.
 4802     btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
 4803     jcc(Assembler::carryClear, *L_failure);
 4804 
 4805     rorq(r_bitmap, 1); // Bits 1/2 => 0/1
 4806     addl(r_array_index, 1);
 4807 
 4808     jmp(L_again);
 4809   }
 4810 
 4811   { // Degenerate case: more than 64 secondary supers.
 4812     // FIXME: We could do something smarter here, maybe a vectorized
 4813     // comparison or a binary search, but is that worth any added
 4814     // complexity?
 4815     bind(L_huge);
 4816     xorl(r_array_index, r_array_index); // = 0
 4817     repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
 4818                 L_success,
 4819                 (&L_fallthrough != L_failure ? L_failure : nullptr));
 4820 
 4821     bind(L_fallthrough);
 4822   }
 4823 }
 4824 
 4825 struct VerifyHelperArguments {
 4826   Klass* _super;
 4827   Klass* _sub;
 4828   intptr_t _linear_result;
 4829   intptr_t _table_result;
 4830 };
 4831 
 4832 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
 4833   Klass::on_secondary_supers_verification_failure(args->_super,
 4834                                                   args->_sub,
 4835                                                   args->_linear_result,
 4836                                                   args->_table_result,
 4837                                                   msg);
 4838 }
 4839 
 4840 // Make sure that the hashed lookup and a linear scan agree.
 4841 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
 4842                                                    Register r_super_klass,
 4843                                                    Register result,
 4844                                                    Register temp1,
 4845                                                    Register temp2,
 4846                                                    Register temp3) {
 4847   const Register
 4848       r_array_index  = temp1,
 4849       r_array_length = temp2,
 4850       r_array_base   = temp3,
 4851       r_bitmap       = noreg;
 4852 
 4853   BLOCK_COMMENT("verify_secondary_supers_table {");
 4854 
 4855   Label L_success, L_failure, L_check, L_done;
 4856 
 4857   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4858   movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
 4859   // And adjust the array base to point to the data.
 4860   addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
 4861 
 4862   testl(r_array_length, r_array_length); // array_length == 0?
 4863   jcc(Assembler::zero, L_failure);
 4864 
 4865   movl(r_array_index, 0);
 4866   repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
 4867   // fall through to L_failure
 4868 
 4869   const Register linear_result = r_array_index; // reuse temp1
 4870 
 4871   bind(L_failure); // not present
 4872   movl(linear_result, 1);
 4873   jmp(L_check);
 4874 
 4875   bind(L_success); // present
 4876   movl(linear_result, 0);
 4877 
 4878   bind(L_check);
 4879   cmpl(linear_result, result);
 4880   jcc(Assembler::equal, L_done);
 4881 
 4882   { // To avoid calling convention issues, build a record on the stack
 4883     // and pass the pointer to that instead.
 4884     push(result);
 4885     push(linear_result);
 4886     push(r_sub_klass);
 4887     push(r_super_klass);
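    // The push order mirrors VerifyHelperArguments, so rsp now points at
    // { _super, _sub, _linear_result, _table_result }.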
 4888     movptr(c_rarg1, rsp);
 4889     movptr(c_rarg0, (uintptr_t) "mismatch");
 4890     call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
 4891     should_not_reach_here();
 4892   }
 4893   bind(L_done);
 4894 
 4895   BLOCK_COMMENT("} verify_secondary_supers_table");
 4896 }
 4897 
 4898 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
 4899 
 4900 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
 4901   assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
 4902 
 4903   Label L_fallthrough;
 4904   if (L_fast_path == nullptr) {
 4905     L_fast_path = &L_fallthrough;
 4906   } else if (L_slow_path == nullptr) {
 4907     L_slow_path = &L_fallthrough;
 4908   }
 4909 
 4910   // Fast path check: class is fully initialized.
 4911   // init_state needs acquire, but x86 is TSO, and so we are already good.
 4912   cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
 4913   jcc(Assembler::equal, *L_fast_path);
 4914 
 4915   // Fast path check: current thread is initializer thread
 4916   cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
 4917   if (L_slow_path == &L_fallthrough) {
 4918     jcc(Assembler::equal, *L_fast_path);
 4919     bind(*L_slow_path);
 4920   } else if (L_fast_path == &L_fallthrough) {
 4921     jcc(Assembler::notEqual, *L_slow_path);
 4922     bind(*L_fast_path);
 4923   } else {
 4924     Unimplemented();
 4925   }
 4926 }
 4927 
 4928 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
 4929   if (VM_Version::supports_cmov()) {
 4930     cmovl(cc, dst, src);
 4931   } else {
 4932     Label L;
 4933     jccb(negate_condition(cc), L);
 4934     movl(dst, src);
 4935     bind(L);
 4936   }
 4937 }
 4938 
 4939 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4940   if (VM_Version::supports_cmov()) {
 4941     cmovl(cc, dst, src);
 4942   } else {
 4943     Label L;
 4944     jccb(negate_condition(cc), L);
 4945     movl(dst, src);
 4946     bind(L);
 4947   }
 4948 }
 4949 
 4950 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4951   if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string pushed below confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
 4954     return;
 4955   }
 4956 
 4957   BLOCK_COMMENT("verify_oop {");
 4958   push(rscratch1);
 4959   push(rax);                          // save rax
 4960   push(reg);                          // pass register argument
 4961 
 4962   // Pass register number to verify_oop_subroutine
 4963   const char* b = nullptr;
 4964   {
 4965     ResourceMark rm;
 4966     stringStream ss;
 4967     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4968     b = code_string(ss.as_string());
 4969   }
 4970   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4971   pushptr(buffer.addr(), rscratch1);
 4972 
 4973   // call indirectly to solve generation ordering problem
 4974   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4975   call(rax);
 4976   // Caller pops the arguments (oop, message) and restores rax, r10
 4977   BLOCK_COMMENT("} verify_oop");
 4978 }
 4979 
 4980 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
 4981   if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
    // Only pcmpeq gets dependency-breaking treatment (i.e. execution can begin
    // without waiting for the previous result in dst); vpcmpeqd does not, so
    // use vpternlog instead.
 4984     vpternlogd(dst, 0xFF, dst, dst, vector_len);
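    // A ternary-logic immediate of 0xFF produces 1 for every input combination,
    // i.e. all ones in dst regardless of its previous contents.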
 4985   } else if (VM_Version::supports_avx()) {
 4986     vpcmpeqd(dst, dst, dst, vector_len);
 4987   } else {
 4988     pcmpeqd(dst, dst);
 4989   }
 4990 }
 4991 
 4992 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
 4993                                          int extra_slot_offset) {
 4994   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4995   int stackElementSize = Interpreter::stackElementSize;
 4996   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4997 #ifdef ASSERT
 4998   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4999   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 5000 #endif
 5001   Register             scale_reg    = noreg;
 5002   Address::ScaleFactor scale_factor = Address::no_scale;
 5003   if (arg_slot.is_constant()) {
 5004     offset += arg_slot.as_constant() * stackElementSize;
 5005   } else {
 5006     scale_reg    = arg_slot.as_register();
 5007     scale_factor = Address::times(stackElementSize);
 5008   }
 5009   offset += wordSize;           // return PC is on stack
 5010   return Address(rsp, scale_reg, scale_factor, offset);
 5011 }
 5012 
 5013 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5014   if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string pushed below confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
 5017     return;
 5018   }
 5019 
 5020   push(rscratch1);
  push(rax);                          // save rax
  // addr may be rsp-relative, so adjust it to account for the two pushes
  // we just did.
  // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
  // stores rax into addr, the reverse of what was intended.
 5026   if (addr.uses(rsp)) {
 5027     lea(rax, addr);
 5028     pushptr(Address(rax, 2 * BytesPerWord));
 5029   } else {
 5030     pushptr(addr);
 5031   }
 5032 
 5033   // Pass register number to verify_oop_subroutine
 5034   const char* b = nullptr;
 5035   {
 5036     ResourceMark rm;
 5037     stringStream ss;
 5038     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
 5039     b = code_string(ss.as_string());
 5040   }
 5041   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 5042   pushptr(buffer.addr(), rscratch1);
 5043 
 5044   // call indirectly to solve generation ordering problem
 5045   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 5046   call(rax);
 5047   // Caller pops the arguments (addr, message) and restores rax, r10.
 5048 }
 5049 
 5050 void MacroAssembler::verify_tlab() {
 5051 #ifdef ASSERT
 5052   if (UseTLAB && VerifyOops) {
 5053     Label next, ok;
 5054     Register t1 = rsi;
 5055 
 5056     push(t1);
 5057 
 5058     movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 5059     cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
 5060     jcc(Assembler::aboveEqual, next);
 5061     STOP("assert(top >= start)");
 5062     should_not_reach_here();
 5063 
 5064     bind(next);
 5065     movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
 5066     cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 5067     jcc(Assembler::aboveEqual, ok);
 5068     STOP("assert(top <= end)");
 5069     should_not_reach_here();
 5070 
 5071     bind(ok);
 5072     pop(t1);
 5073   }
 5074 #endif
 5075 }
 5076 
 5077 class ControlWord {
 5078  public:
 5079   int32_t _value;
 5080 
 5081   int  rounding_control() const        { return  (_value >> 10) & 3      ; }
 5082   int  precision_control() const       { return  (_value >>  8) & 3      ; }
 5083   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5084   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5085   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5086   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5087   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5088   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5089 
 5090   void print() const {
 5091     // rounding control
 5092     const char* rc;
 5093     switch (rounding_control()) {
 5094       case 0: rc = "round near"; break;
 5095       case 1: rc = "round down"; break;
 5096       case 2: rc = "round up  "; break;
 5097       case 3: rc = "chop      "; break;
 5098       default:
 5099         rc = nullptr; // silence compiler warnings
 5100         fatal("Unknown rounding control: %d", rounding_control());
 5101     };
 5102     // precision control
 5103     const char* pc;
 5104     switch (precision_control()) {
 5105       case 0: pc = "24 bits "; break;
 5106       case 1: pc = "reserved"; break;
 5107       case 2: pc = "53 bits "; break;
 5108       case 3: pc = "64 bits "; break;
 5109       default:
 5110         pc = nullptr; // silence compiler warnings
 5111         fatal("Unknown precision control: %d", precision_control());
 5112     };
 5113     // flags
 5114     char f[9];
 5115     f[0] = ' ';
 5116     f[1] = ' ';
 5117     f[2] = (precision   ()) ? 'P' : 'p';
 5118     f[3] = (underflow   ()) ? 'U' : 'u';
 5119     f[4] = (overflow    ()) ? 'O' : 'o';
 5120     f[5] = (zero_divide ()) ? 'Z' : 'z';
 5121     f[6] = (denormalized()) ? 'D' : 'd';
 5122     f[7] = (invalid     ()) ? 'I' : 'i';
 5123     f[8] = '\x0';
 5124     // output
 5125     printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
 5126   }
 5127 
 5128 };
 5129 
 5130 class StatusWord {
 5131  public:
 5132   int32_t _value;
 5133 
 5134   bool busy() const                    { return ((_value >> 15) & 1) != 0; }
 5135   bool C3() const                      { return ((_value >> 14) & 1) != 0; }
 5136   bool C2() const                      { return ((_value >> 10) & 1) != 0; }
 5137   bool C1() const                      { return ((_value >>  9) & 1) != 0; }
 5138   bool C0() const                      { return ((_value >>  8) & 1) != 0; }
 5139   int  top() const                     { return  (_value >> 11) & 7      ; }
 5140   bool error_status() const            { return ((_value >>  7) & 1) != 0; }
 5141   bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
 5142   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5143   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5144   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5145   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5146   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5147   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5148 
 5149   void print() const {
 5150     // condition codes
 5151     char c[5];
 5152     c[0] = (C3()) ? '3' : '-';
 5153     c[1] = (C2()) ? '2' : '-';
 5154     c[2] = (C1()) ? '1' : '-';
 5155     c[3] = (C0()) ? '0' : '-';
 5156     c[4] = '\x0';
 5157     // flags
 5158     char f[9];
 5159     f[0] = (error_status()) ? 'E' : '-';
 5160     f[1] = (stack_fault ()) ? 'S' : '-';
 5161     f[2] = (precision   ()) ? 'P' : '-';
 5162     f[3] = (underflow   ()) ? 'U' : '-';
 5163     f[4] = (overflow    ()) ? 'O' : '-';
 5164     f[5] = (zero_divide ()) ? 'Z' : '-';
 5165     f[6] = (denormalized()) ? 'D' : '-';
 5166     f[7] = (invalid     ()) ? 'I' : '-';
 5167     f[8] = '\x0';
 5168     // output
 5169     printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
 5170   }
 5171 
 5172 };
 5173 
 5174 class TagWord {
 5175  public:
 5176   int32_t _value;
 5177 
 5178   int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
 5179 
 5180   void print() const {
 5181     printf("%04x", _value & 0xFFFF);
 5182   }
 5183 
 5184 };
 5185 
 5186 class FPU_Register {
 5187  public:
 5188   int32_t _m0;
 5189   int32_t _m1;
 5190   int16_t _ex;
 5191 
 5192   bool is_indefinite() const           {
 5193     return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
 5194   }
 5195 
 5196   void print() const {
 5197     char  sign = (_ex < 0) ? '-' : '+';
 5198     const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
 5199     printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
 5200   };
 5201 
 5202 };
 5203 
 5204 class FPU_State {
 5205  public:
 5206   enum {
 5207     register_size       = 10,
 5208     number_of_registers =  8,
 5209     register_mask       =  7
 5210   };
 5211 
 5212   ControlWord  _control_word;
 5213   StatusWord   _status_word;
 5214   TagWord      _tag_word;
 5215   int32_t      _error_offset;
 5216   int32_t      _error_selector;
 5217   int32_t      _data_offset;
 5218   int32_t      _data_selector;
 5219   int8_t       _register[register_size * number_of_registers];
 5220 
 5221   int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
 5222   FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
 5223 
 5224   const char* tag_as_string(int tag) const {
 5225     switch (tag) {
 5226       case 0: return "valid";
 5227       case 1: return "zero";
 5228       case 2: return "special";
 5229       case 3: return "empty";
 5230     }
 5231     ShouldNotReachHere();
 5232     return nullptr;
 5233   }
 5234 
 5235   void print() const {
 5236     // print computation registers
 5237     { int t = _status_word.top();
 5238       for (int i = 0; i < number_of_registers; i++) {
 5239         int j = (i - t) & register_mask;
 5240         printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
 5241         st(j)->print();
 5242         printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
 5243       }
 5244     }
 5245     printf("\n");
 5246     // print control registers
 5247     printf("ctrl = "); _control_word.print(); printf("\n");
 5248     printf("stat = "); _status_word .print(); printf("\n");
 5249     printf("tags = "); _tag_word    .print(); printf("\n");
 5250   }
 5251 
 5252 };
 5253 
 5254 class Flag_Register {
 5255  public:
 5256   int32_t _value;
 5257 
 5258   bool overflow() const                { return ((_value >> 11) & 1) != 0; }
 5259   bool direction() const               { return ((_value >> 10) & 1) != 0; }
 5260   bool sign() const                    { return ((_value >>  7) & 1) != 0; }
 5261   bool zero() const                    { return ((_value >>  6) & 1) != 0; }
 5262   bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
 5263   bool parity() const                  { return ((_value >>  2) & 1) != 0; }
 5264   bool carry() const                   { return ((_value >>  0) & 1) != 0; }
 5265 
 5266   void print() const {
 5267     // flags
 5268     char f[8];
 5269     f[0] = (overflow       ()) ? 'O' : '-';
 5270     f[1] = (direction      ()) ? 'D' : '-';
 5271     f[2] = (sign           ()) ? 'S' : '-';
 5272     f[3] = (zero           ()) ? 'Z' : '-';
 5273     f[4] = (auxiliary_carry()) ? 'A' : '-';
 5274     f[5] = (parity         ()) ? 'P' : '-';
 5275     f[6] = (carry          ()) ? 'C' : '-';
 5276     f[7] = '\x0';
 5277     // output
 5278     printf("%08x  flags = %s", _value, f);
 5279   }
 5280 
 5281 };
 5282 
 5283 class IU_Register {
 5284  public:
 5285   int32_t _value;
 5286 
 5287   void print() const {
 5288     printf("%08x  %11d", _value, _value);
 5289   }
 5290 
 5291 };
 5292 
 5293 class IU_State {
 5294  public:
 5295   Flag_Register _eflags;
 5296   IU_Register   _rdi;
 5297   IU_Register   _rsi;
 5298   IU_Register   _rbp;
 5299   IU_Register   _rsp;
 5300   IU_Register   _rbx;
 5301   IU_Register   _rdx;
 5302   IU_Register   _rcx;
 5303   IU_Register   _rax;
 5304 
 5305   void print() const {
 5306     // computation registers
    printf("rax  = "); _rax.print(); printf("\n");
    printf("rbx  = "); _rbx.print(); printf("\n");
 5309     printf("rcx  = "); _rcx.print(); printf("\n");
 5310     printf("rdx  = "); _rdx.print(); printf("\n");
 5311     printf("rdi  = "); _rdi.print(); printf("\n");
 5312     printf("rsi  = "); _rsi.print(); printf("\n");
    printf("rbp  = "); _rbp.print(); printf("\n");
 5314     printf("rsp  = "); _rsp.print(); printf("\n");
 5315     printf("\n");
 5316     // control registers
 5317     printf("flgs = "); _eflags.print(); printf("\n");
 5318   }
 5319 };
 5320 
 5321 
 5322 class CPU_State {
 5323  public:
 5324   FPU_State _fpu_state;
 5325   IU_State  _iu_state;
 5326 
 5327   void print() const {
 5328     printf("--------------------------------------------------\n");
 5329     _iu_state .print();
 5330     printf("\n");
 5331     _fpu_state.print();
 5332     printf("--------------------------------------------------\n");
 5333   }
 5334 
 5335 };
 5336 
 5337 
 5338 static void _print_CPU_state(CPU_State* state) {
 5339   state->print();
 5340 };
 5341 
 5342 
 5343 void MacroAssembler::print_CPU_state() {
 5344   push_CPU_state();
 5345   push(rsp);                // pass CPU state
 5346   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
 5347   addptr(rsp, wordSize);       // discard argument
 5348   pop_CPU_state();
 5349 }
 5350 
 5351 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
 5352   // Either restore the MXCSR register after returning from the JNI Call
 5353   // or verify that it wasn't changed (with -Xcheck:jni flag).
 5354   if (VM_Version::supports_sse()) {
 5355     if (RestoreMXCSROnJNICalls) {
 5356       ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
 5357     } else if (CheckJNICalls) {
 5358       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
 5359     }
 5360   }
 5361   // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
 5362   vzeroupper();
 5363 }
 5364 
 5365 // ((OopHandle)result).resolve();
 5366 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
 5367   assert_different_registers(result, tmp);
 5368 
 5369   // Only 64 bit platforms support GCs that require a tmp register
 5370   // Only IN_HEAP loads require a thread_tmp register
 5371   // OopHandle::resolve is an indirection like jobject.
 5372   access_load_at(T_OBJECT, IN_NATIVE,
 5373                  result, Address(result, 0), tmp);
 5374 }
 5375 
 5376 // ((WeakHandle)result).resolve();
 5377 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
 5378   assert_different_registers(rresult, rtmp);
 5379   Label resolved;
 5380 
 5381   // A null weak handle resolves to null.
 5382   cmpptr(rresult, 0);
 5383   jcc(Assembler::equal, resolved);
 5384 
 5385   // Only 64 bit platforms support GCs that require a tmp register
 5386   // Only IN_HEAP loads require a thread_tmp register
 5387   // WeakHandle::resolve is an indirection like jweak.
 5388   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 5389                  rresult, Address(rresult, 0), rtmp);
 5390   bind(resolved);
 5391 }
 5392 
 5393 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5394   // get mirror
 5395   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5396   load_method_holder(mirror, method);
 5397   movptr(mirror, Address(mirror, mirror_offset));
 5398   resolve_oop_handle(mirror, tmp);
 5399 }
 5400 
 5401 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5402   load_method_holder(rresult, rmethod);
 5403   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5404 }
 5405 
 5406 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5407   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5408   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5409   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5410 }
 5411 
 5412 void MacroAssembler::load_metadata(Register dst, Register src) {
 5413   if (UseCompactObjectHeaders) {
 5414     load_narrow_klass_compact(dst, src);
 5415   } else if (UseCompressedClassPointers) {
 5416     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5417   } else {
 5418     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5419   }
 5420 }
 5421 
 5422 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5423   assert(UseCompactObjectHeaders, "expect compact object headers");
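  // With compact object headers the narrow klass pointer lives in the upper
  // bits of the mark word: load the mark and shift it down into place.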
 5424   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5425   shrq(dst, markWord::klass_shift);
 5426 }
 5427 
 5428 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5429   assert_different_registers(src, tmp);
 5430   assert_different_registers(dst, tmp);
 5431 
 5432   if (UseCompactObjectHeaders) {
 5433     load_narrow_klass_compact(dst, src);
 5434     decode_klass_not_null(dst, tmp);
 5435   } else if (UseCompressedClassPointers) {
 5436     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5437     decode_klass_not_null(dst, tmp);
 5438   } else {
 5439     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5440   }
 5441 }
 5442 
 5443 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5444   load_klass(dst, src, tmp);
 5445   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5446 }
 5447 
 5448 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5449   assert(!UseCompactObjectHeaders, "not with compact headers");
 5450   assert_different_registers(src, tmp);
 5451   assert_different_registers(dst, tmp);
 5452   if (UseCompressedClassPointers) {
 5453     encode_klass_not_null(src, tmp);
 5454     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5455   } else {
 5456     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5457   }
 5458 }
 5459 
 5460 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5461   if (UseCompactObjectHeaders) {
 5462     assert(tmp != noreg, "need tmp");
 5463     assert_different_registers(klass, obj, tmp);
 5464     load_narrow_klass_compact(tmp, obj);
 5465     cmpl(klass, tmp);
 5466   } else if (UseCompressedClassPointers) {
 5467     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
 5468   } else {
 5469     cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
 5470   }
 5471 }
 5472 
 5473 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
 5474   if (UseCompactObjectHeaders) {
 5475     assert(tmp2 != noreg, "need tmp2");
 5476     assert_different_registers(obj1, obj2, tmp1, tmp2);
 5477     load_narrow_klass_compact(tmp1, obj1);
 5478     load_narrow_klass_compact(tmp2, obj2);
 5479     cmpl(tmp1, tmp2);
 5480   } else if (UseCompressedClassPointers) {
 5481     movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
 5482     cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
 5483   } else {
 5484     movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
 5485     cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
 5486   }
 5487 }
 5488 
 5489 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5490                                     Register tmp1) {
 5491   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5492   decorators = AccessInternal::decorator_fixup(decorators, type);
 5493   bool as_raw = (decorators & AS_RAW) != 0;
 5494   if (as_raw) {
 5495     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5496   } else {
 5497     bs->load_at(this, decorators, type, dst, src, tmp1);
 5498   }
 5499 }
 5500 
 5501 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5502                                      Register tmp1, Register tmp2, Register tmp3) {
 5503   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5504   decorators = AccessInternal::decorator_fixup(decorators, type);
 5505   bool as_raw = (decorators & AS_RAW) != 0;
 5506   if (as_raw) {
 5507     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5508   } else {
 5509     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5510   }
 5511 }
 5512 
 5513 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
 5514                                      Register inline_layout_info) {
 5515   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5516   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
 5517 }
 5518 
 5519 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
 5520   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5521   movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
 5522 }
 5523 
 5524 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
 5525   // ((address) (void*) o) + vk->payload_offset();
 5526   Register offset = (data == oop) ? rscratch1 : data;
 5527   payload_offset(inline_klass, offset);
 5528   if (data == oop) {
 5529     addptr(data, offset);
 5530   } else {
 5531     lea(data, Address(oop, offset));
 5532   }
 5533 }
 5534 
 5535 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5536                                                 Register index, Register data) {
 5537   assert(index != rcx, "index needs to shift by rcx");
 5538   assert_different_registers(array, array_klass, index);
 5539   assert_different_registers(rcx, array, index);
 5540 
 5541   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5542   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5543 
 5544   // Klass::layout_helper_log2_element_size(lh)
 5545   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5546   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5547   andl(rcx, Klass::_lh_log2_element_size_mask);
 5548   shlptr(index); // index << rcx
 5549 
 5550   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
 5551 }
 5552 
 5553 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5554   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5555 }
 5556 
 5557 // Doesn't do verification, generates fixed size code
 5558 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5559   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5560 }
 5561 
 5562 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5563                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5564   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5565 }
 5566 
 5567 // Used for storing nulls.
 5568 void MacroAssembler::store_heap_oop_null(Address dst) {
 5569   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5570 }
 5571 
 5572 void MacroAssembler::store_klass_gap(Register dst, Register src) {
 5573   assert(!UseCompactObjectHeaders, "Don't use with compact headers");
 5574   if (UseCompressedClassPointers) {
 5575     // Store to klass gap in destination
 5576     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
 5577   }
 5578 }
 5579 
 5580 #ifdef ASSERT
 5581 void MacroAssembler::verify_heapbase(const char* msg) {
 5582   assert (UseCompressedOops, "should be compressed");
 5583   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5584   if (CheckCompressedOops) {
 5585     Label ok;
 5586     ExternalAddress src2(CompressedOops::base_addr());
 5587     const bool is_src2_reachable = reachable(src2);
 5588     if (!is_src2_reachable) {
 5589       push(rscratch1);  // cmpptr trashes rscratch1
 5590     }
 5591     cmpptr(r12_heapbase, src2, rscratch1);
 5592     jcc(Assembler::equal, ok);
 5593     STOP(msg);
 5594     bind(ok);
 5595     if (!is_src2_reachable) {
 5596       pop(rscratch1);
 5597     }
 5598   }
 5599 }
 5600 #endif
 5601 
 5602 // Algorithm must match oop.inline.hpp encode_heap_oop.
 5603 void MacroAssembler::encode_heap_oop(Register r) {
 5604 #ifdef ASSERT
 5605   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
 5606 #endif
 5607   verify_oop_msg(r, "broken oop in encode_heap_oop");
 5608   if (CompressedOops::base() == nullptr) {
 5609     if (CompressedOops::shift() != 0) {
 5610       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5611       shrq(r, LogMinObjAlignmentInBytes);
 5612     }
 5613     return;
 5614   }
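        // For a null oop, substitute the heap base so that the subtraction below yields zero
        // (null must encode to null).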
 5615   testq(r, r);
 5616   cmovq(Assembler::equal, r, r12_heapbase);
 5617   subq(r, r12_heapbase);
 5618   shrq(r, LogMinObjAlignmentInBytes);
 5619 }
 5620 
 5621 void MacroAssembler::encode_heap_oop_not_null(Register r) {
 5622 #ifdef ASSERT
 5623   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
 5624   if (CheckCompressedOops) {
 5625     Label ok;
 5626     testq(r, r);
 5627     jcc(Assembler::notEqual, ok);
 5628     STOP("null oop passed to encode_heap_oop_not_null");
 5629     bind(ok);
 5630   }
 5631 #endif
 5632   verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
 5633   if (CompressedOops::base() != nullptr) {
 5634     subq(r, r12_heapbase);
 5635   }
 5636   if (CompressedOops::shift() != 0) {
 5637     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5638     shrq(r, LogMinObjAlignmentInBytes);
 5639   }
 5640 }
 5641 
 5642 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
 5643 #ifdef ASSERT
 5644   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
 5645   if (CheckCompressedOops) {
 5646     Label ok;
 5647     testq(src, src);
 5648     jcc(Assembler::notEqual, ok);
 5649     STOP("null oop passed to encode_heap_oop_not_null2");
 5650     bind(ok);
 5651   }
 5652 #endif
 5653   verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
 5654   if (dst != src) {
 5655     movq(dst, src);
 5656   }
 5657   if (CompressedOops::base() != nullptr) {
 5658     subq(dst, r12_heapbase);
 5659   }
 5660   if (CompressedOops::shift() != 0) {
 5661     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5662     shrq(dst, LogMinObjAlignmentInBytes);
 5663   }
 5664 }
 5665 
 5666 void  MacroAssembler::decode_heap_oop(Register r) {
 5667 #ifdef ASSERT
 5668   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
 5669 #endif
 5670   if (CompressedOops::base() == nullptr) {
 5671     if (CompressedOops::shift() != 0) {
 5672       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5673       shlq(r, LogMinObjAlignmentInBytes);
 5674     }
 5675   } else {
 5676     Label done;
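          // The shift sets ZF when the narrow oop is null; skip adding the heap base in that case
          // so that null decodes to null.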
 5677     shlq(r, LogMinObjAlignmentInBytes);
 5678     jccb(Assembler::equal, done);
 5679     addq(r, r12_heapbase);
 5680     bind(done);
 5681   }
 5682   verify_oop_msg(r, "broken oop in decode_heap_oop");
 5683 }
 5684 
 5685 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
 5686   // Note: it will change flags
 5687   assert (UseCompressedOops, "should only be used for compressed headers");
 5688   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5689   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5690   // vtableStubs also counts instructions in pd_code_size_limit.
 5691   // Also do not verify_oop as this is called by verify_oop.
 5692   if (CompressedOops::shift() != 0) {
 5693     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5694     shlq(r, LogMinObjAlignmentInBytes);
 5695     if (CompressedOops::base() != nullptr) {
 5696       addq(r, r12_heapbase);
 5697     }
 5698   } else {
 5699     assert (CompressedOops::base() == nullptr, "sanity");
 5700   }
 5701 }
 5702 
 5703 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
 5704   // Note: it will change flags
 5705   assert (UseCompressedOops, "should only be used for compressed headers");
 5706   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5707   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5708   // vtableStubs also counts instructions in pd_code_size_limit.
 5709   // Also do not verify_oop as this is called by verify_oop.
 5710   if (CompressedOops::shift() != 0) {
 5711     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5712     if (LogMinObjAlignmentInBytes == Address::times_8) {
 5713       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
 5714     } else {
 5715       if (dst != src) {
 5716         movq(dst, src);
 5717       }
 5718       shlq(dst, LogMinObjAlignmentInBytes);
 5719       if (CompressedOops::base() != nullptr) {
 5720         addq(dst, r12_heapbase);
 5721       }
 5722     }
 5723   } else {
 5724     assert (CompressedOops::base() == nullptr, "sanity");
 5725     if (dst != src) {
 5726       movq(dst, src);
 5727     }
 5728   }
 5729 }
 5730 
 5731 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
 5732   assert_different_registers(r, tmp);
 5733   if (CompressedKlassPointers::base() != nullptr) {
 5734     mov64(tmp, (int64_t)CompressedKlassPointers::base());
 5735     subq(r, tmp);
 5736   }
 5737   if (CompressedKlassPointers::shift() != 0) {
 5738     shrq(r, CompressedKlassPointers::shift());
 5739   }
 5740 }
 5741 
 5742 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
 5743   assert_different_registers(src, dst);
 5744   if (CompressedKlassPointers::base() != nullptr) {
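          // Adding the negated base leaves src untouched and avoids the need for a temp register.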
 5745     mov64(dst, -(int64_t)CompressedKlassPointers::base());
 5746     addq(dst, src);
 5747   } else {
 5748     movptr(dst, src);
 5749   }
 5750   if (CompressedKlassPointers::shift() != 0) {
 5751     shrq(dst, CompressedKlassPointers::shift());
 5752   }
 5753 }
 5754 
 5755 void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
 5756   assert_different_registers(r, tmp);
 5757   // Note: it will change flags
 5758   assert(UseCompressedClassPointers, "should only be used for compressed headers");
 5759   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5760   // vtableStubs also counts instructions in pd_code_size_limit.
 5761   // Also do not verify_oop as this is called by verify_oop.
 5762   if (CompressedKlassPointers::shift() != 0) {
 5763     shlq(r, CompressedKlassPointers::shift());
 5764   }
 5765   if (CompressedKlassPointers::base() != nullptr) {
 5766     mov64(tmp, (int64_t)CompressedKlassPointers::base());
 5767     addq(r, tmp);
 5768   }
 5769 }
 5770 
 5771 void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
 5772   assert_different_registers(src, dst);
 5773   // Note: it will change flags
 5774   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5775   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5776   // vtableStubs also counts instructions in pd_code_size_limit.
 5777   // Also do not verify_oop as this is called by verify_oop.
 5778 
 5779   if (CompressedKlassPointers::base() == nullptr &&
 5780       CompressedKlassPointers::shift() == 0) {
 5781     // The best case scenario is that there is no base or shift. Then it is already
 5782     // a pointer that needs nothing but a register rename.
 5783     movl(dst, src);
 5784   } else {
 5785     if (CompressedKlassPointers::shift() <= Address::times_8) {
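            // A shift of at most 3 fits an addressing-mode scale factor, so the base and the shifted
            // value can be combined with a single lea (or a plain add when there is no shift).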
 5786       if (CompressedKlassPointers::base() != nullptr) {
 5787         mov64(dst, (int64_t)CompressedKlassPointers::base());
 5788       } else {
 5789         xorq(dst, dst);
 5790       }
 5791       if (CompressedKlassPointers::shift() != 0) {
 5792         assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
 5793         leaq(dst, Address(dst, src, Address::times_8, 0));
 5794       } else {
 5795         addq(dst, src);
 5796       }
 5797     } else {
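            // The shift is too large for an lea scale: add the right-shifted base (the encoding base
            // is shift-aligned, so no bits are lost) and shift the sum back left.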
 5798       if (CompressedKlassPointers::base() != nullptr) {
 5799         const uint64_t base_right_shifted =
 5800             (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
 5801         mov64(dst, base_right_shifted);
 5802       } else {
 5803         xorq(dst, dst);
 5804       }
 5805       addq(dst, src);
 5806       shlq(dst, CompressedKlassPointers::shift());
 5807     }
 5808   }
 5809 }
 5810 
 5811 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
 5812   assert (UseCompressedOops, "should only be used for compressed headers");
 5813   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5814   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5815   int oop_index = oop_recorder()->find_index(obj);
 5816   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5817   mov_narrow_oop(dst, oop_index, rspec);
 5818 }
 5819 
 5820 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
 5821   assert (UseCompressedOops, "should only be used for compressed headers");
 5822   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5823   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5824   int oop_index = oop_recorder()->find_index(obj);
 5825   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5826   mov_narrow_oop(dst, oop_index, rspec);
 5827 }
 5828 
 5829 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
 5830   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5831   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5832   int klass_index = oop_recorder()->find_index(k);
 5833   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5834   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5835 }
 5836 
 5837 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
 5838   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5839   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5840   int klass_index = oop_recorder()->find_index(k);
 5841   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5842   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5843 }
 5844 
 5845 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
 5846   assert (UseCompressedOops, "should only be used for compressed headers");
 5847   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5848   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5849   int oop_index = oop_recorder()->find_index(obj);
 5850   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5851   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5852 }
 5853 
 5854 void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
 5855   assert (UseCompressedOops, "should only be used for compressed headers");
 5856   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5857   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5858   int oop_index = oop_recorder()->find_index(obj);
 5859   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5860   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5861 }
 5862 
 5863 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
 5864   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5865   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5866   int klass_index = oop_recorder()->find_index(k);
 5867   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5868   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5869 }
 5870 
 5871 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
 5872   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5873   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5874   int klass_index = oop_recorder()->find_index(k);
 5875   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5876   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5877 }
 5878 
 5879 void MacroAssembler::reinit_heapbase() {
 5880   if (UseCompressedOops) {
 5881     if (Universe::heap() != nullptr) {
 5882       if (CompressedOops::base() == nullptr) {
 5883         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5884       } else {
 5885         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5886       }
 5887     } else {
 5888       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5889     }
 5890   }
 5891 }
 5892 
 5893 #if COMPILER2_OR_JVMCI
 5894 
 5895 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5896 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 5897   // cnt - number of qwords (8-byte words).
 5898   // base - start address, qword aligned.
 5899   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5900   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
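        // Broadcast the 64-bit fill value across the widest vector register used below.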
 5901   if (use64byteVector) {
 5902     evpbroadcastq(xtmp, val, AVX_512bit);
 5903   } else if (MaxVectorSize >= 32) {
 5904     movdq(xtmp, val);
 5905     punpcklqdq(xtmp, xtmp);
 5906     vinserti128_high(xtmp, xtmp);
 5907   } else {
 5908     movdq(xtmp, val);
 5909     punpcklqdq(xtmp, xtmp);
 5910   }
 5911   jmp(L_zero_64_bytes);
 5912 
 5913   BIND(L_loop);
 5914   if (MaxVectorSize >= 32) {
 5915     fill64(base, 0, xtmp, use64byteVector);
 5916   } else {
 5917     movdqu(Address(base,  0), xtmp);
 5918     movdqu(Address(base, 16), xtmp);
 5919     movdqu(Address(base, 32), xtmp);
 5920     movdqu(Address(base, 48), xtmp);
 5921   }
 5922   addptr(base, 64);
 5923 
 5924   BIND(L_zero_64_bytes);
 5925   subptr(cnt, 8);
 5926   jccb(Assembler::greaterEqual, L_loop);
 5927 
 5928   // Fill the trailing qwords (fewer than 8 remain)
 5929   if (use64byteVector) {
 5930     addptr(cnt, 8);
 5931     jccb(Assembler::equal, L_end);
 5932     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 5933     jmp(L_end);
 5934   } else {
 5935     addptr(cnt, 4);
 5936     jccb(Assembler::less, L_tail);
 5937     if (MaxVectorSize >= 32) {
 5938       vmovdqu(Address(base, 0), xtmp);
 5939     } else {
 5940       movdqu(Address(base,  0), xtmp);
 5941       movdqu(Address(base, 16), xtmp);
 5942     }
 5943   }
 5944   addptr(base, 32);
 5945   subptr(cnt, 4);
 5946 
 5947   BIND(L_tail);
 5948   addptr(cnt, 4);
 5949   jccb(Assembler::lessEqual, L_end);
 5950   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5951     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 5952   } else {
 5953     decrement(cnt);
 5954 
 5955     BIND(L_sloop);
 5956     movq(Address(base, 0), xtmp);
 5957     addptr(base, 8);
 5958     decrement(cnt);
 5959     jccb(Assembler::greaterEqual, L_sloop);
 5960   }
 5961   BIND(L_end);
 5962 }
 5963 
 5964 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 5965   assert(InlineTypeReturnedAsFields, "Inline types can only be returned as fields when InlineTypeReturnedAsFields is enabled");
 5966   // An inline type might be returned. If fields are in registers we
 5967   // need to allocate an inline type instance and initialize it with
 5968   // the value of the fields.
 5969   Label skip;
 5970   // Only allocate a buffer if the fields were returned in registers (bit 0 of rax is set)
 5971   testptr(rax, 1);
 5972   jcc(Assembler::zero, skip);
 5973   int call_offset = -1;
 5974 
 5975 #ifdef _LP64
 5976   // The following code is similar to allocate_instance but has some slight differences,
 5977   // e.g. the object size is never zero and is sometimes a compile-time constant, and storing the
 5978   // klass pointer after allocation is unnecessary when vk != nullptr. allocate_instance is not aware of these cases.
 5979   Label slow_case;
 5980   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 5981   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
 5982   if (vk != nullptr) {
 5983     // Called from C1, where the return type is statically known.
 5984     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 5985     jint lh = vk->layout_helper();
 5986     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 5987     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
 5988       tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
 5989     } else {
 5990       jmp(slow_case);
 5991     }
 5992   } else {
 5993     // Called from the interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 5994     mov(rbx, rax);
 5995     andptr(rbx, -2);
 5996     if (UseTLAB) {
 5997       movl(r14, Address(rbx, Klass::layout_helper_offset()));
 5998       testl(r14, Klass::_lh_instance_slow_path_bit);
 5999       jcc(Assembler::notZero, slow_case);
 6000       tlab_allocate(rax, r14, 0, r13, r14, slow_case);
 6001     } else {
 6002       jmp(slow_case);
 6003     }
 6004   }
 6005   if (UseTLAB) {
 6006     // 2. Initialize buffered inline instance header
 6007     Register buffer_obj = rax;
 6008     if (UseCompactObjectHeaders) {
 6009       Register mark_word = r13;
 6010       movptr(mark_word, Address(rbx, Klass::prototype_header_offset()));
 6011       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 6012     } else {
 6013       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6014       xorl(r13, r13);
 6015       store_klass_gap(buffer_obj, r13);
 6016       if (vk == nullptr) {
 6017         // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6018         mov(r13, rbx);
 6019       }
 6020       store_klass(buffer_obj, rbx, rscratch1);
 6021     }
 6022     // 3. Initialize its fields with an inline class specific handler
 6023     if (vk != nullptr) {
 6024       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6025     } else {
 6026       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6027       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6028       call(rbx);
 6029     }
 6030     jmp(skip);
 6031   }
 6032   bind(slow_case);
 6033   // We failed to allocate a new inline type, so fall back to a runtime
 6034   // call. Some oop fields may be live in registers, but we can't tell
 6035   // which; the runtime call takes care of preserving them across a GC
 6036   // if one occurs.
 6037   mov(rax, rscratch1);
 6038 #endif
 6039 
 6040   if (from_interpreter) {
 6041     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6042   } else {
 6043     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6044     call_offset = offset();
 6045   }
 6046 
 6047   bind(skip);
 6048   return call_offset;
 6049 }
 6050 
 6051 // Move a value between registers/stack slots and update the reg_state
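      // reg_state[] per VMReg: reg_readonly = still holds an unread source value,
      // reg_writable = free to overwrite, reg_written = destination already filled.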
 6052 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6053   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6054   if (reg_state[to->value()] == reg_written) {
 6055     return true; // Already written
 6056   }
 6057   if (from != to && bt != T_VOID) {
 6058     if (reg_state[to->value()] == reg_readonly) {
 6059       return false; // Not yet writable
 6060     }
 6061     if (from->is_reg()) {
 6062       if (to->is_reg()) {
 6063         if (from->is_XMMRegister()) {
 6064           if (bt == T_DOUBLE) {
 6065             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6066           } else {
 6067             assert(bt == T_FLOAT, "must be float");
 6068             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6069           }
 6070         } else {
 6071           movq(to->as_Register(), from->as_Register());
 6072         }
 6073       } else {
 6074         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6075         Address to_addr = Address(rsp, st_off);
 6076         if (from->is_XMMRegister()) {
 6077           if (bt == T_DOUBLE) {
 6078             movdbl(to_addr, from->as_XMMRegister());
 6079           } else {
 6080             assert(bt == T_FLOAT, "must be float");
 6081             movflt(to_addr, from->as_XMMRegister());
 6082           }
 6083         } else {
 6084           movq(to_addr, from->as_Register());
 6085         }
 6086       }
 6087     } else {
 6088       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6089       if (to->is_reg()) {
 6090         if (to->is_XMMRegister()) {
 6091           if (bt == T_DOUBLE) {
 6092             movdbl(to->as_XMMRegister(), from_addr);
 6093           } else {
 6094             assert(bt == T_FLOAT, "must be float");
 6095             movflt(to->as_XMMRegister(), from_addr);
 6096           }
 6097         } else {
 6098           movq(to->as_Register(), from_addr);
 6099         }
 6100       } else {
 6101         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6102         movq(r13, from_addr);
 6103         movq(Address(rsp, st_off), r13);
 6104       }
 6105     }
 6106   }
 6107   // Update register states
 6108   reg_state[from->value()] = reg_writable;
 6109   reg_state[to->value()] = reg_written;
 6110   return true;
 6111 }
 6112 
 6113 // Calculate the extra stack space required for packing or unpacking inline
 6114 // args and adjust the stack pointer
 6115 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6116   // Two additional slots to account for return address
 6117   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6118   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6119   // Save the return address, adjust the stack (make sure it is properly
 6120   // 16-byte aligned) and copy the return address to the new top of the stack.
 6121   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6122   assert(sp_inc > 0, "sanity");
 6123   pop(r13);
 6124   subptr(rsp, sp_inc);
 6125   push(r13);
 6126   return sp_inc;
 6127 }
 6128 
 6129 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
 6130 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6131                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6132                                           RegState reg_state[]) {
 6133   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6134   assert(from->is_valid(), "source must be valid");
 6135   bool progress = false;
 6136 #ifdef ASSERT
 6137   const int start_offset = offset();
 6138 #endif
 6139 
 6140   Label L_null, L_notNull;
 6141   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6142   Register tmp1 = r10;
 6143   Register tmp2 = r13;
 6144   Register fromReg = noreg;
 6145   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6146   bool done = true;
 6147   bool mark_done = true;
 6148   VMReg toReg;
 6149   BasicType bt;
 6150   // Check if argument requires a null check
 6151   bool null_check = false;
 6152   VMReg nullCheckReg;
 6153   while (stream.next(nullCheckReg, bt)) {
 6154     if (sig->at(stream.sig_index())._offset == -1) {
 6155       null_check = true;
 6156       break;
 6157     }
 6158   }
 6159   stream.reset(sig_index, to_index);
 6160   while (stream.next(toReg, bt)) {
 6161     assert(toReg->is_valid(), "destination must be valid");
 6162     int idx = (int)toReg->value();
 6163     if (reg_state[idx] == reg_readonly) {
 6164       if (idx != from->value()) {
 6165         mark_done = false;
 6166       }
 6167       done = false;
 6168       continue;
 6169     } else if (reg_state[idx] == reg_written) {
 6170       continue;
 6171     }
 6172     assert(reg_state[idx] == reg_writable, "must be writable");
 6173     reg_state[idx] = reg_written;
 6174     progress = true;
 6175 
 6176     if (fromReg == noreg) {
 6177       if (from->is_reg()) {
 6178         fromReg = from->as_Register();
 6179       } else {
 6180         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6181         movq(tmp1, Address(rsp, st_off));
 6182         fromReg = tmp1;
 6183       }
 6184       if (null_check) {
 6185         // Nullable inline type argument, emit null check
 6186         testptr(fromReg, fromReg);
 6187         jcc(Assembler::zero, L_null);
 6188       }
 6189     }
 6190     int off = sig->at(stream.sig_index())._offset;
 6191     if (off == -1) {
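            // An offset of -1 denotes the null marker (IsInit) slot: store 1 to flag the argument as non-null.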
 6192       assert(null_check, "Missing null check");
 6193       if (toReg->is_stack()) {
 6194         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6195         movq(Address(rsp, st_off), 1);
 6196       } else {
 6197         movq(toReg->as_Register(), 1);
 6198       }
 6199       continue;
 6200     }
 6201     assert(off > 0, "offset in object should be positive");
 6202     Address fromAddr = Address(fromReg, off);
 6203     if (!toReg->is_XMMRegister()) {
 6204       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6205       if (is_reference_type(bt)) {
 6206         load_heap_oop(dst, fromAddr);
 6207       } else {
 6208         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6209         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6210       }
 6211       if (toReg->is_stack()) {
 6212         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6213         movq(Address(rsp, st_off), dst);
 6214       }
 6215     } else if (bt == T_DOUBLE) {
 6216       movdbl(toReg->as_XMMRegister(), fromAddr);
 6217     } else {
 6218       assert(bt == T_FLOAT, "must be float");
 6219       movflt(toReg->as_XMMRegister(), fromAddr);
 6220     }
 6221   }
 6222   if (progress && null_check) {
 6223     if (done) {
 6224       jmp(L_notNull);
 6225       bind(L_null);
 6226       // Set IsInit field to zero to signal that the argument is null.
 6227       // Also set all oop fields to zero to make the GC happy.
 6228       stream.reset(sig_index, to_index);
 6229       while (stream.next(toReg, bt)) {
 6230         if (sig->at(stream.sig_index())._offset == -1 ||
 6231             bt == T_OBJECT || bt == T_ARRAY) {
 6232           if (toReg->is_stack()) {
 6233             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6234             movq(Address(rsp, st_off), 0);
 6235           } else {
 6236             xorq(toReg->as_Register(), toReg->as_Register());
 6237           }
 6238         }
 6239       }
 6240       bind(L_notNull);
 6241     } else {
 6242       bind(L_null);
 6243     }
 6244   }
 6245 
 6246   sig_index = stream.sig_index();
 6247   to_index = stream.regs_index();
 6248 
 6249   if (mark_done && reg_state[from->value()] != reg_written) {
 6250     // This is okay because no one else will write to that slot
 6251     reg_state[from->value()] = reg_writable;
 6252   }
 6253   from_index--;
 6254   assert(progress || (start_offset == offset()), "should not emit code");
 6255   return done;
 6256 }
 6257 
 6258 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6259                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6260                                         RegState reg_state[], Register val_array) {
 6261   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6262   assert(to->is_valid(), "destination must be valid");
 6263 
 6264   if (reg_state[to->value()] == reg_written) {
 6265     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6266     return true; // Already written
 6267   }
 6268 
 6269   // TODO 8284443 Isn't it an issue if the code below uses r14 as tmp while it holds a spilled value?
 6270   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6271   Register val_obj_tmp = r11;
 6272   Register from_reg_tmp = r14;
 6273   Register tmp1 = r10;
 6274   Register tmp2 = r13;
 6275   Register tmp3 = rbx;
 6276   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6277 
 6278   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6279 
 6280   if (reg_state[to->value()] == reg_readonly) {
 6281     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6282       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6283       return false; // Not yet writable
 6284     }
 6285     val_obj = val_obj_tmp;
 6286   }
 6287 
 6288   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6289   load_heap_oop(val_obj, Address(val_array, index));
 6290 
 6291   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6292   VMReg fromReg;
 6293   BasicType bt;
 6294   Label L_null;
 6295   while (stream.next(fromReg, bt)) {
 6296     assert(fromReg->is_valid(), "source must be valid");
 6297     reg_state[fromReg->value()] = reg_writable;
 6298 
 6299     int off = sig->at(stream.sig_index())._offset;
 6300     if (off == -1) {
 6301       // Nullable inline type argument, emit null check
 6302       Label L_notNull;
 6303       if (fromReg->is_stack()) {
 6304         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6305         testb(Address(rsp, ld_off), 1);
 6306       } else {
 6307         testb(fromReg->as_Register(), 1);
 6308       }
 6309       jcc(Assembler::notZero, L_notNull);
 6310       movptr(val_obj, 0);
 6311       jmp(L_null);
 6312       bind(L_notNull);
 6313       continue;
 6314     }
 6315 
 6316     assert(off > 0, "offset in object should be positive");
 6317     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6318 
 6319     Address dst(val_obj, off);
 6320     if (!fromReg->is_XMMRegister()) {
 6321       Register src;
 6322       if (fromReg->is_stack()) {
 6323         src = from_reg_tmp;
 6324         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6325         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6326       } else {
 6327         src = fromReg->as_Register();
 6328       }
 6329       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6330       if (is_reference_type(bt)) {
 6331         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6332       } else {
 6333         store_sized_value(dst, src, size_in_bytes);
 6334       }
 6335     } else if (bt == T_DOUBLE) {
 6336       movdbl(dst, fromReg->as_XMMRegister());
 6337     } else {
 6338       assert(bt == T_FLOAT, "must be float");
 6339       movflt(dst, fromReg->as_XMMRegister());
 6340     }
 6341   }
 6342   bind(L_null);
 6343   sig_index = stream.sig_index();
 6344   from_index = stream.regs_index();
 6345 
 6346   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6347   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6348   assert(success, "to register must be writable");
 6349   return true;
 6350 }
 6351 
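      // Registers reserved for spilling scalarized inline-type arguments: xmm8 for floating-point
      // values, r14 otherwise (see the callers above that avoid or flag their use as temporaries).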
 6352 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6353   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6354 }
 6355 
 6356 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6357   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6358   if (needs_stack_repair) {
 6359     movq(rbp, Address(rsp, initial_framesize));
 6360     // The stack increment resides just below the saved rbp
 6361     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6362   } else {
 6363     if (initial_framesize > 0) {
 6364       addq(rsp, initial_framesize);
 6365     }
 6366     pop(rbp);
 6367   }
 6368 }
 6369 
 6370 // Clearing constant sized memory using YMM/ZMM registers.
 6371 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6372   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 6373   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6374 
 6375   int vector64_count = (cnt & (~0x7)) >> 3;
 6376   cnt = cnt & 0x7;
 6377   const int fill64_per_loop = 4;
 6378   const int max_unrolled_fill64 = 8;
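        // With more than max_unrolled_fill64 chunks, clear fill64_per_loop 64-byte chunks per loop
        // iteration; the remaining chunks are fully unrolled below.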
 6379 
 6380   // 64 byte initialization loop.
 6381   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6382   int start64 = 0;
 6383   if (vector64_count > max_unrolled_fill64) {
 6384     Label LOOP;
 6385     Register index = rtmp;
 6386 
 6387     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6388 
 6389     movl(index, 0);
 6390     BIND(LOOP);
 6391     for (int i = 0; i < fill64_per_loop; i++) {
 6392       fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
 6393     }
 6394     addl(index, fill64_per_loop * 64);
 6395     cmpl(index, start64 * 64);
 6396     jccb(Assembler::less, LOOP);
 6397   }
 6398   for (int i = start64; i < vector64_count; i++) {
 6399     fill64(base, i * 64, xtmp, use64byteVector);
 6400   }
 6401 
 6402   // Clear remaining 64 byte tail.
 6403   int disp = vector64_count * 64;
 6404   if (cnt) {
 6405     switch (cnt) {
 6406       case 1:
 6407         movq(Address(base, disp), xtmp);
 6408         break;
 6409       case 2:
 6410         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
 6411         break;
 6412       case 3:
 6413         movl(rtmp, 0x7);
 6414         kmovwl(mask, rtmp);
 6415         evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
 6416         break;
 6417       case 4:
 6418         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6419         break;
 6420       case 5:
 6421         if (use64byteVector) {
 6422           movl(rtmp, 0x1F);
 6423           kmovwl(mask, rtmp);
 6424           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6425         } else {
 6426           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6427           movq(Address(base, disp + 32), xtmp);
 6428         }
 6429         break;
 6430       case 6:
 6431         if (use64byteVector) {
 6432           movl(rtmp, 0x3F);
 6433           kmovwl(mask, rtmp);
 6434           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6435         } else {
 6436           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6437           evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
 6438         }
 6439         break;
 6440       case 7:
 6441         if (use64byteVector) {
 6442           movl(rtmp, 0x7F);
 6443           kmovwl(mask, rtmp);
 6444           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6445         } else {
 6446           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6447           movl(rtmp, 0x7);
 6448           kmovwl(mask, rtmp);
 6449           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6450         }
 6451         break;
 6452       default:
 6453         fatal("Unexpected length : %d\n",cnt);
 6454         break;
 6455     }
 6456   }
 6457 }
 6458 
 6459 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6460                                bool is_large, bool word_copy_only, KRegister mask) {
 6461   // cnt      - number of qwords (8-byte words).
 6462   // base     - start address, qword aligned.
 6463   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 6464   assert(base==rdi, "base register must be edi for rep stos");
 6465   assert(val==rax,   "val register must be eax for rep stos");
 6466   assert(cnt==rcx,   "cnt register must be ecx for rep stos");
 6467   assert(InitArrayShortSize % BytesPerLong == 0,
 6468     "InitArrayShortSize should be the multiple of BytesPerLong");
 6469 
 6470   Label DONE;
 6471 
 6472   if (!is_large) {
 6473     Label LOOP, LONG;
 6474     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6475     jccb(Assembler::greater, LONG);
 6476 
 6477     decrement(cnt);
 6478     jccb(Assembler::negative, DONE); // Zero length
 6479 
 6480     // Use individual pointer-sized stores for small counts:
 6481     BIND(LOOP);
 6482     movptr(Address(base, cnt, Address::times_ptr), val);
 6483     decrement(cnt);
 6484     jccb(Assembler::greaterEqual, LOOP);
 6485     jmpb(DONE);
 6486 
 6487     BIND(LONG);
 6488   }
 6489 
 6490   // Use longer rep-prefixed ops for non-small counts:
 6491   if (UseFastStosb && !word_copy_only) {
 6492     shlptr(cnt, 3); // convert to number of bytes
 6493     rep_stosb();
 6494   } else if (UseXMMForObjInit) {
 6495     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6496   } else {
 6497     rep_stos();
 6498   }
 6499 
 6500   BIND(DONE);
 6501 }
 6502 
 6503 #endif //COMPILER2_OR_JVMCI
 6504 
 6505 
 6506 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6507                                    Register to, Register value, Register count,
 6508                                    Register rtmp, XMMRegister xtmp) {
 6509   ShortBranchVerifier sbv(this);
 6510   assert_different_registers(to, value, count, rtmp);
 6511   Label L_exit;
 6512   Label L_fill_2_bytes, L_fill_4_bytes;
 6513 
 6514 #if defined(COMPILER2)
 6515   if(MaxVectorSize >=32 &&
 6516      VM_Version::supports_avx512vlbw() &&
 6517      VM_Version::supports_bmi2()) {
 6518     generate_fill_avx3(t, to, value, count, rtmp, xtmp);
 6519     return;
 6520   }
 6521 #endif
 6522 
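        // shift = log2 of the number of elements per 32-bit word, so (1 << shift) elements
        // correspond to 4 bytes in the count adjustments below.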
 6523   int shift = -1;
 6524   switch (t) {
 6525     case T_BYTE:
 6526       shift = 2;
 6527       break;
 6528     case T_SHORT:
 6529       shift = 1;
 6530       break;
 6531     case T_INT:
 6532       shift = 0;
 6533       break;
 6534     default: ShouldNotReachHere();
 6535   }
 6536 
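        // Replicate the fill value so that one 32-bit store writes the pattern regardless of element size.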
 6537   if (t == T_BYTE) {
 6538     andl(value, 0xff);
 6539     movl(rtmp, value);
 6540     shll(rtmp, 8);
 6541     orl(value, rtmp);
 6542   }
 6543   if (t == T_SHORT) {
 6544     andl(value, 0xffff);
 6545   }
 6546   if (t == T_BYTE || t == T_SHORT) {
 6547     movl(rtmp, value);
 6548     shll(rtmp, 16);
 6549     orl(value, rtmp);
 6550   }
 6551 
 6552   cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
 6553   jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
 6554   if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 6555     Label L_skip_align2;
 6556     // align source address at 4 bytes address boundary
 6557     if (t == T_BYTE) {
 6558       Label L_skip_align1;
 6559       // One byte misalignment happens only for byte arrays
 6560       testptr(to, 1);
 6561       jccb(Assembler::zero, L_skip_align1);
 6562       movb(Address(to, 0), value);
 6563       increment(to);
 6564       decrement(count);
 6565       BIND(L_skip_align1);
 6566     }
 6567     // Two bytes misalignment happens only for byte and short (char) arrays
 6568     testptr(to, 2);
 6569     jccb(Assembler::zero, L_skip_align2);
 6570     movw(Address(to, 0), value);
 6571     addptr(to, 2);
 6572     subptr(count, 1<<(shift-1));
 6573     BIND(L_skip_align2);
 6574   }
 6575   {
 6576     Label L_fill_32_bytes;
 6577     if (!UseUnalignedLoadStores) {
 6578       // align to 8 bytes, we know we are 4 byte aligned to start
 6579       testptr(to, 4);
 6580       jccb(Assembler::zero, L_fill_32_bytes);
 6581       movl(Address(to, 0), value);
 6582       addptr(to, 4);
 6583       subptr(count, 1<<shift);
 6584     }
 6585     BIND(L_fill_32_bytes);
 6586     {
 6587       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
 6588       movdl(xtmp, value);
 6589       if (UseAVX >= 2 && UseUnalignedLoadStores) {
 6590         Label L_check_fill_32_bytes;
 6591         if (UseAVX > 2) {
 6592           // Fill 64-byte chunks
 6593           Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
 6594 
 6595           // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
 6596           cmpptr(count, VM_Version::avx3_threshold());
 6597           jccb(Assembler::below, L_check_fill_64_bytes_avx2);
 6598 
 6599           vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
 6600 
 6601           subptr(count, 16 << shift);
 6602           jccb(Assembler::less, L_check_fill_32_bytes);
 6603           align(16);
 6604 
 6605           BIND(L_fill_64_bytes_loop_avx3);
 6606           evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
 6607           addptr(to, 64);
 6608           subptr(count, 16 << shift);
 6609           jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
 6610           jmpb(L_check_fill_32_bytes);
 6611 
 6612           BIND(L_check_fill_64_bytes_avx2);
 6613         }
 6614         // Fill 64-byte chunks
 6615         Label L_fill_64_bytes_loop;
 6616         vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
 6617 
 6618         subptr(count, 16 << shift);
 6619         jcc(Assembler::less, L_check_fill_32_bytes);
 6620         align(16);
 6621 
 6622         BIND(L_fill_64_bytes_loop);
 6623         vmovdqu(Address(to, 0), xtmp);
 6624         vmovdqu(Address(to, 32), xtmp);
 6625         addptr(to, 64);
 6626         subptr(count, 16 << shift);
 6627         jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
 6628 
 6629         BIND(L_check_fill_32_bytes);
 6630         addptr(count, 8 << shift);
 6631         jccb(Assembler::less, L_check_fill_8_bytes);
 6632         vmovdqu(Address(to, 0), xtmp);
 6633         addptr(to, 32);
 6634         subptr(count, 8 << shift);
 6635 
 6636         BIND(L_check_fill_8_bytes);
 6637         // clean upper bits of YMM registers
 6638         movdl(xtmp, value);
 6639         pshufd(xtmp, xtmp, 0);
 6640       } else {
 6641         // Fill 32-byte chunks
 6642         pshufd(xtmp, xtmp, 0);
 6643 
 6644         subptr(count, 8 << shift);
 6645         jcc(Assembler::less, L_check_fill_8_bytes);
 6646         align(16);
 6647 
 6648         BIND(L_fill_32_bytes_loop);
 6649 
 6650         if (UseUnalignedLoadStores) {
 6651           movdqu(Address(to, 0), xtmp);
 6652           movdqu(Address(to, 16), xtmp);
 6653         } else {
 6654           movq(Address(to, 0), xtmp);
 6655           movq(Address(to, 8), xtmp);
 6656           movq(Address(to, 16), xtmp);
 6657           movq(Address(to, 24), xtmp);
 6658         }
 6659 
 6660         addptr(to, 32);
 6661         subptr(count, 8 << shift);
 6662         jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
 6663 
 6664         BIND(L_check_fill_8_bytes);
 6665       }
 6666       addptr(count, 8 << shift);
 6667       jccb(Assembler::zero, L_exit);
 6668       jmpb(L_fill_8_bytes);
 6669 
 6670       //
 6671       // length is too short, just fill qwords
 6672       //
 6673       BIND(L_fill_8_bytes_loop);
 6674       movq(Address(to, 0), xtmp);
 6675       addptr(to, 8);
 6676       BIND(L_fill_8_bytes);
 6677       subptr(count, 1 << (shift + 1));
 6678       jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
 6679     }
 6680   }
 6681   // fill trailing 4 bytes
 6682   BIND(L_fill_4_bytes);
 6683   testl(count, 1<<shift);
 6684   jccb(Assembler::zero, L_fill_2_bytes);
 6685   movl(Address(to, 0), value);
 6686   if (t == T_BYTE || t == T_SHORT) {
 6687     Label L_fill_byte;
 6688     addptr(to, 4);
 6689     BIND(L_fill_2_bytes);
 6690     // fill trailing 2 bytes
 6691     testl(count, 1<<(shift-1));
 6692     jccb(Assembler::zero, L_fill_byte);
 6693     movw(Address(to, 0), value);
 6694     if (t == T_BYTE) {
 6695       addptr(to, 2);
 6696       BIND(L_fill_byte);
 6697       // fill trailing byte
 6698       testl(count, 1);
 6699       jccb(Assembler::zero, L_exit);
 6700       movb(Address(to, 0), value);
 6701     } else {
 6702       BIND(L_fill_byte);
 6703     }
 6704   } else {
 6705     BIND(L_fill_2_bytes);
 6706   }
 6707   BIND(L_exit);
 6708 }
 6709 
 6710 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
 6711   switch(type) {
 6712     case T_BYTE:
 6713     case T_BOOLEAN:
 6714       evpbroadcastb(dst, src, vector_len);
 6715       break;
 6716     case T_SHORT:
 6717     case T_CHAR:
 6718       evpbroadcastw(dst, src, vector_len);
 6719       break;
 6720     case T_INT:
 6721     case T_FLOAT:
 6722       evpbroadcastd(dst, src, vector_len);
 6723       break;
 6724     case T_LONG:
 6725     case T_DOUBLE:
 6726       evpbroadcastq(dst, src, vector_len);
 6727       break;
 6728     default:
 6729       fatal("Unhandled type : %s", type2name(type));
 6730       break;
 6731   }
 6732 }
 6733 
 6734 // encode char[] to byte[] in ISO_8859_1 or ASCII
 6735 //   @IntrinsicCandidate
 6736 //   private static int implEncodeISOArray(byte[] sa, int sp,
 6737 //                                         byte[] da, int dp, int len) {
 6738 //     int i = 0;
 6739 //     for (; i < len; i++) {
 6740 //       char c = StringUTF16.getChar(sa, sp++);
 6741 //       if (c > '\u00FF')
 6742 //         break;
 6743 //       da[dp++] = (byte)c;
 6744 //     }
 6745 //     return i;
 6746 //   }
 6747 //
 6748 //   @IntrinsicCandidate
 6749 //   private static int implEncodeAsciiArray(char[] sa, int sp,
 6750 //                                           byte[] da, int dp, int len) {
 6751 //     int i = 0;
 6752 //     for (; i < len; i++) {
 6753 //       char c = sa[sp++];
 6754 //       if (c >= '\u0080')
 6755 //         break;
 6756 //       da[dp++] = (byte)c;
 6757 //     }
 6758 //     return i;
 6759 //   }
 6760 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
 6761   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 6762   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 6763   Register tmp5, Register result, bool ascii) {
 6764 
 6765   // rsi: src
 6766   // rdi: dst
 6767   // rdx: len
 6768   // rcx: tmp5
 6769   // rax: result
 6770   ShortBranchVerifier sbv(this);
 6771   assert_different_registers(src, dst, len, tmp5, result);
 6772   Label L_done, L_copy_1_char, L_copy_1_char_exit;
 6773 
 6774   int mask = ascii ? 0xff80ff80 : 0xff00ff00;
 6775   int short_mask = ascii ? 0xff80 : 0xff00;
 6776 
 6777   // set result
 6778   xorl(result, result);
 6779   // check for zero length
 6780   testl(len, len);
 6781   jcc(Assembler::zero, L_done);
 6782 
 6783   movl(result, len);
 6784 
 6785   // Setup pointers
 6786   lea(src, Address(src, len, Address::times_2)); // char[]
 6787   lea(dst, Address(dst, len, Address::times_1)); // byte[]
 6788   negptr(len);
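        // len is negated so that the indexed addresses below walk forward while len counts up toward zero.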
 6789 
 6790   if (UseSSE42Intrinsics || UseAVX >= 2) {
 6791     Label L_copy_8_chars, L_copy_8_chars_exit;
 6792     Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
 6793 
 6794     if (UseAVX >= 2) {
 6795       Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
 6796       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6797       movdl(tmp1Reg, tmp5);
 6798       vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
 6799       jmp(L_chars_32_check);
 6800 
 6801       bind(L_copy_32_chars);
 6802       vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
 6803       vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
 6804       vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6805       vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6806       jccb(Assembler::notZero, L_copy_32_chars_exit);
 6807       vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6808       vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
 6809       vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
 6810 
 6811       bind(L_chars_32_check);
 6812       addptr(len, 32);
 6813       jcc(Assembler::lessEqual, L_copy_32_chars);
 6814 
 6815       bind(L_copy_32_chars_exit);
 6816       subptr(len, 16);
 6817       jccb(Assembler::greater, L_copy_16_chars_exit);
 6818 
 6819     } else if (UseSSE42Intrinsics) {
 6820       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6821       movdl(tmp1Reg, tmp5);
 6822       pshufd(tmp1Reg, tmp1Reg, 0);
 6823       jmpb(L_chars_16_check);
 6824     }
 6825 
 6826     bind(L_copy_16_chars);
 6827     if (UseAVX >= 2) {
 6828       vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
 6829       vptest(tmp2Reg, tmp1Reg);
 6830       jcc(Assembler::notZero, L_copy_16_chars_exit);
 6831       vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
 6832       vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
 6833     } else {
 6834       if (UseAVX > 0) {
 6835         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6836         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6837         vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
 6838       } else {
 6839         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6840         por(tmp2Reg, tmp3Reg);
 6841         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6842         por(tmp2Reg, tmp4Reg);
 6843       }
 6844       ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6845       jccb(Assembler::notZero, L_copy_16_chars_exit);
 6846       packuswb(tmp3Reg, tmp4Reg);
 6847     }
 6848     movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
 6849 
 6850     bind(L_chars_16_check);
 6851     addptr(len, 16);
 6852     jcc(Assembler::lessEqual, L_copy_16_chars);
 6853 
 6854     bind(L_copy_16_chars_exit);
 6855     if (UseAVX >= 2) {
 6856       // clean upper bits of YMM registers
 6857       vpxor(tmp2Reg, tmp2Reg);
 6858       vpxor(tmp3Reg, tmp3Reg);
 6859       vpxor(tmp4Reg, tmp4Reg);
 6860       movdl(tmp1Reg, tmp5);
 6861       pshufd(tmp1Reg, tmp1Reg, 0);
 6862     }
 6863     subptr(len, 8);
 6864     jccb(Assembler::greater, L_copy_8_chars_exit);
 6865 
 6866     bind(L_copy_8_chars);
 6867     movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
 6868     ptest(tmp3Reg, tmp1Reg);
 6869     jccb(Assembler::notZero, L_copy_8_chars_exit);
 6870     packuswb(tmp3Reg, tmp1Reg);
 6871     movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
 6872     addptr(len, 8);
 6873     jccb(Assembler::lessEqual, L_copy_8_chars);
 6874 
 6875     bind(L_copy_8_chars_exit);
 6876     subptr(len, 8);
 6877     jccb(Assembler::zero, L_done);
 6878   }
 6879 
 6880   bind(L_copy_1_char);
 6881   load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
 6882   testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
 6883   jccb(Assembler::notZero, L_copy_1_char_exit);
 6884   movb(Address(dst, len, Address::times_1, 0), tmp5);
 6885   addptr(len, 1);
 6886   jccb(Assembler::less, L_copy_1_char);
 6887 
 6888   bind(L_copy_1_char_exit);
 6889   addptr(result, len); // len is negative count of not processed elements
 6890 
 6891   bind(L_done);
 6892 }
 6893 
 6894 /**
 6895  * Helper for multiply_to_len().
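       * Adds src1 and src2 into the 128-bit value dest_hi:dest_lo, propagating carries into dest_hi.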
 6896  */
 6897 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
 6898   addq(dest_lo, src1);
 6899   adcq(dest_hi, 0);
 6900   addq(dest_lo, src2);
 6901   adcq(dest_hi, 0);
 6902 }
 6903 
 6904 /**
 6905  * Multiply 64 bit by 64 bit first loop.
 6906  */
 6907 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
 6908                                            Register y, Register y_idx, Register z,
 6909                                            Register carry, Register product,
 6910                                            Register idx, Register kdx) {
 6911   //
 6912   //  jlong carry, x[], y[], z[];
 6913   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 6914   //    huge_128 product = y[idx] * x[xstart] + carry;
 6915   //    z[kdx] = (jlong)product;
 6916   //    carry  = (jlong)(product >>> 64);
 6917   //  }
 6918   //  z[xstart] = carry;
 6919   //
 6920 
 6921   Label L_first_loop, L_first_loop_exit;
 6922   Label L_one_x, L_one_y, L_multiply;
 6923 
 6924   decrementl(xstart);
 6925   jcc(Assembler::negative, L_one_x);
 6926 
 6927   movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 6928   rorq(x_xstart, 32); // convert big-endian to little-endian
 6929 
 6930   bind(L_first_loop);
 6931   decrementl(idx);
 6932   jcc(Assembler::negative, L_first_loop_exit);
 6933   decrementl(idx);
 6934   jcc(Assembler::negative, L_one_y);
 6935   movq(y_idx, Address(y, idx, Address::times_4,  0));
 6936   rorq(y_idx, 32); // convert big-endian to little-endian
 6937   bind(L_multiply);
 6938   movq(product, x_xstart);
 6939   mulq(y_idx); // product(rax) * y_idx -> rdx:rax
 6940   addq(product, carry);
 6941   adcq(rdx, 0);
 6942   subl(kdx, 2);
 6943   movl(Address(z, kdx, Address::times_4,  4), product);
 6944   shrq(product, 32);
 6945   movl(Address(z, kdx, Address::times_4,  0), product);
 6946   movq(carry, rdx);
 6947   jmp(L_first_loop);
 6948 
 6949   bind(L_one_y);
 6950   movl(y_idx, Address(y,  0));
 6951   jmp(L_multiply);
 6952 
 6953   bind(L_one_x);
 6954   movl(x_xstart, Address(x,  0));
 6955   jmp(L_first_loop);
 6956 
 6957   bind(L_first_loop_exit);
 6958 }
 6959 
 6960 /**
 6961  * Multiply 64 bit by 64 bit and add 128 bit.
 6962  */
 6963 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
 6964                                             Register yz_idx, Register idx,
 6965                                             Register carry, Register product, int offset) {
 6966   //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
 6967   //     z[kdx] = (jlong)product;
 6968 
 6969   movq(yz_idx, Address(y, idx, Address::times_4,  offset));
 6970   rorq(yz_idx, 32); // convert big-endian to little-endian
 6971   movq(product, x_xstart);
 6972   mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
 6973   movq(yz_idx, Address(z, idx, Address::times_4,  offset));
 6974   rorq(yz_idx, 32); // convert big-endian to little-endian
 6975 
 6976   add2_with_carry(rdx, product, carry, yz_idx);
 6977 
 6978   movl(Address(z, idx, Address::times_4,  offset+4), product);
 6979   shrq(product, 32);
 6980   movl(Address(z, idx, Address::times_4,  offset), product);
 6981 
 6982 }
 6983 
 6984 /**
 6985  * Multiply 128 bit by 128 bit. Unrolled inner loop.
 6986  */
 6987 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
 6988                                              Register yz_idx, Register idx, Register jdx,
 6989                                              Register carry, Register product,
 6990                                              Register carry2) {
 6991   //   jlong carry, x[], y[], z[];
 6992   //   int kdx = ystart+1;
 6993   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 6994   //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
 6995   //     z[kdx+idx+1] = (jlong)product;
 6996   //     jlong carry2  = (jlong)(product >>> 64);
 6997   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
 6998   //     z[kdx+idx] = (jlong)product;
 6999   //     carry  = (jlong)(product >>> 64);
 7000   //   }
 7001   //   idx += 2;
 7002   //   if (idx > 0) {
 7003   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
 7004   //     z[kdx+idx] = (jlong)product;
 7005   //     carry  = (jlong)(product >>> 64);
 7006   //   }
 7007   //
 7008 
 7009   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 7010 
 7011   movl(jdx, idx);
 7012   andl(jdx, 0xFFFFFFFC);
 7013   shrl(jdx, 2);
 7014 
 7015   bind(L_third_loop);
 7016   subl(jdx, 1);
 7017   jcc(Assembler::negative, L_third_loop_exit);
 7018   subl(idx, 4);
 7019 
 7020   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
 7021   movq(carry2, rdx);
 7022 
 7023   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
 7024   movq(carry, rdx);
 7025   jmp(L_third_loop);
 7026 
 7027   bind (L_third_loop_exit);
 7028 
 7029   andl (idx, 0x3);
 7030   jcc(Assembler::zero, L_post_third_loop_done);
 7031 
 7032   Label L_check_1;
 7033   subl(idx, 2);
 7034   jcc(Assembler::negative, L_check_1);
 7035 
 7036   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
 7037   movq(carry, rdx);
 7038 
 7039   bind (L_check_1);
 7040   addl (idx, 0x2);
 7041   andl (idx, 0x1);
 7042   subl(idx, 1);
 7043   jcc(Assembler::negative, L_post_third_loop_done);
 7044 
 7045   movl(yz_idx, Address(y, idx, Address::times_4,  0));
 7046   movq(product, x_xstart);
 7047   mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
 7048   movl(yz_idx, Address(z, idx, Address::times_4,  0));
 7049 
 7050   add2_with_carry(rdx, product, yz_idx, carry);
 7051 
 7052   movl(Address(z, idx, Address::times_4,  0), product);
 7053   shrq(product, 32);
 7054 
 7055   shlq(rdx, 32);
 7056   orq(product, rdx);
 7057   movq(carry, product);
 7058 
 7059   bind(L_post_third_loop_done);
 7060 }
 7061 
 7062 /**
 7063  * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 7064  *
 7065  */
 7066 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
 7067                                                   Register carry, Register carry2,
 7068                                                   Register idx, Register jdx,
 7069                                                   Register yz_idx1, Register yz_idx2,
 7070                                                   Register tmp, Register tmp3, Register tmp4) {
 7071   assert(UseBMI2Instructions, "should be used only when BMI2 is available");
 7072 
 7073   //   jlong carry, x[], y[], z[];
 7074   //   int kdx = ystart+1;
 7075   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 7076   //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
 7077   //     jlong carry2  = (jlong)(tmp3 >>> 64);
 7078   //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
 7079   //     carry  = (jlong)(tmp4 >>> 64);
 7080   //     z[kdx+idx+1] = (jlong)tmp3;
 7081   //     z[kdx+idx] = (jlong)tmp4;
 7082   //   }
 7083   //   idx += 2;
 7084   //   if (idx > 0) {
 7085   //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
 7086   //     z[kdx+idx] = (jlong)yz_idx1;
 7087   //     carry  = (jlong)(yz_idx1 >>> 64);
 7088   //   }
 7089   //
 7090 
 7091   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 7092 
 7093   movl(jdx, idx);
 7094   andl(jdx, 0xFFFFFFFC);
 7095   shrl(jdx, 2);
 7096 
 7097   bind(L_third_loop);
 7098   subl(jdx, 1);
 7099   jcc(Assembler::negative, L_third_loop_exit);
 7100   subl(idx, 4);
 7101 
 7102   movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
 7103   rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
 7104   movq(yz_idx2, Address(y, idx, Address::times_4,  0));
 7105   rorxq(yz_idx2, yz_idx2, 32);
 7106 
 7107   mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
 7108   mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
 7109 
 7110   movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
 7111   rorxq(yz_idx1, yz_idx1, 32);
 7112   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 7113   rorxq(yz_idx2, yz_idx2, 32);
 7114 
 7115   if (VM_Version::supports_adx()) {
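    // adcx and adox maintain independent carry chains (CF and OF respectively), so the two
    // 128-bit accumulations below can be interleaved without serializing on a single flag.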
 7116     adcxq(tmp3, carry);
 7117     adoxq(tmp3, yz_idx1);
 7118 
 7119     adcxq(tmp4, tmp);
 7120     adoxq(tmp4, yz_idx2);
 7121 
 7122     movl(carry, 0); // does not affect flags
 7123     adcxq(carry2, carry);
 7124     adoxq(carry2, carry);
 7125   } else {
 7126     add2_with_carry(tmp4, tmp3, carry, yz_idx1);
 7127     add2_with_carry(carry2, tmp4, tmp, yz_idx2);
 7128   }
 7129   movq(carry, carry2);
 7130 
 7131   movl(Address(z, idx, Address::times_4, 12), tmp3);
 7132   shrq(tmp3, 32);
 7133   movl(Address(z, idx, Address::times_4,  8), tmp3);
 7134 
 7135   movl(Address(z, idx, Address::times_4,  4), tmp4);
 7136   shrq(tmp4, 32);
 7137   movl(Address(z, idx, Address::times_4,  0), tmp4);
 7138 
 7139   jmp(L_third_loop);
 7140 
 7141   bind (L_third_loop_exit);
 7142 
 7143   andl (idx, 0x3);
 7144   jcc(Assembler::zero, L_post_third_loop_done);
 7145 
 7146   Label L_check_1;
 7147   subl(idx, 2);
 7148   jcc(Assembler::negative, L_check_1);
 7149 
 7150   movq(yz_idx1, Address(y, idx, Address::times_4,  0));
 7151   rorxq(yz_idx1, yz_idx1, 32);
 7152   mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
 7153   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 7154   rorxq(yz_idx2, yz_idx2, 32);
 7155 
 7156   add2_with_carry(tmp4, tmp3, carry, yz_idx2);
 7157 
 7158   movl(Address(z, idx, Address::times_4,  4), tmp3);
 7159   shrq(tmp3, 32);
 7160   movl(Address(z, idx, Address::times_4,  0), tmp3);
 7161   movq(carry, tmp4);
 7162 
 7163   bind (L_check_1);
 7164   addl (idx, 0x2);
 7165   andl (idx, 0x1);
 7166   subl(idx, 1);
 7167   jcc(Assembler::negative, L_post_third_loop_done);
 7168   movl(tmp4, Address(y, idx, Address::times_4,  0));
 7169   mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
 7170   movl(tmp4, Address(z, idx, Address::times_4,  0));
 7171 
 7172   add2_with_carry(carry2, tmp3, tmp4, carry);
 7173 
 7174   movl(Address(z, idx, Address::times_4,  0), tmp3);
 7175   shrq(tmp3, 32);
 7176 
 7177   shlq(carry2, 32);
 7178   orq(tmp3, carry2);
 7179   movq(carry, tmp3);
 7180 
 7181   bind(L_post_third_loop_done);
 7182 }
 7183 
 7184 /**
 7185  * Code for BigInteger::multiplyToLen() intrinsic.
 7186  *
 7187  * rdi: x
 7188  * rax: xlen
 7189  * rsi: y
 7190  * rcx: ylen
 7191  * r8:  z
 7192  * r11: tmp0
 7193  * r12: tmp1
 7194  * r13: tmp2
 7195  * r14: tmp3
 7196  * r15: tmp4
 7197  * rbx: tmp5
 7198  *
 7199  */
 7200 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
 7201                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
 7202   ShortBranchVerifier sbv(this);
 7203   assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
 7204 
 7205   push(tmp0);
 7206   push(tmp1);
 7207   push(tmp2);
 7208   push(tmp3);
 7209   push(tmp4);
 7210   push(tmp5);
 7211 
 7212   push(xlen);
 7213 
 7214   const Register idx = tmp1;
 7215   const Register kdx = tmp2;
 7216   const Register xstart = tmp3;
 7217 
 7218   const Register y_idx = tmp4;
 7219   const Register carry = tmp5;
 7220   const Register product  = xlen;
 7221   const Register x_xstart = tmp0;
 7222 
 7223   // First Loop.
 7224   //
 7225   //  final static long LONG_MASK = 0xffffffffL;
 7226   //  int xstart = xlen - 1;
 7227   //  int ystart = ylen - 1;
 7228   //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 7230   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
 7231   //    z[kdx] = (int)product;
 7232   //    carry = product >>> 32;
 7233   //  }
 7234   //  z[xstart] = (int)carry;
 7235   //
 7236 
 7237   movl(idx, ylen);               // idx = ylen;
 7238   lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
 7239   xorq(carry, carry);            // carry = 0;
 7240 
 7241   Label L_done;
 7242 
 7243   movl(xstart, xlen);
 7244   decrementl(xstart);
 7245   jcc(Assembler::negative, L_done);
 7246 
 7247   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
 7248 
 7249   Label L_second_loop;
 7250   testl(kdx, kdx);
 7251   jcc(Assembler::zero, L_second_loop);
 7252 
 7253   Label L_carry;
 7254   subl(kdx, 1);
 7255   jcc(Assembler::zero, L_carry);
 7256 
 7257   movl(Address(z, kdx, Address::times_4,  0), carry);
 7258   shrq(carry, 32);
 7259   subl(kdx, 1);
 7260 
 7261   bind(L_carry);
 7262   movl(Address(z, kdx, Address::times_4,  0), carry);
 7263 
 7264   // Second and third (nested) loops.
 7265   //
 7266   // for (int i = xstart-1; i >= 0; i--) { // Second loop
 7267   //   carry = 0;
 7268   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
 7269   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
 7270   //                    (z[k] & LONG_MASK) + carry;
 7271   //     z[k] = (int)product;
 7272   //     carry = product >>> 32;
 7273   //   }
 7274   //   z[i] = (int)carry;
 7275   // }
 7276   //
 7277   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
 7278 
 7279   const Register jdx = tmp1;
 7280 
 7281   bind(L_second_loop);
 7282   xorl(carry, carry);    // carry = 0;
 7283   movl(jdx, ylen);       // j = ystart+1
 7284 
 7285   subl(xstart, 1);       // i = xstart-1;
 7286   jcc(Assembler::negative, L_done);
 7287 
 7288   push (z);
 7289 
 7290   Label L_last_x;
 7291   lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
 7292   subl(xstart, 1);       // i = xstart-1;
 7293   jcc(Assembler::negative, L_last_x);
 7294 
 7295   if (UseBMI2Instructions) {
 7296     movq(rdx,  Address(x, xstart, Address::times_4,  0));
 7297     rorxq(rdx, rdx, 32); // convert big-endian to little-endian
 7298   } else {
 7299     movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 7300     rorq(x_xstart, 32);  // convert big-endian to little-endian
 7301   }
 7302 
 7303   Label L_third_loop_prologue;
 7304   bind(L_third_loop_prologue);
 7305 
 7306   push (x);
 7307   push (xstart);
 7308   push (ylen);
 7309 
 7310 
 7311   if (UseBMI2Instructions) {
 7312     multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
 7313   } else { // !UseBMI2Instructions
 7314     multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
 7315   }
 7316 
 7317   pop(ylen);
 7318   pop(xlen);
 7319   pop(x);
 7320   pop(z);
 7321 
 7322   movl(tmp3, xlen);
 7323   addl(tmp3, 1);
 7324   movl(Address(z, tmp3, Address::times_4,  0), carry);
 7325   subl(tmp3, 1);
 7326   jccb(Assembler::negative, L_done);
 7327 
 7328   shrq(carry, 32);
 7329   movl(Address(z, tmp3, Address::times_4,  0), carry);
 7330   jmp(L_second_loop);
 7331 
 7332   // Next infrequent code is moved outside loops.
 7333   bind(L_last_x);
 7334   if (UseBMI2Instructions) {
 7335     movl(rdx, Address(x,  0));
 7336   } else {
 7337     movl(x_xstart, Address(x,  0));
 7338   }
 7339   jmp(L_third_loop_prologue);
 7340 
 7341   bind(L_done);
 7342 
 7343   pop(xlen);
 7344 
 7345   pop(tmp5);
 7346   pop(tmp4);
 7347   pop(tmp3);
 7348   pop(tmp2);
 7349   pop(tmp1);
 7350   pop(tmp0);
 7351 }
 7352 
 7353 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
 7354   Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
 7355   assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
 7356   Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
 7357   Label VECTOR8_TAIL, VECTOR4_TAIL;
 7358   Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
 7359   Label SAME_TILL_END, DONE;
 7360   Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
 7361 
 7362   //scale is in rcx in both Win64 and Unix
 7363   ShortBranchVerifier sbv(this);
 7364 
 7365   shlq(length);
 7366   xorq(result, result);
 7367 
 7368   if ((AVX3Threshold == 0) && (UseAVX > 2) &&
 7369       VM_Version::supports_avx512vlbw()) {
 7370     Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
 7371 
 7372     cmpq(length, 64);
 7373     jcc(Assembler::less, VECTOR32_TAIL);
 7374 
 7375     movq(tmp1, length);
 7376     andq(tmp1, 0x3F);      // tail count
 7377     andq(length, ~(0x3F)); //vector count
 7378 
 7379     bind(VECTOR64_LOOP);
 7380     // AVX512 code to compare 64 byte vectors.
 7381     evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
 7382     evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
 7383     kortestql(k7, k7);
 7384     jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL);     // mismatch
 7385     addq(result, 64);
 7386     subq(length, 64);
 7387     jccb(Assembler::notZero, VECTOR64_LOOP);
 7388 
 7390     testq(tmp1, tmp1);
 7391     jcc(Assembler::zero, SAME_TILL_END);
 7392 
 7393     //bind(VECTOR64_TAIL);
 7394     // AVX512 code to compare up to 63 byte vectors.
 7395     mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
 7396     shlxq(tmp2, tmp2, tmp1);
 7397     notq(tmp2);
 7398     kmovql(k3, tmp2);
 7399 
 7400     evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
 7401     evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
 7402 
 7403     ktestql(k7, k3);
 7404     jcc(Assembler::below, SAME_TILL_END);     // not mismatch
 7405 
 7406     bind(VECTOR64_NOT_EQUAL);
 7407     kmovql(tmp1, k7);
 7408     notq(tmp1);
 7409     tzcntq(tmp1, tmp1);
 7410     addq(result, tmp1);
 7411     shrq(result);
 7412     jmp(DONE);
 7413     bind(VECTOR32_TAIL);
 7414   }
 7415 
 7416   cmpq(length, 8);
 7417   jcc(Assembler::equal, VECTOR8_LOOP);
 7418   jcc(Assembler::less, VECTOR4_TAIL);
 7419 
 7420   if (UseAVX >= 2) {
 7421     Label VECTOR16_TAIL, VECTOR32_LOOP;
 7422 
 7423     cmpq(length, 16);
 7424     jcc(Assembler::equal, VECTOR16_LOOP);
 7425     jcc(Assembler::less, VECTOR8_LOOP);
 7426 
 7427     cmpq(length, 32);
 7428     jccb(Assembler::less, VECTOR16_TAIL);
 7429 
 7430     subq(length, 32);
 7431     bind(VECTOR32_LOOP);
 7432     vmovdqu(rymm0, Address(obja, result));
 7433     vmovdqu(rymm1, Address(objb, result));
 7434     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
 7435     vptest(rymm2, rymm2);
 7436     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
 7437     addq(result, 32);
 7438     subq(length, 32);
 7439     jcc(Assembler::greaterEqual, VECTOR32_LOOP);
 7440     addq(length, 32);
 7441     jcc(Assembler::equal, SAME_TILL_END);
    // falling through if fewer than 32 bytes left; close the branch here.
 7443 
 7444     bind(VECTOR16_TAIL);
 7445     cmpq(length, 16);
 7446     jccb(Assembler::less, VECTOR8_TAIL);
 7447     bind(VECTOR16_LOOP);
 7448     movdqu(rymm0, Address(obja, result));
 7449     movdqu(rymm1, Address(objb, result));
 7450     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
 7451     ptest(rymm2, rymm2);
 7452     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7453     addq(result, 16);
 7454     subq(length, 16);
 7455     jcc(Assembler::equal, SAME_TILL_END);
 7456     //falling through if less than 16 bytes left
 7457   } else {//regular intrinsics
 7458 
 7459     cmpq(length, 16);
 7460     jccb(Assembler::less, VECTOR8_TAIL);
 7461 
 7462     subq(length, 16);
 7463     bind(VECTOR16_LOOP);
 7464     movdqu(rymm0, Address(obja, result));
 7465     movdqu(rymm1, Address(objb, result));
 7466     pxor(rymm0, rymm1);
 7467     ptest(rymm0, rymm0);
 7468     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7469     addq(result, 16);
 7470     subq(length, 16);
 7471     jccb(Assembler::greaterEqual, VECTOR16_LOOP);
 7472     addq(length, 16);
 7473     jcc(Assembler::equal, SAME_TILL_END);
 7474     //falling through if less than 16 bytes left
 7475   }
 7476 
 7477   bind(VECTOR8_TAIL);
 7478   cmpq(length, 8);
 7479   jccb(Assembler::less, VECTOR4_TAIL);
 7480   bind(VECTOR8_LOOP);
 7481   movq(tmp1, Address(obja, result));
 7482   movq(tmp2, Address(objb, result));
 7483   xorq(tmp1, tmp2);
 7484   testq(tmp1, tmp1);
 7485   jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
 7486   addq(result, 8);
 7487   subq(length, 8);
 7488   jcc(Assembler::equal, SAME_TILL_END);
 7489   //falling through if less than 8 bytes left
 7490 
 7491   bind(VECTOR4_TAIL);
 7492   cmpq(length, 4);
 7493   jccb(Assembler::less, BYTES_TAIL);
 7494   bind(VECTOR4_LOOP);
 7495   movl(tmp1, Address(obja, result));
 7496   xorl(tmp1, Address(objb, result));
 7497   testl(tmp1, tmp1);
 7498   jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
 7499   addq(result, 4);
 7500   subq(length, 4);
 7501   jcc(Assembler::equal, SAME_TILL_END);
 7502   //falling through if less than 4 bytes left
 7503 
 7504   bind(BYTES_TAIL);
 7505   bind(BYTES_LOOP);
 7506   load_unsigned_byte(tmp1, Address(obja, result));
 7507   load_unsigned_byte(tmp2, Address(objb, result));
 7508   xorl(tmp1, tmp2);
 7509   testl(tmp1, tmp1);
 7510   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7511   decq(length);
 7512   jcc(Assembler::zero, SAME_TILL_END);
 7513   incq(result);
 7514   load_unsigned_byte(tmp1, Address(obja, result));
 7515   load_unsigned_byte(tmp2, Address(objb, result));
 7516   xorl(tmp1, tmp2);
 7517   testl(tmp1, tmp1);
 7518   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7519   decq(length);
 7520   jcc(Assembler::zero, SAME_TILL_END);
 7521   incq(result);
 7522   load_unsigned_byte(tmp1, Address(obja, result));
 7523   load_unsigned_byte(tmp2, Address(objb, result));
 7524   xorl(tmp1, tmp2);
 7525   testl(tmp1, tmp1);
 7526   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7527   jmp(SAME_TILL_END);
 7528 
 7529   if (UseAVX >= 2) {
 7530     bind(VECTOR32_NOT_EQUAL);
 7531     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
 7532     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
 7533     vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
 7534     vpmovmskb(tmp1, rymm0);
 7535     bsfq(tmp1, tmp1);
 7536     addq(result, tmp1);
 7537     shrq(result);
 7538     jmp(DONE);
 7539   }
 7540 
 7541   bind(VECTOR16_NOT_EQUAL);
 7542   if (UseAVX >= 2) {
 7543     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
 7544     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
 7545     pxor(rymm0, rymm2);
 7546   } else {
 7547     pcmpeqb(rymm2, rymm2);
 7548     pxor(rymm0, rymm1);
 7549     pcmpeqb(rymm0, rymm1);
 7550     pxor(rymm0, rymm2);
 7551   }
 7552   pmovmskb(tmp1, rymm0);
 7553   bsfq(tmp1, tmp1);
 7554   addq(result, tmp1);
 7555   shrq(result);
 7556   jmpb(DONE);
 7557 
 7558   bind(VECTOR8_NOT_EQUAL);
 7559   bind(VECTOR4_NOT_EQUAL);
 7560   bsfq(tmp1, tmp1);
 7561   shrq(tmp1, 3);
 7562   addq(result, tmp1);
 7563   bind(BYTES_NOT_EQUAL);
 7564   shrq(result);
 7565   jmpb(DONE);
 7566 
 7567   bind(SAME_TILL_END);
 7568   mov64(result, -1);
 7569 
 7570   bind(DONE);
 7571 }
 7572 
 7573 //Helper functions for square_to_len()
 7574 
 7575 /**
 7576  * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 7577  * Preserves x and z and modifies rest of the registers.
 7578  */
 7579 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7580   // Perform square and right shift by 1
 7581   // Handle odd xlen case first, then for even xlen do the following
 7582   // jlong carry = 0;
 7583   // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
 7584   //     huge_128 product = x[j:j+1] * x[j:j+1];
 7585   //     z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
 7586   //     z[i+2:i+3] = (jlong)(product >>> 1);
 7587   //     carry = (jlong)product;
 7588   // }
 7589 
 7590   xorq(tmp5, tmp5);     // carry
 7591   xorq(rdxReg, rdxReg);
 7592   xorl(tmp1, tmp1);     // index for x
 7593   xorl(tmp4, tmp4);     // index for z
 7594 
 7595   Label L_first_loop, L_first_loop_exit;
 7596 
 7597   testl(xlen, 1);
 7598   jccb(Assembler::zero, L_first_loop); //jump if xlen is even
 7599 
 7600   // Square and right shift by 1 the odd element using 32 bit multiply
 7601   movl(raxReg, Address(x, tmp1, Address::times_4, 0));
 7602   imulq(raxReg, raxReg);
 7603   shrq(raxReg, 1);
 7604   adcq(tmp5, 0);
 7605   movq(Address(z, tmp4, Address::times_4, 0), raxReg);
 7606   incrementl(tmp1);
 7607   addl(tmp4, 2);
 7608 
 7609   // Square and  right shift by 1 the rest using 64 bit multiply
 7610   bind(L_first_loop);
 7611   cmpptr(tmp1, xlen);
 7612   jccb(Assembler::equal, L_first_loop_exit);
 7613 
 7614   // Square
 7615   movq(raxReg, Address(x, tmp1, Address::times_4,  0));
 7616   rorq(raxReg, 32);    // convert big-endian to little-endian
 7617   mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax
 7618 
 7619   // Right shift by 1 and save carry
 7620   shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
 7621   rcrq(rdxReg, 1);
 7622   rcrq(raxReg, 1);
 7623   adcq(tmp5, 0);
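  // CF now holds the bit shifted out of rax (the low bit of this product); adcq records it in
  // tmp5 so the next iteration's shrq/rcrq pair can rotate it back in at bit 63 (the
  // '(carry << 63)' term in the pseudo-code above).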
 7624 
 7625   // Store result in z
 7626   movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
 7627   movq(Address(z, tmp4, Address::times_4, 8), raxReg);
 7628 
 7629   // Update indices for x and z
 7630   addl(tmp1, 2);
 7631   addl(tmp4, 4);
 7632   jmp(L_first_loop);
 7633 
 7634   bind(L_first_loop_exit);
 7635 }
 7636 
 7637 
 7638 /**
 7639  * Perform the following multiply add operation using BMI2 instructions
 7640  * carry:sum = sum + op1*op2 + carry
 7641  * op2 should be in rdx
 7642  * op2 is preserved, all other registers are modified
 7643  */
 7644 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
 7645   // assert op2 is rdx
 7646   mulxq(tmp2, op1, op1);  //  op1 * op2 -> tmp2:op1
 7647   addq(sum, carry);
 7648   adcq(tmp2, 0);
 7649   addq(sum, op1);
 7650   adcq(tmp2, 0);
 7651   movq(carry, tmp2);
 7652 }
 7653 
 7654 /**
 7655  * Perform the following multiply add operation:
 7656  * carry:sum = sum + op1*op2 + carry
 7657  * Preserves op1, op2 and modifies rest of registers
 7658  */
 7659 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
 7660   // rdx:rax = op1 * op2
 7661   movq(raxReg, op2);
 7662   mulq(op1);
 7663 
 7664   //  rdx:rax = sum + carry + rdx:rax
 7665   addq(sum, carry);
 7666   adcq(rdxReg, 0);
 7667   addq(sum, raxReg);
 7668   adcq(rdxReg, 0);
 7669 
 7670   // carry:sum = rdx:sum
 7671   movq(carry, rdxReg);
 7672 }
 7673 
 7674 /**
 7675  * Add 64 bit long carry into z[] with carry propagation.
 7676  * Preserves z and carry register values and modifies rest of registers.
 7677  *
 7678  */
 7679 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
 7680   Label L_fourth_loop, L_fourth_loop_exit;
 7681 
 7682   movl(tmp1, 1);
 7683   subl(zlen, 2);
 7684   addq(Address(z, zlen, Address::times_4, 0), carry);
 7685 
 7686   bind(L_fourth_loop);
 7687   jccb(Assembler::carryClear, L_fourth_loop_exit);
 7688   subl(zlen, 2);
 7689   jccb(Assembler::negative, L_fourth_loop_exit);
 7690   addq(Address(z, zlen, Address::times_4, 0), tmp1);
 7691   jmp(L_fourth_loop);
 7692   bind(L_fourth_loop_exit);
 7693 }
 7694 
 7695 /**
 7696  * Shift z[] left by 1 bit.
 7697  * Preserves x, len, z and zlen registers and modifies rest of the registers.
 7698  *
 7699  */
 7700 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
 7701 
 7702   Label L_fifth_loop, L_fifth_loop_exit;
 7703 
 7704   // Fifth loop
 7705   // Perform primitiveLeftShift(z, zlen, 1)
 7706 
 7707   const Register prev_carry = tmp1;
 7708   const Register new_carry = tmp4;
 7709   const Register value = tmp2;
 7710   const Register zidx = tmp3;
 7711 
 7712   // int zidx, carry;
 7713   // long value;
 7714   // carry = 0;
 7715   // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
  //    (carry:value) = (z[zidx] << 1) | carry;
  //    z[zidx] = value;
 7718   // }
 7719 
 7720   movl(zidx, zlen);
 7721   xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
 7722 
 7723   bind(L_fifth_loop);
 7724   decl(zidx);  // Use decl to preserve carry flag
 7725   decl(zidx);
 7726   jccb(Assembler::negative, L_fifth_loop_exit);
 7727 
 7728   if (UseBMI2Instructions) {
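     // rclq shifts 'value' left by one, pulling the previous carry (CF) into bit 0 and leaving
     // the shifted-out bit in CF for the next iteration; rorxq then swaps the 32-bit halves for
     // the big-endian store without disturbing the flags.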
 7729      movq(value, Address(z, zidx, Address::times_4, 0));
 7730      rclq(value, 1);
 7731      rorxq(value, value, 32);
 7732      movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7733   }
 7734   else {
 7735     // clear new_carry
 7736     xorl(new_carry, new_carry);
 7737 
 7738     // Shift z[i] by 1, or in previous carry and save new carry
 7739     movq(value, Address(z, zidx, Address::times_4, 0));
 7740     shlq(value, 1);
 7741     adcl(new_carry, 0);
 7742 
 7743     orq(value, prev_carry);
 7744     rorq(value, 0x20);
 7745     movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7746 
 7747     // Set previous carry = new carry
 7748     movl(prev_carry, new_carry);
 7749   }
 7750   jmp(L_fifth_loop);
 7751 
 7752   bind(L_fifth_loop_exit);
 7753 }
 7754 
 7755 
 7756 /**
 7757  * Code for BigInteger::squareToLen() intrinsic
 7758  *
 7759  * rdi: x
 7760  * rsi: len
 7761  * r8:  z
 7762  * rcx: zlen
 7763  * r12: tmp1
 7764  * r13: tmp2
 7765  * r14: tmp3
 7766  * r15: tmp4
 7767  * rbx: tmp5
 7768  *
 7769  */
 7770 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7771 
 7772   Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
 7773   push(tmp1);
 7774   push(tmp2);
 7775   push(tmp3);
 7776   push(tmp4);
 7777   push(tmp5);
 7778 
 7779   // First loop
 7780   // Store the squares, right shifted one bit (i.e., divided by 2).
 7781   square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
 7782 
 7783   // Add in off-diagonal sums.
 7784   //
 7785   // Second, third (nested) and fourth loops.
 7786   // zlen +=2;
 7787   // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
 7788   //    carry = 0;
 7789   //    long op2 = x[xidx:xidx+1];
 7790   //    for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
 7791   //       k -= 2;
 7792   //       long op1 = x[j:j+1];
 7793   //       long sum = z[k:k+1];
 7794   //       carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
 7795   //       z[k:k+1] = sum;
 7796   //    }
 7797   //    add_one_64(z, k, carry, tmp_regs);
 7798   // }
 7799 
 7800   const Register carry = tmp5;
 7801   const Register sum = tmp3;
 7802   const Register op1 = tmp4;
 7803   Register op2 = tmp2;
 7804 
 7805   push(zlen);
 7806   push(len);
 7807   addl(zlen,2);
 7808   bind(L_second_loop);
 7809   xorq(carry, carry);
 7810   subl(zlen, 4);
 7811   subl(len, 2);
 7812   push(zlen);
 7813   push(len);
 7814   cmpl(len, 0);
 7815   jccb(Assembler::lessEqual, L_second_loop_exit);
 7816 
 7817   // Multiply an array by one 64 bit long.
 7818   if (UseBMI2Instructions) {
 7819     op2 = rdxReg;
 7820     movq(op2, Address(x, len, Address::times_4,  0));
 7821     rorxq(op2, op2, 32);
 7822   }
 7823   else {
 7824     movq(op2, Address(x, len, Address::times_4,  0));
 7825     rorq(op2, 32);
 7826   }
 7827 
 7828   bind(L_third_loop);
 7829   decrementl(len);
 7830   jccb(Assembler::negative, L_third_loop_exit);
 7831   decrementl(len);
 7832   jccb(Assembler::negative, L_last_x);
 7833 
 7834   movq(op1, Address(x, len, Address::times_4,  0));
 7835   rorq(op1, 32);
 7836 
 7837   bind(L_multiply);
 7838   subl(zlen, 2);
 7839   movq(sum, Address(z, zlen, Address::times_4,  0));
 7840 
 7841   // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
 7842   if (UseBMI2Instructions) {
 7843     multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
 7844   }
 7845   else {
 7846     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7847   }
 7848 
 7849   movq(Address(z, zlen, Address::times_4, 0), sum);
 7850 
 7851   jmp(L_third_loop);
 7852   bind(L_third_loop_exit);
 7853 
 7854   // Fourth loop
 7855   // Add 64 bit long carry into z with carry propagation.
  // Uses the adjusted zlen.
 7857   add_one_64(z, zlen, carry, tmp1);
 7858 
 7859   pop(len);
 7860   pop(zlen);
 7861   jmp(L_second_loop);
 7862 
 7863   // Next infrequent code is moved outside loops.
 7864   bind(L_last_x);
 7865   movl(op1, Address(x, 0));
 7866   jmp(L_multiply);
 7867 
 7868   bind(L_second_loop_exit);
 7869   pop(len);
 7870   pop(zlen);
 7871   pop(len);
 7872   pop(zlen);
 7873 
 7874   // Fifth loop
 7875   // Shift z left 1 bit.
 7876   lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
 7877 
 7878   // z[zlen-1] |= x[len-1] & 1;
 7879   movl(tmp3, Address(x, len, Address::times_4, -4));
 7880   andl(tmp3, 1);
 7881   orl(Address(z, zlen, Address::times_4,  -4), tmp3);
 7882 
 7883   pop(tmp5);
 7884   pop(tmp4);
 7885   pop(tmp3);
 7886   pop(tmp2);
 7887   pop(tmp1);
 7888 }
 7889 
 7890 /**
 7891  * Helper function for mul_add()
 7892  * Multiply the in[] by int k and add to out[] starting at offset offs using
 7893  * 128 bit by 32 bit multiply and return the carry in tmp5.
 7894  * Only quad int aligned length of in[] is operated on in this function.
 7895  * k is in rdxReg for BMI2Instructions, for others it is in tmp2.
 7896  * This function preserves out, in and k registers.
 7897  * len and offset point to the appropriate index in "in" & "out" correspondingly
 7898  * tmp5 has the carry.
 7899  * other registers are temporary and are modified.
 7900  *
 7901  */
 7902 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
 7903   Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
 7904   Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7905 
 7906   Label L_first_loop, L_first_loop_exit;
 7907 
 7908   movl(tmp1, len);
 7909   shrl(tmp1, 2);
 7910 
 7911   bind(L_first_loop);
 7912   subl(tmp1, 1);
 7913   jccb(Assembler::negative, L_first_loop_exit);
 7914 
 7915   subl(len, 4);
 7916   subl(offset, 4);
 7917 
 7918   Register op2 = tmp2;
 7919   const Register sum = tmp3;
 7920   const Register op1 = tmp4;
 7921   const Register carry = tmp5;
 7922 
 7923   if (UseBMI2Instructions) {
 7924     op2 = rdxReg;
 7925   }
 7926 
 7927   movq(op1, Address(in, len, Address::times_4,  8));
 7928   rorq(op1, 32);
 7929   movq(sum, Address(out, offset, Address::times_4,  8));
 7930   rorq(sum, 32);
 7931   if (UseBMI2Instructions) {
 7932     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7933   }
 7934   else {
 7935     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7936   }
 7937   // Store back in big endian from little endian
 7938   rorq(sum, 0x20);
 7939   movq(Address(out, offset, Address::times_4,  8), sum);
 7940 
 7941   movq(op1, Address(in, len, Address::times_4,  0));
 7942   rorq(op1, 32);
 7943   movq(sum, Address(out, offset, Address::times_4,  0));
 7944   rorq(sum, 32);
 7945   if (UseBMI2Instructions) {
 7946     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7947   }
 7948   else {
 7949     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7950   }
 7951   // Store back in big endian from little endian
 7952   rorq(sum, 0x20);
 7953   movq(Address(out, offset, Address::times_4,  0), sum);
 7954 
 7955   jmp(L_first_loop);
 7956   bind(L_first_loop_exit);
 7957 }
 7958 
 7959 /**
 7960  * Code for BigInteger::mulAdd() intrinsic
 7961  *
 7962  * rdi: out
 7963  * rsi: in
 7964  * r11: offs (out.length - offset)
 7965  * rcx: len
 7966  * r8:  k
 7967  * r12: tmp1
 7968  * r13: tmp2
 7969  * r14: tmp3
 7970  * r15: tmp4
 7971  * rbx: tmp5
 7972  * Multiply the in[] by word k and add to out[], return the carry in rax
 7973  */
 7974 void MacroAssembler::mul_add(Register out, Register in, Register offs,
 7975    Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
 7976    Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7977 
 7978   Label L_carry, L_last_in, L_done;
 7979 
 7980 // carry = 0;
 7981 // for (int j=len-1; j >= 0; j--) {
 7982 //    long product = (in[j] & LONG_MASK) * kLong +
 7983 //                   (out[offs] & LONG_MASK) + carry;
 7984 //    out[offs--] = (int)product;
 7985 //    carry = product >>> 32;
 7986 // }
 7987 //
 7988   push(tmp1);
 7989   push(tmp2);
 7990   push(tmp3);
 7991   push(tmp4);
 7992   push(tmp5);
 7993 
 7994   Register op2 = tmp2;
 7995   const Register sum = tmp3;
 7996   const Register op1 = tmp4;
 7997   const Register carry =  tmp5;
 7998 
 7999   if (UseBMI2Instructions) {
 8000     op2 = rdxReg;
 8001     movl(op2, k);
 8002   }
 8003   else {
 8004     movl(op2, k);
 8005   }
 8006 
 8007   xorq(carry, carry);
 8008 
 8009   //First loop
 8010 
 8011   //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
 8012   //The carry is in tmp5
 8013   mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
 8014 
 8015   //Multiply the trailing in[] entry using 64 bit by 32 bit, if any
 8016   decrementl(len);
 8017   jccb(Assembler::negative, L_carry);
 8018   decrementl(len);
 8019   jccb(Assembler::negative, L_last_in);
 8020 
 8021   movq(op1, Address(in, len, Address::times_4,  0));
 8022   rorq(op1, 32);
 8023 
 8024   subl(offs, 2);
 8025   movq(sum, Address(out, offs, Address::times_4,  0));
 8026   rorq(sum, 32);
 8027 
 8028   if (UseBMI2Instructions) {
 8029     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 8030   }
 8031   else {
 8032     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 8033   }
 8034 
 8035   // Store back in big endian from little endian
 8036   rorq(sum, 0x20);
 8037   movq(Address(out, offs, Address::times_4,  0), sum);
 8038 
 8039   testl(len, len);
 8040   jccb(Assembler::zero, L_carry);
 8041 
 8042   //Multiply the last in[] entry, if any
 8043   bind(L_last_in);
 8044   movl(op1, Address(in, 0));
 8045   movl(sum, Address(out, offs, Address::times_4,  -4));
 8046 
 8047   movl(raxReg, k);
 8048   mull(op1); //tmp4 * eax -> edx:eax
 8049   addl(sum, carry);
 8050   adcl(rdxReg, 0);
 8051   addl(sum, raxReg);
 8052   adcl(rdxReg, 0);
 8053   movl(carry, rdxReg);
 8054 
 8055   movl(Address(out, offs, Address::times_4,  -4), sum);
 8056 
 8057   bind(L_carry);
 8058   //return tmp5/carry as carry in rax
 8059   movl(rax, carry);
 8060 
 8061   bind(L_done);
 8062   pop(tmp5);
 8063   pop(tmp4);
 8064   pop(tmp3);
 8065   pop(tmp2);
 8066   pop(tmp1);
 8067 }
 8068 
 8069 /**
 8070  * Emits code to update CRC-32 with a byte value according to constants in table
 8071  *
 8072  * @param [in,out]crc   Register containing the crc.
 8073  * @param [in]val       Register containing the byte to fold into the CRC.
 8074  * @param [in]table     Register containing the table of crc constants.
 8075  *
 8076  * uint32_t crc;
 8077  * val = crc_table[(val ^ crc) & 0xFF];
 8078  * crc = val ^ (crc >> 8);
 8079  *
 8080  */
 8081 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
 8082   xorl(val, crc);
 8083   andl(val, 0xFF);
 8084   shrl(crc, 8); // unsigned shift
 8085   xorl(crc, Address(table, val, Address::times_4, 0));
 8086 }
 8087 
 8088 /**
 8089  * Fold 128-bit data chunk
 8090  */
 8091 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
 8092   if (UseAVX > 0) {
    vpclmulhdq(xtmp, xK, xcrc); // [127:64]
 8094     vpclmulldq(xcrc, xK, xcrc); // [63:0]
 8095     vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
 8096     pxor(xcrc, xtmp);
 8097   } else {
 8098     movdqa(xtmp, xcrc);
    pclmulhdq(xtmp, xK);   // [127:64]
 8100     pclmulldq(xcrc, xK);   // [63:0]
 8101     pxor(xcrc, xtmp);
 8102     movdqu(xtmp, Address(buf, offset));
 8103     pxor(xcrc, xtmp);
 8104   }
 8105 }
 8106 
 8107 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
 8108   if (UseAVX > 0) {
 8109     vpclmulhdq(xtmp, xK, xcrc);
 8110     vpclmulldq(xcrc, xK, xcrc);
 8111     pxor(xcrc, xbuf);
 8112     pxor(xcrc, xtmp);
 8113   } else {
 8114     movdqa(xtmp, xcrc);
 8115     pclmulhdq(xtmp, xK);
 8116     pclmulldq(xcrc, xK);
 8117     pxor(xcrc, xbuf);
 8118     pxor(xcrc, xtmp);
 8119   }
 8120 }
 8121 
 8122 /**
 8123  * 8-bit folds to compute 32-bit CRC
 8124  *
 8125  * uint64_t xcrc;
 8126  * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 8127  */
 8128 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
 8129   movdl(tmp, xcrc);
 8130   andl(tmp, 0xFF);
 8131   movdl(xtmp, Address(table, tmp, Address::times_4, 0));
 8132   psrldq(xcrc, 1); // unsigned shift one byte
 8133   pxor(xcrc, xtmp);
 8134 }
 8135 
 8136 /**
 8137  * uint32_t crc;
 8138  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 8139  */
 8140 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
 8141   movl(tmp, crc);
 8142   andl(tmp, 0xFF);
 8143   shrl(crc, 8);
 8144   xorl(crc, Address(table, tmp, Address::times_4, 0));
 8145 }
 8146 
 8147 /**
 8148  * @param crc   register containing existing CRC (32-bit)
 8149  * @param buf   register pointing to input byte buffer (byte*)
 8150  * @param len   register containing number of bytes
 8151  * @param table register that will contain address of CRC table
 8152  * @param tmp   scratch register
 8153  */
 8154 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
 8155   assert_different_registers(crc, buf, len, table, tmp, rax);
 8156 
 8157   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8158   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8159 
  // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
  // context for the registers used, since all instructions below use 128-bit mode.
  // On EVEX without VL and BW, these instructions will all be AVX.
 8163   lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
 8164   notl(crc); // ~crc
 8165   cmpl(len, 16);
 8166   jcc(Assembler::less, L_tail);
 8167 
 8168   // Align buffer to 16 bytes
 8169   movl(tmp, buf);
 8170   andl(tmp, 0xF);
 8171   jccb(Assembler::zero, L_aligned);
 8172   subl(tmp,  16);
 8173   addl(len, tmp);
 8174 
 8175   align(4);
 8176   BIND(L_align_loop);
 8177   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 8178   update_byte_crc32(crc, rax, table);
 8179   increment(buf);
 8180   incrementl(tmp);
 8181   jccb(Assembler::less, L_align_loop);
 8182 
 8183   BIND(L_aligned);
 8184   movl(tmp, len); // save
 8185   shrl(len, 4);
 8186   jcc(Assembler::zero, L_tail_restore);
 8187 
 8188   // Fold crc into first bytes of vector
 8189   movdqa(xmm1, Address(buf, 0));
 8190   movdl(rax, xmm1);
 8191   xorl(crc, rax);
 8192   if (VM_Version::supports_sse4_1()) {
 8193     pinsrd(xmm1, crc, 0);
 8194   } else {
 8195     pinsrw(xmm1, crc, 0);
 8196     shrl(crc, 16);
 8197     pinsrw(xmm1, crc, 1);
 8198   }
 8199   addptr(buf, 16);
 8200   subl(len, 4); // len > 0
 8201   jcc(Assembler::less, L_fold_tail);
 8202 
 8203   movdqa(xmm2, Address(buf,  0));
 8204   movdqa(xmm3, Address(buf, 16));
 8205   movdqa(xmm4, Address(buf, 32));
 8206   addptr(buf, 48);
 8207   subl(len, 3);
 8208   jcc(Assembler::lessEqual, L_fold_512b);
 8209 
 8210   // Fold total 512 bits of polynomial on each iteration,
 8211   // 128 bits per each of 4 parallel streams.
 8212   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
 8213 
 8214   align32();
 8215   BIND(L_fold_512b_loop);
 8216   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 8217   fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
 8218   fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
 8219   fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
 8220   addptr(buf, 64);
 8221   subl(len, 4);
 8222   jcc(Assembler::greater, L_fold_512b_loop);
 8223 
 8224   // Fold 512 bits to 128 bits.
 8225   BIND(L_fold_512b);
 8226   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 8227   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
 8228   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
 8229   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
 8230 
 8231   // Fold the rest of 128 bits data chunks
 8232   BIND(L_fold_tail);
 8233   addl(len, 3);
 8234   jccb(Assembler::lessEqual, L_fold_128b);
 8235   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 8236 
 8237   BIND(L_fold_tail_loop);
 8238   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 8239   addptr(buf, 16);
 8240   decrementl(len);
 8241   jccb(Assembler::greater, L_fold_tail_loop);
 8242 
 8243   // Fold 128 bits in xmm1 down into 32 bits in crc register.
 8244   BIND(L_fold_128b);
 8245   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
 8246   if (UseAVX > 0) {
 8247     vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
 8248     vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
 8249     vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
 8250   } else {
 8251     movdqa(xmm2, xmm0);
 8252     pclmulqdq(xmm2, xmm1, 0x1);
 8253     movdqa(xmm3, xmm0);
 8254     pand(xmm3, xmm2);
 8255     pclmulqdq(xmm0, xmm3, 0x1);
 8256   }
 8257   psrldq(xmm1, 8);
 8258   psrldq(xmm2, 4);
 8259   pxor(xmm0, xmm1);
 8260   pxor(xmm0, xmm2);
 8261 
 8262   // 8 8-bit folds to compute 32-bit CRC.
 8263   for (int j = 0; j < 4; j++) {
 8264     fold_8bit_crc32(xmm0, table, xmm1, rax);
 8265   }
 8266   movdl(crc, xmm0); // mov 32 bits to general register
 8267   for (int j = 0; j < 4; j++) {
 8268     fold_8bit_crc32(crc, table, rax);
 8269   }
 8270 
 8271   BIND(L_tail_restore);
 8272   movl(len, tmp); // restore
 8273   BIND(L_tail);
 8274   andl(len, 0xf);
 8275   jccb(Assembler::zero, L_exit);
 8276 
 8277   // Fold the rest of bytes
 8278   align(4);
 8279   BIND(L_tail_loop);
 8280   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 8281   update_byte_crc32(crc, rax, table);
 8282   increment(buf);
 8283   decrementl(len);
 8284   jccb(Assembler::greater, L_tail_loop);
 8285 
 8286   BIND(L_exit);
 8287   notl(crc); // ~c
 8288 }
 8289 
 8290 // Helper function for AVX 512 CRC32
 8291 // Fold 512-bit data chunks
 8292 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
 8293                                              Register pos, int offset) {
 8294   evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
  evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [127:64]
 8296   evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
 8297   evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
 8298   evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
 8299 }
 8300 
 8301 // Helper function for AVX 512 CRC32
 8302 // Compute CRC32 for < 256B buffers
 8303 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
 8304                                               Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
 8305                                               Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
 8306 
 8307   Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
 8308   Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
 8309   Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
 8310 
 8311   // check if there is enough buffer to be able to fold 16B at a time
 8312   cmpl(len, 32);
 8313   jcc(Assembler::less, L_less_than_32);
 8314 
 8315   // if there is, load the constants
 8316   movdqu(xmm10, Address(table, 1 * 16));    //rk1 and rk2 in xmm10
 8317   movdl(xmm0, crc);                        // get the initial crc value
 8318   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 8319   pxor(xmm7, xmm0);
 8320 
 8321   // update the buffer pointer
 8322   addl(pos, 16);
  // update the counter; subtract 32 instead of 16 to save one instruction in the loop
 8324   subl(len, 32);
 8325   jmp(L_16B_reduction_loop);
 8326 
 8327   bind(L_less_than_32);
  // move the initial crc to the return value; this is necessary for zero-length buffers.
 8329   movl(rax, crc);
 8330   testl(len, len);
 8331   jcc(Assembler::equal, L_cleanup);
 8332 
 8333   movdl(xmm0, crc);                        //get the initial crc value
 8334 
 8335   cmpl(len, 16);
 8336   jcc(Assembler::equal, L_exact_16_left);
 8337   jcc(Assembler::less, L_less_than_16_left);
 8338 
 8339   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 8340   pxor(xmm7, xmm0);                       //xor the initial crc value
 8341   addl(pos, 16);
 8342   subl(len, 16);
 8343   movdqu(xmm10, Address(table, 1 * 16));    // rk1 and rk2 in xmm10
 8344   jmp(L_get_last_two_xmms);
 8345 
 8346   bind(L_less_than_16_left);
  // use stack space to load data of fewer than 16 bytes; zero out the 16B in memory first.
 8348   pxor(xmm1, xmm1);
 8349   movptr(tmp1, rsp);
 8350   movdqu(Address(tmp1, 0 * 16), xmm1);
 8351 
 8352   cmpl(len, 4);
 8353   jcc(Assembler::less, L_only_less_than_4);
 8354 
 8355   //backup the counter value
 8356   movl(tmp2, len);
 8357   cmpl(len, 8);
 8358   jcc(Assembler::less, L_less_than_8_left);
 8359 
 8360   //load 8 Bytes
 8361   movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
 8362   movq(Address(tmp1, 0 * 16), rax);
 8363   addptr(tmp1, 8);
 8364   subl(len, 8);
 8365   addl(pos, 8);
 8366 
 8367   bind(L_less_than_8_left);
 8368   cmpl(len, 4);
 8369   jcc(Assembler::less, L_less_than_4_left);
 8370 
 8371   //load 4 Bytes
 8372   movl(rax, Address(buf, pos, Address::times_1, 0));
 8373   movl(Address(tmp1, 0 * 16), rax);
 8374   addptr(tmp1, 4);
 8375   subl(len, 4);
 8376   addl(pos, 4);
 8377 
 8378   bind(L_less_than_4_left);
 8379   cmpl(len, 2);
 8380   jcc(Assembler::less, L_less_than_2_left);
 8381 
 8382   // load 2 Bytes
 8383   movw(rax, Address(buf, pos, Address::times_1, 0));
 8384   movl(Address(tmp1, 0 * 16), rax);
 8385   addptr(tmp1, 2);
 8386   subl(len, 2);
 8387   addl(pos, 2);
 8388 
 8389   bind(L_less_than_2_left);
 8390   cmpl(len, 1);
 8391   jcc(Assembler::less, L_zero_left);
 8392 
 8393   // load 1 Byte
 8394   movb(rax, Address(buf, pos, Address::times_1, 0));
 8395   movb(Address(tmp1, 0 * 16), rax);
 8396 
 8397   bind(L_zero_left);
 8398   movdqu(xmm7, Address(rsp, 0));
 8399   pxor(xmm7, xmm0);                       //xor the initial crc value
 8400 
 8401   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8402   movdqu(xmm0, Address(rax, tmp2));
 8403   pshufb(xmm7, xmm0);
 8404   jmp(L_128_done);
 8405 
 8406   bind(L_exact_16_left);
 8407   movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
 8408   pxor(xmm7, xmm0);                       //xor the initial crc value
 8409   jmp(L_128_done);
 8410 
 8411   bind(L_only_less_than_4);
 8412   cmpl(len, 3);
 8413   jcc(Assembler::less, L_only_less_than_3);
 8414 
 8415   // load 3 Bytes
 8416   movb(rax, Address(buf, pos, Address::times_1, 0));
 8417   movb(Address(tmp1, 0), rax);
 8418 
 8419   movb(rax, Address(buf, pos, Address::times_1, 1));
 8420   movb(Address(tmp1, 1), rax);
 8421 
 8422   movb(rax, Address(buf, pos, Address::times_1, 2));
 8423   movb(Address(tmp1, 2), rax);
 8424 
 8425   movdqu(xmm7, Address(rsp, 0));
 8426   pxor(xmm7, xmm0);                     //xor the initial crc value
 8427 
 8428   pslldq(xmm7, 0x5);
 8429   jmp(L_barrett);
 8430   bind(L_only_less_than_3);
 8431   cmpl(len, 2);
 8432   jcc(Assembler::less, L_only_less_than_2);
 8433 
 8434   // load 2 Bytes
 8435   movb(rax, Address(buf, pos, Address::times_1, 0));
 8436   movb(Address(tmp1, 0), rax);
 8437 
 8438   movb(rax, Address(buf, pos, Address::times_1, 1));
 8439   movb(Address(tmp1, 1), rax);
 8440 
 8441   movdqu(xmm7, Address(rsp, 0));
 8442   pxor(xmm7, xmm0);                     //xor the initial crc value
 8443 
 8444   pslldq(xmm7, 0x6);
 8445   jmp(L_barrett);
 8446 
 8447   bind(L_only_less_than_2);
 8448   //load 1 Byte
 8449   movb(rax, Address(buf, pos, Address::times_1, 0));
 8450   movb(Address(tmp1, 0), rax);
 8451 
 8452   movdqu(xmm7, Address(rsp, 0));
 8453   pxor(xmm7, xmm0);                     //xor the initial crc value
 8454 
 8455   pslldq(xmm7, 0x7);
 8456 }
 8457 
 8458 /**
 8459 * Compute CRC32 using AVX512 instructions
 8460 * param crc   register containing existing CRC (32-bit)
 8461 * param buf   register pointing to input byte buffer (byte*)
 8462 * param len   register containing number of bytes
 8463 * param table address of crc or crc32c table
 8464 * param tmp1  scratch register
 8465 * param tmp2  scratch register
 8466 * return rax  result register
 8467 *
 8468 * This routine is identical for crc32c with the exception of the precomputed constant
 8469 * table which will be passed as the table argument.  The calculation steps are
 8470 * the same for both variants.
 8471 */
 8472 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
 8473   assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
 8474 
 8475   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8476   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8477   Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
 8478   Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
 8479   Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
 8480 
 8481   const Register pos = r12;
 8482   push(r12);
 8483   subptr(rsp, 16 * 2 + 8);
 8484 
  // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
  // context for the registers used, since all instructions below use 128-bit mode.
  // On EVEX without VL and BW, these instructions will all be AVX.
 8488   movl(pos, 0);
 8489 
 8490   // check if smaller than 256B
 8491   cmpl(len, 256);
 8492   jcc(Assembler::less, L_less_than_256);
 8493 
 8494   // load the initial crc value
 8495   movdl(xmm10, crc);
 8496 
 8497   // receive the initial 64B data, xor the initial crc value
 8498   evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
 8499   evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
 8500   evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
 8501   evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
 8502 
 8503   subl(len, 256);
 8504   cmpl(len, 256);
 8505   jcc(Assembler::less, L_fold_128_B_loop);
 8506 
 8507   evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
 8508   evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
 8509   evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
 8510   subl(len, 256);
 8511 
 8512   bind(L_fold_256_B_loop);
 8513   addl(pos, 256);
 8514   fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
 8515   fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
 8516   fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
 8517   fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
 8518 
 8519   subl(len, 256);
 8520   jcc(Assembler::greaterEqual, L_fold_256_B_loop);
 8521 
 8522   // Fold 256 into 128
 8523   addl(pos, 256);
 8524   evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
 8525   evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
 8526   vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
 8527 
 8528   evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
 8529   evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
 8530   vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
 8531 
 8532   evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
 8533   evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
 8534 
 8535   addl(len, 128);
 8536   jmp(L_fold_128_B_register);
 8537 
  // At this point there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
  // below folds 128B at a time until 128 + y bytes of buffer remain.

  // Fold 128B at a time. This section of the code folds two 512-bit (zmm) accumulators in parallel.
 8542   bind(L_fold_128_B_loop);
 8543   addl(pos, 128);
 8544   fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
 8545   fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
 8546 
 8547   subl(len, 128);
 8548   jcc(Assembler::greaterEqual, L_fold_128_B_loop);
 8549 
 8550   addl(pos, 128);
 8551 
  // At this point the buffer pointer points at the last y bytes of the buffer, where 0 <= y < 128.
  // The 128B of folded data is held in the two 512-bit accumulators zmm0 and zmm4.
 8554   bind(L_fold_128_B_register);
 8555   evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
 8556   evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
 8557   evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
 8558   evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
 8559   // save last that has no multiplicand
 8560   vextracti64x2(xmm7, xmm4, 3);
 8561 
 8562   evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
 8563   evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
 8564   // Needed later in reduction loop
 8565   movdqu(xmm10, Address(table, 1 * 16));
 8566   vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
 8567   vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
 8568 
 8569   // Swap 1,0,3,2 - 01 00 11 10
 8570   evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
 8571   evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
 8572   vextracti128(xmm5, xmm8, 1);
 8573   evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
 8574 
 8575   // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
 8576   // instead of a cmp instruction, we use the negative flag with the jl instruction
 8577   addl(len, 128 - 16);
 8578   jcc(Assembler::less, L_final_reduction_for_128);
 8579 
 8580   bind(L_16B_reduction_loop);
 8581   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8582   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8583   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8584   movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
 8585   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8586   addl(pos, 16);
 8587   subl(len, 16);
 8588   jcc(Assembler::greaterEqual, L_16B_reduction_loop);
 8589 
 8590   bind(L_final_reduction_for_128);
 8591   addl(len, 16);
 8592   jcc(Assembler::equal, L_128_done);
 8593 
 8594   bind(L_get_last_two_xmms);
 8595   movdqu(xmm2, xmm7);
 8596   addl(pos, len);
 8597   movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
 8598   subl(pos, len);
 8599 
 8600   // get rid of the extra data that was loaded before
 8601   // load the shift constant
 8602   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8603   movdqu(xmm0, Address(rax, len));
 8604   addl(rax, len);
 8605 
 8606   vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8607   // Change mask to 512
 8608   vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
 8609   vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
 8610 
 8611   blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
 8612   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8613   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8614   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8615   vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
 8616 
 8617   bind(L_128_done);
 8618   // compute crc of a 128-bit value
 8619   movdqu(xmm10, Address(table, 3 * 16));
 8620   movdqu(xmm0, xmm7);
 8621 
 8622   // 64b fold
 8623   vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
 8624   vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
 8625   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8626 
 8627   // 32b fold
 8628   movdqu(xmm0, xmm7);
 8629   vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
 8630   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8631   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8632   jmp(L_barrett);
 8633 
 8634   bind(L_less_than_256);
 8635   kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
 8636 
 8637   // Barrett reduction
 8638   bind(L_barrett);
 8639   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
 8640   movdqu(xmm1, xmm7);
 8641   movdqu(xmm2, xmm7);
 8642   movdqu(xmm10, Address(table, 4 * 16));
 8643 
 8644   pclmulqdq(xmm7, xmm10, 0x0);
 8645   pxor(xmm7, xmm2);
 8646   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
 8647   movdqu(xmm2, xmm7);
 8648   pclmulqdq(xmm7, xmm10, 0x10);
 8649   pxor(xmm7, xmm2);
 8650   pxor(xmm7, xmm1);
 8651   pextrd(crc, xmm7, 2);
 8652 
 8653   bind(L_cleanup);
 8654   addptr(rsp, 16 * 2 + 8);
 8655   pop(r12);
 8656 }
 8657 
 8658 // S. Gueron / Information Processing Letters 112 (2012) 184
 8659 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
 8660 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
 8661 // Output: the 64-bit carry-less product of B * CONST
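      //
      // For reference, a scalar C-style sketch of the same lookup-based multiply
      // (hypothetical names; TABLE[n][b] stands for the 256-entry, 8-bytes-per-entry
      // table for chunk n located at StubRoutines::crc32c_table_addr()):
      //
      //   uint64_t clmul_by_table(uint32_t B, uint32_t n) {
      //     uint64_t q1 = TABLE[n][ B        & 0xFF];
      //     uint64_t q2 = TABLE[n][(B >>  8) & 0xFF] << 8;
      //     uint64_t q3 = TABLE[n][(B >> 16) & 0xFF] << 16;
      //     uint64_t q4 = TABLE[n][(B >> 24) & 0xFF] << 24;
      //     return q1 ^ q2 ^ q3 ^ q4;
      //   }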
 8662 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
 8663                                      Register tmp1, Register tmp2, Register tmp3) {
 8664   lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
 8665   if (n > 0) {
 8666     addq(tmp3, n * 256 * 8);
 8667   }
 8668   //    Q1 = TABLEExt[n][B & 0xFF];
 8669   movl(tmp1, in);
 8670   andl(tmp1, 0x000000FF);
 8671   shll(tmp1, 3);
 8672   addq(tmp1, tmp3);
 8673   movq(tmp1, Address(tmp1, 0));
 8674 
 8675   //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
 8676   movl(tmp2, in);
 8677   shrl(tmp2, 8);
 8678   andl(tmp2, 0x000000FF);
 8679   shll(tmp2, 3);
 8680   addq(tmp2, tmp3);
 8681   movq(tmp2, Address(tmp2, 0));
 8682 
 8683   shlq(tmp2, 8);
 8684   xorq(tmp1, tmp2);
 8685 
 8686   //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
 8687   movl(tmp2, in);
 8688   shrl(tmp2, 16);
 8689   andl(tmp2, 0x000000FF);
 8690   shll(tmp2, 3);
 8691   addq(tmp2, tmp3);
 8692   movq(tmp2, Address(tmp2, 0));
 8693 
 8694   shlq(tmp2, 16);
 8695   xorq(tmp1, tmp2);
 8696 
 8697   //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
 8698   shrl(in, 24);
 8699   andl(in, 0x000000FF);
 8700   shll(in, 3);
 8701   addq(in, tmp3);
 8702   movq(in, Address(in, 0));
 8703 
 8704   shlq(in, 24);
 8705   xorq(in, tmp1);
 8706   //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
 8707 }
 8708 
 8709 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
 8710                                       Register in_out,
 8711                                       uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
 8712                                       XMMRegister w_xtmp2,
 8713                                       Register tmp1,
 8714                                       Register n_tmp2, Register n_tmp3) {
 8715   if (is_pclmulqdq_supported) {
 8716     movdl(w_xtmp1, in_out); // modified blindly
 8717 
 8718     movl(tmp1, const_or_pre_comp_const_index);
 8719     movdl(w_xtmp2, tmp1);
 8720     pclmulqdq(w_xtmp1, w_xtmp2, 0);
 8721 
 8722     movdq(in_out, w_xtmp1);
 8723   } else {
 8724     crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
 8725   }
 8726 }
 8727 
 8728 // Recombination Alternative 2: No bit-reflections
 8729 // T1 = (CRC_A * U1) << 1
 8730 // T2 = (CRC_B * U2) << 1
 8731 // C1 = T1 >> 32
 8732 // C2 = T2 >> 32
 8733 // T1 = T1 & 0xFFFFFFFF
 8734 // T2 = T2 & 0xFFFFFFFF
 8735 // T1 = CRC32(0, T1)
 8736 // T2 = CRC32(0, T2)
 8737 // C1 = C1 ^ T1
 8738 // C2 = C2 ^ T2
 8739 // CRC = C1 ^ C2 ^ CRC_C
 8740 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
 8741                                      XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8742                                      Register tmp1, Register tmp2,
 8743                                      Register n_tmp3) {
 8744   crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8745   crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8746   shlq(in_out, 1);
 8747   movl(tmp1, in_out);
 8748   shrq(in_out, 32);
 8749   xorl(tmp2, tmp2);
 8750   crc32(tmp2, tmp1, 4);
 8751   xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
 8752   shlq(in1, 1);
 8753   movl(tmp1, in1);
 8754   shrq(in1, 32);
 8755   xorl(tmp2, tmp2);
 8756   crc32(tmp2, tmp1, 4);
 8757   xorl(in1, tmp2);
 8758   xorl(in_out, in1);
 8759   xorl(in_out, in2);
 8760 }
 8761 
 8762 // Set N to predefined value
 8763 // Subtract from a length of a buffer
 8764 // execute in a loop:
 8765 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
 8766 // for i = 1 to N do
 8767 //  CRC_A = CRC32(CRC_A, A[i])
 8768 //  CRC_B = CRC32(CRC_B, B[i])
 8769 //  CRC_C = CRC32(CRC_C, C[i])
 8770 // end for
 8771 // Recombine
 8772 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
 8773                                        Register in_out1, Register in_out2, Register in_out3,
 8774                                        Register tmp1, Register tmp2, Register tmp3,
 8775                                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8776                                        Register tmp4, Register tmp5,
 8777                                        Register n_tmp6) {
 8778   Label L_processPartitions;
 8779   Label L_processPartition;
 8780   Label L_exit;
 8781 
 8782   bind(L_processPartitions);
 8783   cmpl(in_out1, 3 * size);
 8784   jcc(Assembler::less, L_exit);
 8785     xorl(tmp1, tmp1);
 8786     xorl(tmp2, tmp2);
 8787     movq(tmp3, in_out2);
 8788     addq(tmp3, size);
 8789 
 8790     bind(L_processPartition);
 8791       crc32(in_out3, Address(in_out2, 0), 8);
 8792       crc32(tmp1, Address(in_out2, size), 8);
 8793       crc32(tmp2, Address(in_out2, size * 2), 8);
 8794       addq(in_out2, 8);
 8795       cmpq(in_out2, tmp3);
 8796       jcc(Assembler::less, L_processPartition);
 8797     crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
 8798             w_xtmp1, w_xtmp2, w_xtmp3,
 8799             tmp4, tmp5,
 8800             n_tmp6);
 8801     addq(in_out2, 2 * size);
 8802     subl(in_out1, 3 * size);
 8803     jmp(L_processPartitions);
 8804 
 8805   bind(L_exit);
 8806 }
 8807 
 8808 // Algorithm 2: Pipelined usage of the CRC32 instruction.
 8809 // Input: A buffer I of L bytes.
 8810 // Output: the CRC32C value of the buffer.
 8811 // Notations:
 8812 // Write L = 24N + r, with N = floor (L/24).
 8813 // r = L mod 24 (0 <= r < 24).
 8814 // Consider I as the concatenation of A|B|C|R, where A, B, C each consist of
 8815 // N quadwords, and R consists of r bytes.
 8816 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
 8817 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
 8818 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
 8819 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
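      //
      // Rough sketch of the driver below (hedged; proc_chunk stands for
      // crc32c_proc_chunk, CRC32C for the hardware crc32 instruction):
      //
      //   proc_chunk(CRC32C_HIGH);     // each call runs A/B/C as three independent
      //   proc_chunk(CRC32C_MIDDLE);   // crc32 dependency chains and recombines them
      //   proc_chunk(CRC32C_LOW);
      //   while (at least 8 bytes remain)  crc = CRC32C(crc, next quadword);  // L_wordByWord
      //   while (bytes remain)             crc = CRC32C(crc, next byte);      // L_byteByByte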
 8820 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
 8821                                           Register tmp1, Register tmp2, Register tmp3,
 8822                                           Register tmp4, Register tmp5, Register tmp6,
 8823                                           XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8824                                           bool is_pclmulqdq_supported) {
 8825   uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
 8826   Label L_wordByWord;
 8827   Label L_byteByByteProlog;
 8828   Label L_byteByByte;
 8829   Label L_exit;
 8830 
 8831   if (is_pclmulqdq_supported ) {
 8832     const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
 8833     const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
 8834 
 8835     const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
 8836     const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
 8837 
 8838     const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
 8839     const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
 8840     assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
 8841   } else {
 8842     const_or_pre_comp_const_index[0] = 1;
 8843     const_or_pre_comp_const_index[1] = 0;
 8844 
 8845     const_or_pre_comp_const_index[2] = 3;
 8846     const_or_pre_comp_const_index[3] = 2;
 8847 
 8848     const_or_pre_comp_const_index[4] = 5;
 8849     const_or_pre_comp_const_index[5] = 4;
 8850    }
 8851   crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
 8852                     in2, in1, in_out,
 8853                     tmp1, tmp2, tmp3,
 8854                     w_xtmp1, w_xtmp2, w_xtmp3,
 8855                     tmp4, tmp5,
 8856                     tmp6);
 8857   crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
 8858                     in2, in1, in_out,
 8859                     tmp1, tmp2, tmp3,
 8860                     w_xtmp1, w_xtmp2, w_xtmp3,
 8861                     tmp4, tmp5,
 8862                     tmp6);
 8863   crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
 8864                     in2, in1, in_out,
 8865                     tmp1, tmp2, tmp3,
 8866                     w_xtmp1, w_xtmp2, w_xtmp3,
 8867                     tmp4, tmp5,
 8868                     tmp6);
 8869   movl(tmp1, in2);
 8870   andl(tmp1, 0x00000007);
 8871   negl(tmp1);
 8872   addl(tmp1, in2);
 8873   addq(tmp1, in1);
 8874 
 8875   cmpq(in1, tmp1);
 8876   jccb(Assembler::greaterEqual, L_byteByByteProlog);
 8877   align(16);
 8878   BIND(L_wordByWord);
 8879     crc32(in_out, Address(in1, 0), 8);
 8880     addq(in1, 8);
 8881     cmpq(in1, tmp1);
 8882     jcc(Assembler::less, L_wordByWord);
 8883 
 8884   BIND(L_byteByByteProlog);
 8885   andl(in2, 0x00000007);
 8886   movl(tmp2, 1);
 8887 
 8888   cmpl(tmp2, in2);
 8889   jccb(Assembler::greater, L_exit);
 8890   BIND(L_byteByByte);
 8891     crc32(in_out, Address(in1, 0), 1);
 8892     incq(in1);
 8893     incl(tmp2);
 8894     cmpl(tmp2, in2);
 8895     jcc(Assembler::lessEqual, L_byteByByte);
 8896 
 8897   BIND(L_exit);
 8898 }
 8899 #undef BIND
 8900 #undef BLOCK_COMMENT
 8901 
 8902 // Compress char[] array to byte[].
 8903 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
 8904 // Return the array length if every element in array can be encoded,
 8905 // otherwise, the index of the first non-latin1 (> 0xff) character.
 8906 //   @IntrinsicCandidate
 8907 //   public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
 8908 //     for (int i = 0; i < len; i++) {
 8909 //       char c = src[srcOff];
 8910 //       if (c > 0xff) {
 8911 //           return i;  // return index of non-latin1 char
 8912 //       }
 8913 //       dst[dstOff] = (byte)c;
 8914 //       srcOff++;
 8915 //       dstOff++;
 8916 //     }
 8917 //     return len;
 8918 //   }
 8919 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
 8920   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 8921   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 8922   Register tmp5, Register result, KRegister mask1, KRegister mask2) {
 8923   Label copy_chars_loop, done, reset_sp, copy_tail;
 8924 
 8925   // rsi: src
 8926   // rdi: dst
 8927   // rdx: len
 8928   // rcx: tmp5
 8929   // rax: result
 8930 
 8931   // rsi holds start addr of source char[] to be compressed
 8932   // rdi holds start addr of destination byte[]
 8933   // rdx holds length
 8934 
 8935   assert(len != result, "");
 8936 
 8937   // save length for return
 8938   movl(result, len);
 8939 
 8940   if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
 8941     VM_Version::supports_avx512vlbw() &&
 8942     VM_Version::supports_bmi2()) {
 8943 
 8944     Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
 8945 
 8946     // alignment
 8947     Label post_alignment;
 8948 
 8949     // if the length of the string is less than 32, handle it the old-fashioned way
 8950     testl(len, -32);
 8951     jcc(Assembler::zero, below_threshold);
 8952 
 8953     // First check whether a character is compressible (<= 0xFF).
 8954     // Create mask to test for Unicode chars inside zmm vector
 8955     movl(tmp5, 0x00FF);
 8956     evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
 8957 
 8958     testl(len, -64);
 8959     jccb(Assembler::zero, post_alignment);
 8960 
 8961     movl(tmp5, dst);
 8962     andl(tmp5, (32 - 1));
 8963     negl(tmp5);
 8964     andl(tmp5, (32 - 1));
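          // tmp5 = (-dst) & 31: the number of bytes (== chars) needed to reach a 32-byte-aligned destination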
 8965 
 8966     // bail out when there is nothing to be done
 8967     testl(tmp5, 0xFFFFFFFF);
 8968     jccb(Assembler::zero, post_alignment);
 8969 
 8970     // ~(~0 << tmp5), where tmp5 is the # of alignment-prologue elements to process
 8971     movl(len, 0xFFFFFFFF);
 8972     shlxl(len, len, tmp5);
 8973     notl(len);
 8974     kmovdl(mask2, len);
 8975     movl(len, result);
 8976 
 8977     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 8978     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 8979     ktestd(mask1, mask2);
 8980     jcc(Assembler::carryClear, copy_tail);
 8981 
 8982     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 8983 
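          // advance past the prologue: src moves two bytes per char (hence the two addptr), dst one byte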
 8984     addptr(src, tmp5);
 8985     addptr(src, tmp5);
 8986     addptr(dst, tmp5);
 8987     subl(len, tmp5);
 8988 
 8989     bind(post_alignment);
 8990     // end of alignment
 8991 
 8992     movl(tmp5, len);
 8993     andl(tmp5, (32 - 1));    // tail count (in chars)
 8994     andl(len, ~(32 - 1));    // vector count (in chars)
 8995     jccb(Assembler::zero, copy_loop_tail);
 8996 
 8997     lea(src, Address(src, len, Address::times_2));
 8998     lea(dst, Address(dst, len, Address::times_1));
 8999     negptr(len);
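          // len is negative and counts up to zero: the addptr(len, 32) in the loop both advances
          // the index and sets the flags for the notZero branch, so no separate cmp is needed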
 9000 
 9001     bind(copy_32_loop);
 9002     evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
 9003     evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
 9004     kortestdl(mask1, mask1);
 9005     jccb(Assembler::carryClear, reset_for_copy_tail);
 9006 
 9007     // All elements in the currently processed chunk are valid candidates for
 9008     // compression. Write the truncated byte elements to memory.
 9009     evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
 9010     addptr(len, 32);
 9011     jccb(Assembler::notZero, copy_32_loop);
 9012 
 9013     bind(copy_loop_tail);
 9014     // bail out when there is nothing to be done
 9015     testl(tmp5, 0xFFFFFFFF);
 9016     jcc(Assembler::zero, done);
 9017 
 9018     movl(len, tmp5);
 9019 
 9020     // ~(~0 << len), where len is the # of remaining elements to process
 9021     movl(tmp5, 0xFFFFFFFF);
 9022     shlxl(tmp5, tmp5, len);
 9023     notl(tmp5);
 9024 
 9025     kmovdl(mask2, tmp5);
 9026 
 9027     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 9028     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 9029     ktestd(mask1, mask2);
 9030     jcc(Assembler::carryClear, copy_tail);
 9031 
 9032     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 9033     jmp(done);
 9034 
 9035     bind(reset_for_copy_tail);
 9036     lea(src, Address(src, tmp5, Address::times_2));
 9037     lea(dst, Address(dst, tmp5, Address::times_1));
 9038     subptr(len, tmp5);
 9039     jmp(copy_chars_loop);
 9040 
 9041     bind(below_threshold);
 9042   }
 9043 
 9044   if (UseSSE42Intrinsics) {
 9045     Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
 9046 
 9047     // vectored compression
 9048     testl(len, 0xfffffff8);
 9049     jcc(Assembler::zero, copy_tail);
 9050 
 9051     movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors
 9052     movdl(tmp1Reg, tmp5);
 9053     pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
 9054 
 9055     andl(len, 0xfffffff0);
 9056     jccb(Assembler::zero, copy_16);
 9057 
 9058     // compress 16 chars per iter
 9059     pxor(tmp4Reg, tmp4Reg);
 9060 
 9061     lea(src, Address(src, len, Address::times_2));
 9062     lea(dst, Address(dst, len, Address::times_1));
 9063     negptr(len);
 9064 
 9065     bind(copy_32_loop);
 9066     movdqu(tmp2Reg, Address(src, len, Address::times_2));     // load 1st 8 characters
 9067     por(tmp4Reg, tmp2Reg);
 9068     movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
 9069     por(tmp4Reg, tmp3Reg);
 9070     ptest(tmp4Reg, tmp1Reg);       // check for Unicode chars in next vector
 9071     jccb(Assembler::notZero, reset_for_copy_tail);
 9072     packuswb(tmp2Reg, tmp3Reg);    // only ASCII chars; compress each to 1 byte
 9073     movdqu(Address(dst, len, Address::times_1), tmp2Reg);
 9074     addptr(len, 16);
 9075     jccb(Assembler::notZero, copy_32_loop);
 9076 
 9077     // compress next vector of 8 chars (if any)
 9078     bind(copy_16);
 9079     // len = 0
 9080     testl(result, 0x00000008);     // check if there's a block of 8 chars to compress
 9081     jccb(Assembler::zero, copy_tail_sse);
 9082 
 9083     pxor(tmp3Reg, tmp3Reg);
 9084 
 9085     movdqu(tmp2Reg, Address(src, 0));
 9086     ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
 9087     jccb(Assembler::notZero, reset_for_copy_tail);
 9088     packuswb(tmp2Reg, tmp3Reg);    // only LATIN1 chars; compress each to 1 byte
 9089     movq(Address(dst, 0), tmp2Reg);
 9090     addptr(src, 16);
 9091     addptr(dst, 8);
 9092     jmpb(copy_tail_sse);
 9093 
 9094     bind(reset_for_copy_tail);
 9095     movl(tmp5, result);
 9096     andl(tmp5, 0x0000000f);
 9097     lea(src, Address(src, tmp5, Address::times_2));
 9098     lea(dst, Address(dst, tmp5, Address::times_1));
 9099     subptr(len, tmp5);
 9100     jmpb(copy_chars_loop);
 9101 
 9102     bind(copy_tail_sse);
 9103     movl(len, result);
 9104     andl(len, 0x00000007);    // tail count (in chars)
 9105   }
 9106   // compress 1 char per iter
 9107   bind(copy_tail);
 9108   testl(len, len);
 9109   jccb(Assembler::zero, done);
 9110   lea(src, Address(src, len, Address::times_2));
 9111   lea(dst, Address(dst, len, Address::times_1));
 9112   negptr(len);
 9113 
 9114   bind(copy_chars_loop);
 9115   load_unsigned_short(tmp5, Address(src, len, Address::times_2));
 9116   testl(tmp5, 0xff00);      // check if Unicode char
 9117   jccb(Assembler::notZero, reset_sp);
 9118   movb(Address(dst, len, Address::times_1), tmp5);  // ASCII char; compress to 1 byte
 9119   increment(len);
 9120   jccb(Assembler::notZero, copy_chars_loop);
 9121 
 9122   // add len then return (len will be zero if compress succeeded, otherwise negative)
 9123   bind(reset_sp);
 9124   addl(result, len);
 9125 
 9126   bind(done);
 9127 }
 9128 
 9129 // Inflate byte[] array to char[].
 9130 //   ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
 9131 //   @IntrinsicCandidate
 9132 //   private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
 9133 //     for (int i = 0; i < len; i++) {
 9134 //       dst[dstOff++] = (char)(src[srcOff++] & 0xff);
 9135 //     }
 9136 //   }
 9137 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
 9138   XMMRegister tmp1, Register tmp2, KRegister mask) {
 9139   Label copy_chars_loop, done, below_threshold, avx3_threshold;
 9140   // rsi: src
 9141   // rdi: dst
 9142   // rdx: len
 9143   // rcx: tmp2
 9144 
 9145   // rsi holds start addr of source byte[] to be inflated
 9146   // rdi holds start addr of destination char[]
 9147   // rdx holds length
 9148   assert_different_registers(src, dst, len, tmp2);
 9149   movl(tmp2, len);
 9150   if ((UseAVX > 2) && // AVX512
 9151     VM_Version::supports_avx512vlbw() &&
 9152     VM_Version::supports_bmi2()) {
 9153 
 9154     Label copy_32_loop, copy_tail;
 9155     Register tmp3_aliased = len;
 9156 
 9157     // if the length of the string is less than 16, handle it the old-fashioned way
 9158     testl(len, -16);
 9159     jcc(Assembler::zero, below_threshold);
 9160 
 9161     testl(len, -1 * AVX3Threshold);
 9162     jcc(Assembler::zero, avx3_threshold);
 9163 
 9164     // In order to use only one arithmetic operation for the main loop we use
 9165     // this pre-calculation
 9166     andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
 9167     andl(len, -32);     // vector count
 9168     jccb(Assembler::zero, copy_tail);
 9169 
 9170     lea(src, Address(src, len, Address::times_1));
 9171     lea(dst, Address(dst, len, Address::times_2));
 9172     negptr(len);
 9173 
 9174 
 9175     // inflate 32 chars per iter
 9176     bind(copy_32_loop);
 9177     vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
 9178     evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
 9179     addptr(len, 32);
 9180     jcc(Assembler::notZero, copy_32_loop);
 9181 
 9182     bind(copy_tail);
 9183     // bail out when there is nothing to be done
 9184     testl(tmp2, -1); // we don't destroy the contents of tmp2 here
 9185     jcc(Assembler::zero, done);
 9186 
 9187     // ~(~0 << length), where length is the # of remaining elements to process
 9188     movl(tmp3_aliased, -1);
 9189     shlxl(tmp3_aliased, tmp3_aliased, tmp2);
 9190     notl(tmp3_aliased);
 9191     kmovdl(mask, tmp3_aliased);
 9192     evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
 9193     evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
 9194 
 9195     jmp(done);
 9196     bind(avx3_threshold);
 9197   }
 9198   if (UseSSE42Intrinsics) {
 9199     Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
 9200 
 9201     if (UseAVX > 1) {
 9202       andl(tmp2, (16 - 1));
 9203       andl(len, -16);
 9204       jccb(Assembler::zero, copy_new_tail);
 9205     } else {
 9206       andl(tmp2, 0x00000007);   // tail count (in chars)
 9207       andl(len, 0xfffffff8);    // vector count (in chars)
 9208       jccb(Assembler::zero, copy_tail);
 9209     }
 9210 
 9211     // vectored inflation
 9212     lea(src, Address(src, len, Address::times_1));
 9213     lea(dst, Address(dst, len, Address::times_2));
 9214     negptr(len);
 9215 
 9216     if (UseAVX > 1) {
 9217       bind(copy_16_loop);
 9218       vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
 9219       vmovdqu(Address(dst, len, Address::times_2), tmp1);
 9220       addptr(len, 16);
 9221       jcc(Assembler::notZero, copy_16_loop);
 9222 
 9223       bind(below_threshold);
 9224       bind(copy_new_tail);
 9225       movl(len, tmp2);
 9226       andl(tmp2, 0x00000007);
 9227       andl(len, 0xFFFFFFF8);
 9228       jccb(Assembler::zero, copy_tail);
 9229 
 9230       pmovzxbw(tmp1, Address(src, 0));
 9231       movdqu(Address(dst, 0), tmp1);
 9232       addptr(src, 8);
 9233       addptr(dst, 2 * 8);
 9234 
 9235       jmp(copy_tail, true);
 9236     }
 9237 
 9238     // inflate 8 chars per iter
 9239     bind(copy_8_loop);
 9240     pmovzxbw(tmp1, Address(src, len, Address::times_1));  // unpack to 8 words
 9241     movdqu(Address(dst, len, Address::times_2), tmp1);
 9242     addptr(len, 8);
 9243     jcc(Assembler::notZero, copy_8_loop);
 9244 
 9245     bind(copy_tail);
 9246     movl(len, tmp2);
 9247 
 9248     cmpl(len, 4);
 9249     jccb(Assembler::less, copy_bytes);
 9250 
 9251     movdl(tmp1, Address(src, 0));  // load 4 byte chars
 9252     pmovzxbw(tmp1, tmp1);
 9253     movq(Address(dst, 0), tmp1);
 9254     subptr(len, 4);
 9255     addptr(src, 4);
 9256     addptr(dst, 8);
 9257 
 9258     bind(copy_bytes);
 9259   } else {
 9260     bind(below_threshold);
 9261   }
 9262 
 9263   testl(len, len);
 9264   jccb(Assembler::zero, done);
 9265   lea(src, Address(src, len, Address::times_1));
 9266   lea(dst, Address(dst, len, Address::times_2));
 9267   negptr(len);
 9268 
 9269   // inflate 1 char per iter
 9270   bind(copy_chars_loop);
 9271   load_unsigned_byte(tmp2, Address(src, len, Address::times_1));  // load byte char
 9272   movw(Address(dst, len, Address::times_2), tmp2);  // inflate byte char to word
 9273   increment(len);
 9274   jcc(Assembler::notZero, copy_chars_loop);
 9275 
 9276   bind(done);
 9277 }
 9278 
 9279 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
 9280   switch(type) {
 9281     case T_BYTE:
 9282     case T_BOOLEAN:
 9283       evmovdqub(dst, kmask, src, merge, vector_len);
 9284       break;
 9285     case T_CHAR:
 9286     case T_SHORT:
 9287       evmovdquw(dst, kmask, src, merge, vector_len);
 9288       break;
 9289     case T_INT:
 9290     case T_FLOAT:
 9291       evmovdqul(dst, kmask, src, merge, vector_len);
 9292       break;
 9293     case T_LONG:
 9294     case T_DOUBLE:
 9295       evmovdquq(dst, kmask, src, merge, vector_len);
 9296       break;
 9297     default:
 9298       fatal("Unexpected type argument %s", type2name(type));
 9299       break;
 9300   }
 9301 }
 9302 
 9303 
 9304 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
 9305   switch(type) {
 9306     case T_BYTE:
 9307     case T_BOOLEAN:
 9308       evmovdqub(dst, kmask, src, merge, vector_len);
 9309       break;
 9310     case T_CHAR:
 9311     case T_SHORT:
 9312       evmovdquw(dst, kmask, src, merge, vector_len);
 9313       break;
 9314     case T_INT:
 9315     case T_FLOAT:
 9316       evmovdqul(dst, kmask, src, merge, vector_len);
 9317       break;
 9318     case T_LONG:
 9319     case T_DOUBLE:
 9320       evmovdquq(dst, kmask, src, merge, vector_len);
 9321       break;
 9322     default:
 9323       fatal("Unexpected type argument %s", type2name(type));
 9324       break;
 9325   }
 9326 }
 9327 
 9328 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
 9329   switch(type) {
 9330     case T_BYTE:
 9331     case T_BOOLEAN:
 9332       evmovdqub(dst, kmask, src, merge, vector_len);
 9333       break;
 9334     case T_CHAR:
 9335     case T_SHORT:
 9336       evmovdquw(dst, kmask, src, merge, vector_len);
 9337       break;
 9338     case T_INT:
 9339     case T_FLOAT:
 9340       evmovdqul(dst, kmask, src, merge, vector_len);
 9341       break;
 9342     case T_LONG:
 9343     case T_DOUBLE:
 9344       evmovdquq(dst, kmask, src, merge, vector_len);
 9345       break;
 9346     default:
 9347       fatal("Unexpected type argument %s", type2name(type));
 9348       break;
 9349   }
 9350 }
 9351 
 9352 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
 9353   switch(masklen) {
 9354     case 2:
 9355        knotbl(dst, src);
 9356        movl(rtmp, 3);
 9357        kmovbl(ktmp, rtmp);
 9358        kandbl(dst, ktmp, dst);
 9359        break;
 9360     case 4:
 9361        knotbl(dst, src);
 9362        movl(rtmp, 15);
 9363        kmovbl(ktmp, rtmp);
 9364        kandbl(dst, ktmp, dst);
 9365        break;
 9366     case 8:
 9367        knotbl(dst, src);
 9368        break;
 9369     case 16:
 9370        knotwl(dst, src);
 9371        break;
 9372     case 32:
 9373        knotdl(dst, src);
 9374        break;
 9375     case 64:
 9376        knotql(dst, src);
 9377        break;
 9378     default:
 9379       fatal("Unexpected vector length %d", masklen);
 9380       break;
 9381   }
 9382 }
 9383 
 9384 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9385   switch(type) {
 9386     case T_BOOLEAN:
 9387     case T_BYTE:
 9388        kandbl(dst, src1, src2);
 9389        break;
 9390     case T_CHAR:
 9391     case T_SHORT:
 9392        kandwl(dst, src1, src2);
 9393        break;
 9394     case T_INT:
 9395     case T_FLOAT:
 9396        kanddl(dst, src1, src2);
 9397        break;
 9398     case T_LONG:
 9399     case T_DOUBLE:
 9400        kandql(dst, src1, src2);
 9401        break;
 9402     default:
 9403       fatal("Unexpected type argument %s", type2name(type));
 9404       break;
 9405   }
 9406 }
 9407 
 9408 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9409   switch(type) {
 9410     case T_BOOLEAN:
 9411     case T_BYTE:
 9412        korbl(dst, src1, src2);
 9413        break;
 9414     case T_CHAR:
 9415     case T_SHORT:
 9416        korwl(dst, src1, src2);
 9417        break;
 9418     case T_INT:
 9419     case T_FLOAT:
 9420        kordl(dst, src1, src2);
 9421        break;
 9422     case T_LONG:
 9423     case T_DOUBLE:
 9424        korql(dst, src1, src2);
 9425        break;
 9426     default:
 9427       fatal("Unexpected type argument %s", type2name(type));
 9428       break;
 9429   }
 9430 }
 9431 
 9432 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9433   switch(type) {
 9434     case T_BOOLEAN:
 9435     case T_BYTE:
 9436        kxorbl(dst, src1, src2);
 9437        break;
 9438     case T_CHAR:
 9439     case T_SHORT:
 9440        kxorwl(dst, src1, src2);
 9441        break;
 9442     case T_INT:
 9443     case T_FLOAT:
 9444        kxordl(dst, src1, src2);
 9445        break;
 9446     case T_LONG:
 9447     case T_DOUBLE:
 9448        kxorql(dst, src1, src2);
 9449        break;
 9450     default:
 9451       fatal("Unexpected type argument %s", type2name(type));
 9452       break;
 9453   }
 9454 }
 9455 
 9456 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9457   switch(type) {
 9458     case T_BOOLEAN:
 9459     case T_BYTE:
 9460       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9461     case T_CHAR:
 9462     case T_SHORT:
 9463       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9464     case T_INT:
 9465     case T_FLOAT:
 9466       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9467     case T_LONG:
 9468     case T_DOUBLE:
 9469       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9470     default:
 9471       fatal("Unexpected type argument %s", type2name(type)); break;
 9472   }
 9473 }
 9474 
 9475 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9476   switch(type) {
 9477     case T_BOOLEAN:
 9478     case T_BYTE:
 9479       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9480     case T_CHAR:
 9481     case T_SHORT:
 9482       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9483     case T_INT:
 9484     case T_FLOAT:
 9485       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9486     case T_LONG:
 9487     case T_DOUBLE:
 9488       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9489     default:
 9490       fatal("Unexpected type argument %s", type2name(type)); break;
 9491   }
 9492 }
 9493 
 9494 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9495   switch(type) {
 9496     case T_BYTE:
 9497       evpminub(dst, mask, nds, src, merge, vector_len); break;
 9498     case T_SHORT:
 9499       evpminuw(dst, mask, nds, src, merge, vector_len); break;
 9500     case T_INT:
 9501       evpminud(dst, mask, nds, src, merge, vector_len); break;
 9502     case T_LONG:
 9503       evpminuq(dst, mask, nds, src, merge, vector_len); break;
 9504     default:
 9505       fatal("Unexpected type argument %s", type2name(type)); break;
 9506   }
 9507 }
 9508 
 9509 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9510   switch(type) {
 9511     case T_BYTE:
 9512       evpmaxub(dst, mask, nds, src, merge, vector_len); break;
 9513     case T_SHORT:
 9514       evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
 9515     case T_INT:
 9516       evpmaxud(dst, mask, nds, src, merge, vector_len); break;
 9517     case T_LONG:
 9518       evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
 9519     default:
 9520       fatal("Unexpected type argument %s", type2name(type)); break;
 9521   }
 9522 }
 9523 
 9524 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9525   switch(type) {
 9526     case T_BYTE:
 9527       evpminub(dst, mask, nds, src, merge, vector_len); break;
 9528     case T_SHORT:
 9529       evpminuw(dst, mask, nds, src, merge, vector_len); break;
 9530     case T_INT:
 9531       evpminud(dst, mask, nds, src, merge, vector_len); break;
 9532     case T_LONG:
 9533       evpminuq(dst, mask, nds, src, merge, vector_len); break;
 9534     default:
 9535       fatal("Unexpected type argument %s", type2name(type)); break;
 9536   }
 9537 }
 9538 
 9539 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9540   switch(type) {
 9541     case T_BYTE:
 9542       evpmaxub(dst, mask, nds, src, merge, vector_len); break;
 9543     case T_SHORT:
 9544       evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
 9545     case T_INT:
 9546       evpmaxud(dst, mask, nds, src, merge, vector_len); break;
 9547     case T_LONG:
 9548       evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
 9549     default:
 9550       fatal("Unexpected type argument %s", type2name(type)); break;
 9551   }
 9552 }
 9553 
 9554 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9555   switch(type) {
 9556     case T_BYTE:
 9557       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9558     case T_SHORT:
 9559       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9560     case T_INT:
 9561       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9562     case T_LONG:
 9563       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9564     default:
 9565       fatal("Unexpected type argument %s", type2name(type)); break;
 9566   }
 9567 }
 9568 
 9569 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9570   switch(type) {
 9571     case T_BYTE:
 9572       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9573     case T_SHORT:
 9574       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9575     case T_INT:
 9576       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9577     case T_LONG:
 9578       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9579     default:
 9580       fatal("Unexpected type argument %s", type2name(type)); break;
 9581   }
 9582 }
 9583 
 9584 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9585   switch(type) {
 9586     case T_BYTE:
 9587       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9588     case T_SHORT:
 9589       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9590     case T_INT:
 9591       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9592     case T_LONG:
 9593       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9594     default:
 9595       fatal("Unexpected type argument %s", type2name(type)); break;
 9596   }
 9597 }
 9598 
 9599 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9600   switch(type) {
 9601     case T_BYTE:
 9602       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9603     case T_SHORT:
 9604       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9605     case T_INT:
 9606       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9607     case T_LONG:
 9608       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9609     default:
 9610       fatal("Unexpected type argument %s", type2name(type)); break;
 9611   }
 9612 }
 9613 
 9614 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9615   switch(type) {
 9616     case T_INT:
 9617       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9618     case T_LONG:
 9619       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9620     default:
 9621       fatal("Unexpected type argument %s", type2name(type)); break;
 9622   }
 9623 }
 9624 
 9625 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9626   switch(type) {
 9627     case T_INT:
 9628       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9629     case T_LONG:
 9630       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9631     default:
 9632       fatal("Unexpected type argument %s", type2name(type)); break;
 9633   }
 9634 }
 9635 
 9636 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9637   switch(type) {
 9638     case T_INT:
 9639       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9640     case T_LONG:
 9641       evporq(dst, mask, nds, src, merge, vector_len); break;
 9642     default:
 9643       fatal("Unexpected type argument %s", type2name(type)); break;
 9644   }
 9645 }
 9646 
 9647 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9648   switch(type) {
 9649     case T_INT:
 9650       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9651     case T_LONG:
 9652       evporq(dst, mask, nds, src, merge, vector_len); break;
 9653     default:
 9654       fatal("Unexpected type argument %s", type2name(type)); break;
 9655   }
 9656 }
 9657 
 9658 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9659   switch(type) {
 9660     case T_INT:
 9661       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9662     case T_LONG:
 9663       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9664     default:
 9665       fatal("Unexpected type argument %s", type2name(type)); break;
 9666   }
 9667 }
 9668 
 9669 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9670   switch(type) {
 9671     case T_INT:
 9672       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9673     case T_LONG:
 9674       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9675     default:
 9676       fatal("Unexpected type argument %s", type2name(type)); break;
 9677   }
 9678 }
 9679 
 9680 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
 9681   switch(masklen) {
 9682     case 8:
 9683        kortestbl(src1, src2);
 9684        break;
 9685     case 16:
 9686        kortestwl(src1, src2);
 9687        break;
 9688     case 32:
 9689        kortestdl(src1, src2);
 9690        break;
 9691     case 64:
 9692        kortestql(src1, src2);
 9693        break;
 9694     default:
 9695       fatal("Unexpected mask length %d", masklen);
 9696       break;
 9697   }
 9698 }
 9699 
 9700 
 9701 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
 9702   switch(masklen)  {
 9703     case 8:
 9704        ktestbl(src1, src2);
 9705        break;
 9706     case 16:
 9707        ktestwl(src1, src2);
 9708        break;
 9709     case 32:
 9710        ktestdl(src1, src2);
 9711        break;
 9712     case 64:
 9713        ktestql(src1, src2);
 9714        break;
 9715     default:
 9716       fatal("Unexpected mask length %d", masklen);
 9717       break;
 9718   }
 9719 }
 9720 
 9721 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9722   switch(type) {
 9723     case T_INT:
 9724       evprold(dst, mask, src, shift, merge, vlen_enc); break;
 9725     case T_LONG:
 9726       evprolq(dst, mask, src, shift, merge, vlen_enc); break;
 9727     default:
 9728       fatal("Unexpected type argument %s", type2name(type)); break;
 9729       break;
 9730   }
 9731 }
 9732 
 9733 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9734   switch(type) {
 9735     case T_INT:
 9736       evprord(dst, mask, src, shift, merge, vlen_enc); break;
 9737     case T_LONG:
 9738       evprorq(dst, mask, src, shift, merge, vlen_enc); break;
 9739     default:
 9740       fatal("Unexpected type argument %s", type2name(type)); break;
 9741   }
 9742 }
 9743 
 9744 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9745   switch(type) {
 9746     case T_INT:
 9747       evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9748     case T_LONG:
 9749       evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9750     default:
 9751       fatal("Unexpected type argument %s", type2name(type)); break;
 9752   }
 9753 }
 9754 
 9755 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9756   switch(type) {
 9757     case T_INT:
 9758       evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9759     case T_LONG:
 9760       evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9761     default:
 9762       fatal("Unexpected type argument %s", type2name(type)); break;
 9763   }
 9764 }
 9765 
 9766 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9767   assert(rscratch != noreg || always_reachable(src), "missing");
 9768 
 9769   if (reachable(src)) {
 9770     evpandq(dst, nds, as_Address(src), vector_len);
 9771   } else {
 9772     lea(rscratch, src);
 9773     evpandq(dst, nds, Address(rscratch, 0), vector_len);
 9774   }
 9775 }
 9776 
 9777 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 9778   assert(rscratch != noreg || always_reachable(src), "missing");
 9779 
 9780   if (reachable(src)) {
 9781     Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
 9782   } else {
 9783     lea(rscratch, src);
 9784     Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 9785   }
 9786 }
 9787 
 9788 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9789   assert(rscratch != noreg || always_reachable(src), "missing");
 9790 
 9791   if (reachable(src)) {
 9792     evporq(dst, nds, as_Address(src), vector_len);
 9793   } else {
 9794     lea(rscratch, src);
 9795     evporq(dst, nds, Address(rscratch, 0), vector_len);
 9796   }
 9797 }
 9798 
 9799 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9800   assert(rscratch != noreg || always_reachable(src), "missing");
 9801 
 9802   if (reachable(src)) {
 9803     vpshufb(dst, nds, as_Address(src), vector_len);
 9804   } else {
 9805     lea(rscratch, src);
 9806     vpshufb(dst, nds, Address(rscratch, 0), vector_len);
 9807   }
 9808 }
 9809 
 9810 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9811   assert(rscratch != noreg || always_reachable(src), "missing");
 9812 
 9813   if (reachable(src)) {
 9814     Assembler::vpor(dst, nds, as_Address(src), vector_len);
 9815   } else {
 9816     lea(rscratch, src);
 9817     Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
 9818   }
 9819 }
 9820 
 9821 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
 9822   assert(rscratch != noreg || always_reachable(src3), "missing");
 9823 
 9824   if (reachable(src3)) {
 9825     vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
 9826   } else {
 9827     lea(rscratch, src3);
 9828     vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
 9829   }
 9830 }
 9831 
 9832 #if COMPILER2_OR_JVMCI
 9833 
 9834 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
 9835                                  Register length, Register temp, int vec_enc) {
 9836   // Computing mask for predicated vector store.
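        // mask = (1 << length) - 1: movptr puts all-ones in temp, BZHI then clears every bit at
        // position >= length, leaving exactly the low 'length' lanes enabled.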
 9837   movptr(temp, -1);
 9838   bzhiq(temp, temp, length);
 9839   kmov(mask, temp);
 9840   evmovdqu(bt, mask, dst, xmm, true, vec_enc);
 9841 }
 9842 
 9843 // Set memory operation for lengths less than 64 bytes.
 9844 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
 9845                                        XMMRegister xmm, KRegister mask, Register length,
 9846                                        Register temp, bool use64byteVector) {
 9847   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9848   const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9849   if (!use64byteVector) {
 9850     fill32(dst, disp, xmm);
 9851     subptr(length, 32 >> shift);
 9852     fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
 9853   } else {
 9854     assert(MaxVectorSize == 64, "vector length != 64");
 9855     fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
 9856   }
 9857 }
 9858 
 9859 
 9860 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
 9861                                        XMMRegister xmm, KRegister mask, Register length,
 9862                                        Register temp) {
 9863   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9864   const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9865   fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
 9866 }
 9867 
 9868 
 9869 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
 9870   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9871   vmovdqu(dst, xmm);
 9872 }
 9873 
 9874 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
 9875   fill32(Address(dst, disp), xmm);
 9876 }
 9877 
 9878 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
 9879   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9880   if (!use64byteVector) {
 9881     fill32(dst, xmm);
 9882     fill32(dst.plus_disp(32), xmm);
 9883   } else {
 9884     evmovdquq(dst, xmm, Assembler::AVX_512bit);
 9885   }
 9886 }
 9887 
 9888 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
 9889   fill64(Address(dst, disp), xmm, use64byteVector);
 9890 }
 9891 
 9892 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
 9893                                         Register count, Register rtmp, XMMRegister xtmp) {
 9894   Label L_exit;
 9895   Label L_fill_start;
 9896   Label L_fill_64_bytes;
 9897   Label L_fill_96_bytes;
 9898   Label L_fill_128_bytes;
 9899   Label L_fill_128_bytes_loop;
 9900   Label L_fill_128_loop_header;
 9901   Label L_fill_128_bytes_loop_header;
 9902   Label L_fill_128_bytes_loop_pre_header;
 9903   Label L_fill_zmm_sequence;
 9904 
 9905   int shift = -1;
 9906   int avx3threshold = VM_Version::avx3_threshold();
 9907   switch(type) {
 9908     case T_BYTE:  shift = 0;
 9909       break;
 9910     case T_SHORT: shift = 1;
 9911       break;
 9912     case T_INT:   shift = 2;
 9913       break;
 9914     /* Uncomment when LONG fill stubs are supported.
 9915     case T_LONG:  shift = 3;
 9916       break;
 9917     */
 9918     default:
 9919       fatal("Unhandled type: %s\n", type2name(type));
 9920   }
 9921 
 9922   if ((avx3threshold != 0)  || (MaxVectorSize == 32)) {
 9923 
 9924     if (MaxVectorSize == 64) {
 9925       cmpq(count, avx3threshold >> shift);
 9926       jcc(Assembler::greater, L_fill_zmm_sequence);
 9927     }
 9928 
 9929     evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
 9930 
 9931     bind(L_fill_start);
 9932 
 9933     cmpq(count, 32 >> shift);
 9934     jccb(Assembler::greater, L_fill_64_bytes);
 9935     fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
 9936     jmp(L_exit);
 9937 
 9938     bind(L_fill_64_bytes);
 9939     cmpq(count, 64 >> shift);
 9940     jccb(Assembler::greater, L_fill_96_bytes);
 9941     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
 9942     jmp(L_exit);
 9943 
 9944     bind(L_fill_96_bytes);
 9945     cmpq(count, 96 >> shift);
 9946     jccb(Assembler::greater, L_fill_128_bytes);
 9947     fill64(to, 0, xtmp);
 9948     subq(count, 64 >> shift);
 9949     fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
 9950     jmp(L_exit);
 9951 
 9952     bind(L_fill_128_bytes);
 9953     cmpq(count, 128 >> shift);
 9954     jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
 9955     fill64(to, 0, xtmp);
 9956     fill32(to, 64, xtmp);
 9957     subq(count, 96 >> shift);
 9958     fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
 9959     jmp(L_exit);
 9960 
 9961     bind(L_fill_128_bytes_loop_pre_header);
 9962     {
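                  // Peel off (32 - (to & 31)) leading bytes with a masked store so the main
                  // 128-byte loop below writes to a 32-byte-aligned destination; count is
                  // reduced by the corresponding number of elements (rtmp >> shift).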
 9963       mov(rtmp, to);
 9964       andq(rtmp, 31);
 9965       jccb(Assembler::zero, L_fill_128_bytes_loop_header);
 9966       negq(rtmp);
 9967       addq(rtmp, 32);
 9968       mov64(r8, -1L);
 9969       bzhiq(r8, r8, rtmp);
 9970       kmovql(k2, r8);
 9971       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
 9972       addq(to, rtmp);
 9973       shrq(rtmp, shift);
 9974       subq(count, rtmp);
 9975     }
 9976 
 9977     cmpq(count, 128 >> shift);
 9978     jcc(Assembler::less, L_fill_start);
 9979 
 9980     bind(L_fill_128_bytes_loop_header);
 9981     subq(count, 128 >> shift);
 9982 
 9983     align32();
 9984     bind(L_fill_128_bytes_loop);
 9985       fill64(to, 0, xtmp);
 9986       fill64(to, 64, xtmp);
 9987       addq(to, 128);
 9988       subq(count, 128 >> shift);
 9989       jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
 9990 
 9991     addq(count, 128 >> shift);
 9992     jcc(Assembler::zero, L_exit);
 9993     jmp(L_fill_start);
 9994   }
 9995 
 9996   if (MaxVectorSize == 64) {
 9997     // Sequence using 64 byte ZMM register.
 9998     Label L_fill_128_bytes_zmm;
 9999     Label L_fill_192_bytes_zmm;
10000     Label L_fill_192_bytes_loop_zmm;
10001     Label L_fill_192_bytes_loop_header_zmm;
10002     Label L_fill_192_bytes_loop_pre_header_zmm;
10003     Label L_fill_start_zmm_sequence;
10004 
10005     bind(L_fill_zmm_sequence);
10006     evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
10007 
10008     bind(L_fill_start_zmm_sequence);
10009     cmpq(count, 64 >> shift);
10010     jccb(Assembler::greater, L_fill_128_bytes_zmm);
10011     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
10012     jmp(L_exit);
10013 
10014     bind(L_fill_128_bytes_zmm);
10015     cmpq(count, 128 >> shift);
10016     jccb(Assembler::greater, L_fill_192_bytes_zmm);
10017     fill64(to, 0, xtmp, true);
10018     subq(count, 64 >> shift);
10019     fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
10020     jmp(L_exit);
10021 
10022     bind(L_fill_192_bytes_zmm);
10023     cmpq(count, 192 >> shift);
10024     jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
10025     fill64(to, 0, xtmp, true);
10026     fill64(to, 64, xtmp, true);
10027     subq(count, 128 >> shift);
10028     fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
10029     jmp(L_exit);
10030 
10031     bind(L_fill_192_bytes_loop_pre_header_zmm);
10032     {
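                  // Same peeling as above, but to a 64-byte boundary for the 64-byte ZMM loop.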
10033       movq(rtmp, to);
10034       andq(rtmp, 63);
10035       jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
10036       negq(rtmp);
10037       addq(rtmp, 64);
10038       mov64(r8, -1L);
10039       bzhiq(r8, r8, rtmp);
10040       kmovql(k2, r8);
10041       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
10042       addq(to, rtmp);
10043       shrq(rtmp, shift);
10044       subq(count, rtmp);
10045     }
10046 
10047     cmpq(count, 192 >> shift);
10048     jcc(Assembler::less, L_fill_start_zmm_sequence);
10049 
10050     bind(L_fill_192_bytes_loop_header_zmm);
10051     subq(count, 192 >> shift);
10052 
10053     align32();
10054     bind(L_fill_192_bytes_loop_zmm);
10055       fill64(to, 0, xtmp, true);
10056       fill64(to, 64, xtmp, true);
10057       fill64(to, 128, xtmp, true);
10058       addq(to, 192);
10059       subq(count, 192 >> shift);
10060       jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
10061 
10062     addq(count, 192 >> shift);
10063     jcc(Assembler::zero, L_exit);
10064     jmp(L_fill_start_zmm_sequence);
10065   }
10066   bind(L_exit);
10067 }
10068 #endif //COMPILER2_OR_JVMCI
10069 
10070 
10071 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
10072   Label done;
10073   cvttss2sil(dst, src);
10074   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
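        // (on those inputs cvttss2si writes the integer-indefinite value 0x80000000, which is what
        // the compare below detects; convert_d2i and convert_f2l below use the same pattern, the
        // latter with the 64-bit indefinite value)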
10075   cmpl(dst, 0x80000000); // float_sign_flip
10076   jccb(Assembler::notEqual, done);
10077   subptr(rsp, 8);
10078   movflt(Address(rsp, 0), src);
10079   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
10080   pop(dst);
10081   bind(done);
10082 }
10083 
10084 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
10085   Label done;
10086   cvttsd2sil(dst, src);
10087   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10088   cmpl(dst, 0x80000000); // float_sign_flip
10089   jccb(Assembler::notEqual, done);
10090   subptr(rsp, 8);
10091   movdbl(Address(rsp, 0), src);
10092   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
10093   pop(dst);
10094   bind(done);
10095 }
10096 
10097 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
10098   Label done;
10099   cvttss2siq(dst, src);
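  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub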
10100   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10101   jccb(Assembler::notEqual, done);
10102   subptr(rsp, 8);
10103   movflt(Address(rsp, 0), src);
10104   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
10105   pop(dst);
10106   bind(done);
10107 }
10108 
10109 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to java.lang.Math.round(float) for the details of the algorithm.
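  //
  // As a rough sketch (the authoritative definition is java.lang.Math.round(float)),
  // the Java-level algorithm mirrored below is:
  //
  //   int bits  = Float.floatToRawIntBits(f);
  //   int shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS)
  //               - ((bits & EXP_BIT_MASK) >> (SIGNIFICAND_WIDTH - 1));
  //   if ((shift & -32) == 0) {                      // 0 <= shift < 32
  //     int r = (bits & SIGNIF_BIT_MASK) | (SIGNIF_BIT_MASK + 1);
  //     if (bits < 0) r = -r;
  //     return ((r >> shift) + 1) >> 1;
  //   } else {
  //     return (int) f;                              // special case, handled via convert_f2i below
  //   }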
10112   const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
10113   const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
10114   const int32_t FloatConsts_EXP_BIAS = 127;
10115   const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
10116   const int32_t MINUS_32 = 0xFFFFFFE0;
10117   Label L_special_case, L_block1, L_exit;
10118   movl(rtmp, FloatConsts_EXP_BIT_MASK);
10119   movdl(dst, src);
10120   andl(dst, rtmp);
10121   sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
10122   movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
10123   subl(rtmp, dst);
10124   movl(rcx, rtmp);
10125   movl(dst, MINUS_32);
10126   testl(rtmp, dst);
10127   jccb(Assembler::notEqual, L_special_case);
10128   movdl(dst, src);
10129   andl(dst, FloatConsts_SIGNIF_BIT_MASK);
10130   orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
10131   movdl(rtmp, src);
10132   testl(rtmp, rtmp);
10133   jccb(Assembler::greaterEqual, L_block1);
10134   negl(dst);
10135   bind(L_block1);
10136   sarl(dst);
10137   addl(dst, 0x1);
10138   sarl(dst, 0x1);
10139   jmp(L_exit);
10140   bind(L_special_case);
10141   convert_f2i(dst, src);
10142   bind(L_exit);
10143 }
10144 
10145 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to java.lang.Math.round(double) for the details of the algorithm.
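  //
  // As a rough sketch (the authoritative definition is java.lang.Math.round(double)),
  // the Java-level algorithm mirrored below is:
  //
  //   long bits  = Double.doubleToRawLongBits(d);
  //   long shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS)
  //                - ((bits & EXP_BIT_MASK) >> (SIGNIFICAND_WIDTH - 1));
  //   if ((shift & -64) == 0) {                      // 0 <= shift < 64
  //     long r = (bits & SIGNIF_BIT_MASK) | (SIGNIF_BIT_MASK + 1);
  //     if (bits < 0) r = -r;
  //     return ((r >> shift) + 1) >> 1;
  //   } else {
  //     return (long) d;                             // special case, handled via convert_d2l below
  //   }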
10148   const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
10149   const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
10150   const int64_t DoubleConsts_EXP_BIAS = 1023;
10151   const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
10152   const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
10153   Label L_special_case, L_block1, L_exit;
10154   mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
10155   movq(dst, src);
10156   andq(dst, rtmp);
10157   sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
10158   mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
10159   subq(rtmp, dst);
10160   movq(rcx, rtmp);
10161   mov64(dst, MINUS_64);
10162   testq(rtmp, dst);
10163   jccb(Assembler::notEqual, L_special_case);
10164   movq(dst, src);
10165   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
10166   andq(dst, rtmp);
10167   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
10168   orq(dst, rtmp);
10169   movq(rtmp, src);
10170   testq(rtmp, rtmp);
10171   jccb(Assembler::greaterEqual, L_block1);
10172   negq(dst);
10173   bind(L_block1);
10174   sarq(dst);
10175   addq(dst, 0x1);
10176   sarq(dst, 0x1);
10177   jmp(L_exit);
10178   bind(L_special_case);
10179   convert_d2l(dst, src);
10180   bind(L_exit);
10181 }
10182 
10183 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
10184   Label done;
10185   cvttsd2siq(dst, src);
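  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub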
10186   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10187   jccb(Assembler::notEqual, done);
10188   subptr(rsp, 8);
10189   movdbl(Address(rsp, 0), src);
10190   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
10191   pop(dst);
10192   bind(done);
10193 }
10194 
10195 void MacroAssembler::cache_wb(Address line)
10196 {
  // 64-bit CPUs always support clflush
10198   assert(VM_Version::supports_clflush(), "clflush should be available");
10199   bool optimized = VM_Version::supports_clflushopt();
10200   bool no_evict = VM_Version::supports_clwb();
10201 
  // Prefer clwb (writeback without evict); otherwise
  // prefer clflushopt (potentially parallel writeback with evict);
  // otherwise fall back on clflush (serial writeback with evict).
10205 
10206   if (optimized) {
10207     if (no_evict) {
10208       clwb(line);
10209     } else {
10210       clflushopt(line);
10211     }
10212   } else {
10213     // no need for fence when using CLFLUSH
10214     clflush(line);
10215   }
10216 }
10217 
10218 void MacroAssembler::cache_wbsync(bool is_pre)
10219 {
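  // Emit whatever ordering is needed before (is_pre) or after (!is_pre) a sequence
  // of cache_wb line flushes; only the post-flush case needs an sfence, and only
  // when the flushes were done with clflushopt or clwb.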
10220   assert(VM_Version::supports_clflush(), "clflush should be available");
10221   bool optimized = VM_Version::supports_clflushopt();
10222   bool no_evict = VM_Version::supports_clwb();
10223 
10224   // pick the correct implementation
10225 
10226   if (!is_pre && (optimized || no_evict)) {
    // Need an sfence for the post flush when using clflushopt or clwb;
    // otherwise no synchronization is needed at all.
10229 
10230     sfence();
10231   }
10232 }
10233 
10234 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
10235   switch (cond) {
10236     // Note some conditions are synonyms for others
10237     case Assembler::zero:         return Assembler::notZero;
10238     case Assembler::notZero:      return Assembler::zero;
10239     case Assembler::less:         return Assembler::greaterEqual;
10240     case Assembler::lessEqual:    return Assembler::greater;
10241     case Assembler::greater:      return Assembler::lessEqual;
10242     case Assembler::greaterEqual: return Assembler::less;
10243     case Assembler::below:        return Assembler::aboveEqual;
10244     case Assembler::belowEqual:   return Assembler::above;
10245     case Assembler::above:        return Assembler::belowEqual;
10246     case Assembler::aboveEqual:   return Assembler::below;
10247     case Assembler::overflow:     return Assembler::noOverflow;
10248     case Assembler::noOverflow:   return Assembler::overflow;
10249     case Assembler::negative:     return Assembler::positive;
10250     case Assembler::positive:     return Assembler::negative;
10251     case Assembler::parity:       return Assembler::noParity;
10252     case Assembler::noParity:     return Assembler::parity;
10253   }
10254   ShouldNotReachHere(); return Assembler::overflow;
10255 }
10256 
10257 // This is simply a call to Thread::current()
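// The C call may clobber all caller-saved registers, so they are preserved around
// the call; the result comes back in rax and is copied into 'thread' if needed.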
10258 void MacroAssembler::get_thread_slow(Register thread) {
10259   if (thread != rax) {
10260     push(rax);
10261   }
10262   push(rdi);
10263   push(rsi);
10264   push(rdx);
10265   push(rcx);
10266   push(r8);
10267   push(r9);
10268   push(r10);
10269   push(r11);
10270 
10271   MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
10272 
10273   pop(r11);
10274   pop(r10);
10275   pop(r9);
10276   pop(r8);
10277   pop(rcx);
10278   pop(rdx);
10279   pop(rsi);
10280   pop(rdi);
10281   if (thread != rax) {
10282     mov(thread, rax);
10283     pop(rax);
10284   }
10285 }
10286 
10287 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
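  // Verify that (sp + bias) is aligned to 2 * wordSize (16 bytes); if not, print
  // 'msg' and stop.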
10288   Label L_stack_ok;
10289   if (bias == 0) {
10290     testptr(sp, 2 * wordSize - 1);
10291   } else {
    // lea(tmp, Address(sp, bias));
10293     mov(tmp, sp);
10294     addptr(tmp, bias);
10295     testptr(tmp, 2 * wordSize - 1);
10296   }
10297   jcc(Assembler::equal, L_stack_ok);
10298   block_comment(msg);
10299   stop(msg);
10300   bind(L_stack_ok);
10301 }
10302 
// Implements lightweight-locking.
//
// basic_lock: the BasicObjectLock to be used for locking
// obj: the object to be locked
// reg_rax: rax
// tmp: a temporary register
// slow: label to branch to if the fast path fails
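//
// On the fast path the mark word lock bits are moved from 0b01 (unlocked) to
// 0b00 (fast-locked) with a CAS and the object is pushed onto the thread-local
// lock-stack; recursive locking just pushes the object again. Any other mark
// word state (e.g. an inflated monitor, 0b10) branches to 'slow'.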
10309 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
10310   Register thread = r15_thread;
10311 
10312   assert(reg_rax == rax, "");
10313   assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
10314 
10315   Label push;
10316   const Register top = tmp;
10317 
10318   // Preload the markWord. It is important that this is the first
10319   // instruction emitted as it is part of C1's null check semantics.
10320   movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10321 
10322   if (UseObjectMonitorTable) {
10323     // Clear cache in case fast locking succeeds or we need to take the slow-path.
10324     movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
10325   }
10326 
10327   if (DiagnoseSyncOnValueBasedClasses != 0) {
10328     load_klass(tmp, obj, rscratch1);
10329     testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
10330     jcc(Assembler::notZero, slow);
10331   }
10332 
10333   // Load top.
10334   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10335 
10336   // Check if the lock-stack is full.
10337   cmpl(top, LockStack::end_offset());
10338   jcc(Assembler::greaterEqual, slow);
10339 
10340   // Check for recursion.
10341   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10342   jcc(Assembler::equal, push);
10343 
10344   // Check header for monitor (0b10).
10345   testptr(reg_rax, markWord::monitor_value);
10346   jcc(Assembler::notZero, slow);
10347 
10348   // Try to lock. Transition lock bits 0b01 => 0b00
10349   movptr(tmp, reg_rax);
10350   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10351   orptr(reg_rax, markWord::unlocked_value);
10352   if (EnableValhalla) {
10353     // Mask inline_type bit such that we go to the slow path if object is an inline type
10354     andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10355   }
10356   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10357   jcc(Assembler::notEqual, slow);
10358 
10359   // Restore top, CAS clobbers register.
10360   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10361 
10362   bind(push);
10363   // After successful lock, push object on lock-stack.
10364   movptr(Address(thread, top), obj);
10365   incrementl(top, oopSize);
10366   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10367 }
10368 
// Implements lightweight-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// tmp: a temporary register
// slow: label to branch to if the fast path fails
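//
// On the fast path the object is popped off the thread-local lock-stack and,
// unless the unlock is recursive, the mark word lock bits are moved back from
// 0b00 to 0b01 with a CAS; an inflated monitor or a failed CAS restores the
// lock-stack and branches to 'slow'.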
10375 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
10376   Register thread = r15_thread;
10377 
10378   assert(reg_rax == rax, "");
10379   assert_different_registers(obj, reg_rax, thread, tmp);
10380 
10381   Label unlocked, push_and_slow;
10382   const Register top = tmp;
10383 
10384   // Check if obj is top of lock-stack.
10385   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10386   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10387   jcc(Assembler::notEqual, slow);
10388 
10389   // Pop lock-stack.
10390   DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10391   subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10392 
10393   // Check if recursive.
10394   cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10395   jcc(Assembler::equal, unlocked);
10396 
10397   // Not recursive. Check header for monitor (0b10).
10398   movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10399   testptr(reg_rax, markWord::monitor_value);
10400   jcc(Assembler::notZero, push_and_slow);
10401 
10402 #ifdef ASSERT
10403   // Check header not unlocked (0b01).
10404   Label not_unlocked;
10405   testptr(reg_rax, markWord::unlocked_value);
10406   jcc(Assembler::zero, not_unlocked);
10407   stop("lightweight_unlock already unlocked");
10408   bind(not_unlocked);
10409 #endif
10410 
10411   // Try to unlock. Transition lock bits 0b00 => 0b01
10412   movptr(tmp, reg_rax);
10413   orptr(tmp, markWord::unlocked_value);
10414   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10415   jcc(Assembler::equal, unlocked);
10416 
10417   bind(push_and_slow);
10418   // Restore lock-stack and handle the unlock in runtime.
10419 #ifdef ASSERT
10420   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10421   movptr(Address(thread, top), obj);
10422 #endif
10423   addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10424   jmp(slow);
10425 
10426   bind(unlocked);
10427 }
10428 
// Saves the state of the legacy GPRs on the stack.
10430 void MacroAssembler::save_legacy_gprs() {
10431   subq(rsp, 16 * wordSize);
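  // Slots follow the legacy register encoding order (rax = 15 * wordSize down to
  // r15 = 0); rsp itself is not saved, so its slot at 11 * wordSize stays unused.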
10432   movq(Address(rsp, 15 * wordSize), rax);
10433   movq(Address(rsp, 14 * wordSize), rcx);
10434   movq(Address(rsp, 13 * wordSize), rdx);
10435   movq(Address(rsp, 12 * wordSize), rbx);
10436   movq(Address(rsp, 10 * wordSize), rbp);
10437   movq(Address(rsp, 9 * wordSize), rsi);
10438   movq(Address(rsp, 8 * wordSize), rdi);
10439   movq(Address(rsp, 7 * wordSize), r8);
10440   movq(Address(rsp, 6 * wordSize), r9);
10441   movq(Address(rsp, 5 * wordSize), r10);
10442   movq(Address(rsp, 4 * wordSize), r11);
10443   movq(Address(rsp, 3 * wordSize), r12);
10444   movq(Address(rsp, 2 * wordSize), r13);
10445   movq(Address(rsp, wordSize), r14);
10446   movq(Address(rsp, 0), r15);
10447 }
10448 
// Restores the state of the legacy GPRs from the stack.
10450 void MacroAssembler::restore_legacy_gprs() {
10451   movq(r15, Address(rsp, 0));
10452   movq(r14, Address(rsp, wordSize));
10453   movq(r13, Address(rsp, 2 * wordSize));
10454   movq(r12, Address(rsp, 3 * wordSize));
10455   movq(r11, Address(rsp, 4 * wordSize));
10456   movq(r10, Address(rsp, 5 * wordSize));
10457   movq(r9,  Address(rsp, 6 * wordSize));
10458   movq(r8,  Address(rsp, 7 * wordSize));
10459   movq(rdi, Address(rsp, 8 * wordSize));
10460   movq(rsi, Address(rsp, 9 * wordSize));
10461   movq(rbp, Address(rsp, 10 * wordSize));
10462   movq(rbx, Address(rsp, 12 * wordSize));
10463   movq(rdx, Address(rsp, 13 * wordSize));
10464   movq(rcx, Address(rsp, 14 * wordSize));
10465   movq(rax, Address(rsp, 15 * wordSize));
10466   addq(rsp, 16 * wordSize);
10467 }
10468 
10469 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
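  // Materialize 'comparison' as a zero-extended 0/1 value in dst: with APX,
  // esetzucc does this in a single instruction; otherwise set the low byte
  // and zero-extend it.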
10470   if (VM_Version::supports_apx_f()) {
10471     esetzucc(comparison, dst);
10472   } else {
10473     setb(comparison, dst);
10474     movzbl(dst, dst);
10475   }
10476 }