1 /*
    2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
    3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4  *
    5  * This code is free software; you can redistribute it and/or modify it
    6  * under the terms of the GNU General Public License version 2 only, as
    7  * published by the Free Software Foundation.
    8  *
    9  * This code is distributed in the hope that it will be useful, but WITHOUT
   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "precompiled.hpp"
   26 #include "asm/assembler.hpp"
   27 #include "asm/assembler.inline.hpp"
#include "ci/ciInlineKlass.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "jvm.h"
   39 #include "memory/resourceArea.hpp"
   40 #include "memory/universe.hpp"
   41 #include "oops/accessDecorators.hpp"
   42 #include "oops/compressedOops.inline.hpp"
   43 #include "oops/klass.inline.hpp"
   44 #include "prims/methodHandles.hpp"
   45 #include "runtime/continuation.hpp"
   46 #include "runtime/flags/flagSetting.hpp"
   47 #include "runtime/interfaceSupport.inline.hpp"
   48 #include "runtime/javaThread.hpp"
   49 #include "runtime/jniHandles.hpp"
   50 #include "runtime/objectMonitor.hpp"
   51 #include "runtime/os.hpp"
   52 #include "runtime/safepoint.hpp"
   53 #include "runtime/safepointMechanism.hpp"
   54 #include "runtime/sharedRuntime.hpp"
   55 #include "runtime/signature_cc.hpp"
   56 #include "runtime/stubRoutines.hpp"
   57 #include "utilities/macros.hpp"
   58 #include "vmreg_x86.inline.hpp"
   59 #ifdef COMPILER2
   60 #include "opto/output.hpp"
   61 #endif
   62 
   63 #ifdef PRODUCT
   64 #define BLOCK_COMMENT(str) /* nothing */
   65 #define STOP(error) stop(error)
   66 #else
   67 #define BLOCK_COMMENT(str) block_comment(str)
   68 #define STOP(error) block_comment(error); stop(error)
   69 #endif
   70 
   71 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   72 
   73 #ifdef ASSERT
   74 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   75 #endif
   76 
   77 static Assembler::Condition reverse[] = {
   78     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   79     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   80     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   81     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
   82     Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
   83     Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
   84     Assembler::above          /* belowEqual    = 0x6 */ ,
   85     Assembler::belowEqual     /* above         = 0x7 */ ,
   86     Assembler::positive       /* negative      = 0x8 */ ,
   87     Assembler::negative       /* positive      = 0x9 */ ,
   88     Assembler::noParity       /* parity        = 0xa */ ,
   89     Assembler::parity         /* noParity      = 0xb */ ,
   90     Assembler::greaterEqual   /* less          = 0xc */ ,
   91     Assembler::less           /* greaterEqual  = 0xd */ ,
   92     Assembler::greater        /* lessEqual     = 0xe */ ,
   93     Assembler::lessEqual      /* greater       = 0xf, */
   94 
   95 };
   96 
   97 
   98 // Implementation of MacroAssembler
   99 
// First, all the routines that have distinct versions depending on 32/64 bit,
// unless the difference is trivial (a line or so).
  102 
  103 #ifndef _LP64
  104 
  105 // 32bit versions
  106 
  107 Address MacroAssembler::as_Address(AddressLiteral adr) {
  108   return Address(adr.target(), adr.rspec());
  109 }
  110 
  111 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  112   assert(rscratch == noreg, "");
  113   return Address::make_array(adr);
  114 }
  115 
  116 void MacroAssembler::call_VM_leaf_base(address entry_point,
  117                                        int number_of_arguments) {
  118   call(RuntimeAddress(entry_point));
  119   increment(rsp, number_of_arguments * wordSize);
  120 }
  121 
  122 void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  123   cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
  124 }
  125 
  126 
  127 void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  128   cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
  129 }
  130 
  131 void MacroAssembler::cmpoop(Address src1, jobject obj) {
  132   cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
  133 }
  134 
  135 void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  136   assert(rscratch == noreg, "redundant");
  137   cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
  138 }
  139 
  140 void MacroAssembler::extend_sign(Register hi, Register lo) {
  141   // According to Intel Doc. AP-526, "Integer Divide", p.18.
  142   if (VM_Version::is_P6() && hi == rdx && lo == rax) {
  143     cdql();
  144   } else {
  145     movl(hi, lo);
  146     sarl(hi, 31);
  147   }
  148 }
  149 
  150 void MacroAssembler::jC2(Register tmp, Label& L) {
  151   // set parity bit if FPU flag C2 is set (via rax)
  152   save_rax(tmp);
  153   fwait(); fnstsw_ax();
  154   sahf();
  155   restore_rax(tmp);
  156   // branch
  157   jcc(Assembler::parity, L);
  158 }
  159 
  160 void MacroAssembler::jnC2(Register tmp, Label& L) {
  161   // set parity bit if FPU flag C2 is set (via rax)
  162   save_rax(tmp);
  163   fwait(); fnstsw_ax();
  164   sahf();
  165   restore_rax(tmp);
  166   // branch
  167   jcc(Assembler::noParity, L);
  168 }
  169 
// 32bit can do a case table jump in one instruction, but we no longer allow the base
// to be installed in the Address class.
  172 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  173   assert(rscratch == noreg, "not needed");
  174   jmp(as_Address(entry, noreg));
  175 }
  176 
  177 // Note: y_lo will be destroyed
  178 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  179   // Long compare for Java (semantics as described in JVM spec.)
  180   Label high, low, done;
  181 
  182   cmpl(x_hi, y_hi);
  183   jcc(Assembler::less, low);
  184   jcc(Assembler::greater, high);
  185   // x_hi is the return register
  186   xorl(x_hi, x_hi);
  187   cmpl(x_lo, y_lo);
  188   jcc(Assembler::below, low);
  189   jcc(Assembler::equal, done);
  190 
  191   bind(high);
  192   xorl(x_hi, x_hi);
  193   increment(x_hi);
  194   jmp(done);
  195 
  196   bind(low);
  197   xorl(x_hi, x_hi);
  198   decrementl(x_hi);
  199 
  200   bind(done);
  201 }
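// For illustration, the lcmp result convention implemented above (left in x_hi):
//
//   x = 0x00000001_00000000, y = 0x00000000_ffffffff  =>  x_hi = +1   (x > y)
//   x = y                                              =>  x_hi =  0
//   x = -1 (0xffffffff_ffffffff), y = 0                =>  x_hi = -1   (x < y)
//
// The high words are compared signed (less/greater) while the low words are
// compared unsigned (below), which together give the correct 64-bit ordering.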
  202 
  203 void MacroAssembler::lea(Register dst, AddressLiteral src) {
  204   mov_literal32(dst, (int32_t)src.target(), src.rspec());
  205 }
  206 
  207 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  208   assert(rscratch == noreg, "not needed");
  209 
  210   // leal(dst, as_Address(adr));
  211   // see note in movl as to why we must use a move
  212   mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
  213 }
  214 
  215 void MacroAssembler::leave() {
  216   mov(rsp, rbp);
  217   pop(rbp);
  218 }
  219 
  220 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  221   // Multiplication of two Java long values stored on the stack
  222   // as illustrated below. Result is in rdx:rax.
  223   //
  224   // rsp ---> [  ??  ] \               \
  225   //            ....    | y_rsp_offset  |
  226   //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  227   //          [ y_hi ]                  | (in bytes)
  228   //            ....                    |
  229   //          [ x_lo ]                 /
  230   //          [ x_hi ]
  231   //            ....
  232   //
  233   // Basic idea: lo(result) = lo(x_lo * y_lo)
  234   //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  235   Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  236   Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  237   Label quick;
  238   // load x_hi, y_hi and check if quick
  239   // multiplication is possible
  240   movl(rbx, x_hi);
  241   movl(rcx, y_hi);
  242   movl(rax, rbx);
  243   orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  244   jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
  245   // do full multiplication
  246   // 1st step
  247   mull(y_lo);                                    // x_hi * y_lo
  248   movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
  249   // 2nd step
  250   movl(rax, x_lo);
  251   mull(rcx);                                     // x_lo * y_hi
  252   addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
  253   // 3rd step
  254   bind(quick);                                   // note: rbx, = 0 if quick multiply!
  255   movl(rax, x_lo);
  256   mull(y_lo);                                    // x_lo * y_lo
  257   addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
  258 }
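// To illustrate the decomposition used above, write B = 2^32, x = x_hi*B + x_lo,
// y = y_hi*B + y_lo. Then
//
//   x * y = x_hi*y_hi*B^2 + (x_hi*y_lo + x_lo*y_hi)*B + x_lo*y_lo
//
// Reducing mod 2^64 drops the B^2 term and keeps only the low 32 bits of the
// two cross products, which is exactly the sum accumulated in rbx before the
// final x_lo * y_lo multiply corrects rdx.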
  259 
  260 void MacroAssembler::lneg(Register hi, Register lo) {
  261   negl(lo);
  262   adcl(hi, 0);
  263   negl(hi);
  264 }
  265 
  266 void MacroAssembler::lshl(Register hi, Register lo) {
  267   // Java shift left long support (semantics as described in JVM spec., p.305)
  268   // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  269   // shift value is in rcx !
  270   assert(hi != rcx, "must not use rcx");
  271   assert(lo != rcx, "must not use rcx");
  272   const Register s = rcx;                        // shift count
  273   const int      n = BitsPerWord;
  274   Label L;
  275   andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  276   cmpl(s, n);                                    // if (s < n)
  277   jcc(Assembler::less, L);                       // else (s >= n)
  278   movl(hi, lo);                                  // x := x << n
  279   xorl(lo, lo);
  280   // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  281   bind(L);                                       // s (mod n) < n
  282   shldl(hi, lo);                                 // x := x << s
  283   shll(lo);
  284 }
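// For example, a shift count of 40 is handled in two steps: the move/xor above
// performs the "<< 32" part, and the final shld/shl pair shifts by 40 & 0x1f = 8,
// since the hardware masks the count in cl to five bits for 32-bit operands.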
  285 
  286 
  287 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  288   // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  289   // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  290   assert(hi != rcx, "must not use rcx");
  291   assert(lo != rcx, "must not use rcx");
  292   const Register s = rcx;                        // shift count
  293   const int      n = BitsPerWord;
  294   Label L;
  295   andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  296   cmpl(s, n);                                    // if (s < n)
  297   jcc(Assembler::less, L);                       // else (s >= n)
  298   movl(lo, hi);                                  // x := x >> n
  299   if (sign_extension) sarl(hi, 31);
  300   else                xorl(hi, hi);
  301   // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  302   bind(L);                                       // s (mod n) < n
  303   shrdl(lo, hi);                                 // x := x >> s
  304   if (sign_extension) sarl(hi);
  305   else                shrl(hi);
  306 }
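// For example, an arithmetic shift by 35 first does the ">> 32" part (move hi
// into lo and fill hi with the sign via sarl(hi, 31)), then the shrd/sar pair
// shifts by 35 & 0x1f = 3; the logical variant clears hi instead of sign-filling.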
  307 
  308 void MacroAssembler::movoop(Register dst, jobject obj) {
  309   mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
  310 }
  311 
  312 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  313   assert(rscratch == noreg, "redundant");
  314   mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
  315 }
  316 
  317 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  318   mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
  319 }
  320 
  321 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  322   assert(rscratch == noreg, "redundant");
  323   mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
  324 }
  325 
  326 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  327   if (src.is_lval()) {
  328     mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  329   } else {
  330     movl(dst, as_Address(src));
  331   }
  332 }
  333 
  334 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  335   assert(rscratch == noreg, "redundant");
  336   movl(as_Address(dst, noreg), src);
  337 }
  338 
  339 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  340   movl(dst, as_Address(src, noreg));
  341 }
  342 
  343 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  344   assert(rscratch == noreg, "redundant");
  345   movl(dst, src);
  346 }
  347 
  348 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  349   assert(rscratch == noreg, "redundant");
  350   push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
  351 }
  352 
  353 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  354   assert(rscratch == noreg, "redundant");
  355   push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
  356 }
  357 
  358 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  359   assert(rscratch == noreg, "redundant");
  360   if (src.is_lval()) {
  361     push_literal32((int32_t)src.target(), src.rspec());
  362   } else {
  363     pushl(as_Address(src));
  364   }
  365 }
  366 
  367 static void pass_arg0(MacroAssembler* masm, Register arg) {
  368   masm->push(arg);
  369 }
  370 
  371 static void pass_arg1(MacroAssembler* masm, Register arg) {
  372   masm->push(arg);
  373 }
  374 
  375 static void pass_arg2(MacroAssembler* masm, Register arg) {
  376   masm->push(arg);
  377 }
  378 
  379 static void pass_arg3(MacroAssembler* masm, Register arg) {
  380   masm->push(arg);
  381 }
  382 
  383 #ifndef PRODUCT
  384 extern "C" void findpc(intptr_t x);
  385 #endif
  386 
  387 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  389   JavaThread* thread = JavaThread::current();
  390   JavaThreadState saved_state = thread->thread_state();
  391   thread->set_thread_state(_thread_in_vm);
  392   if (ShowMessageBoxOnError) {
  393     JavaThread* thread = JavaThread::current();
  394     JavaThreadState saved_state = thread->thread_state();
  395     thread->set_thread_state(_thread_in_vm);
  396     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
  397       ttyLocker ttyl;
  398       BytecodeCounter::print();
  399     }
  400     // To see where a verify_oop failed, get $ebx+40/X for this frame.
  401     // This is the value of eip which points to where verify_oop will return.
  402     if (os::message_box(msg, "Execution stopped, print registers?")) {
  403       print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
  404       BREAKPOINT;
  405     }
  406   }
  407   fatal("DEBUG MESSAGE: %s", msg);
  408 }
  409 
  410 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  411   ttyLocker ttyl;
  412   FlagSetting fs(Debugging, true);
  413   tty->print_cr("eip = 0x%08x", eip);
  414 #ifndef PRODUCT
  415   if ((WizardMode || Verbose) && PrintMiscellaneous) {
  416     tty->cr();
  417     findpc(eip);
  418     tty->cr();
  419   }
  420 #endif
  421 #define PRINT_REG(rax) \
  422   { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  423   PRINT_REG(rax);
  424   PRINT_REG(rbx);
  425   PRINT_REG(rcx);
  426   PRINT_REG(rdx);
  427   PRINT_REG(rdi);
  428   PRINT_REG(rsi);
  429   PRINT_REG(rbp);
  430   PRINT_REG(rsp);
  431 #undef PRINT_REG
  // Print some words near the top of the stack.
  433   int* dump_sp = (int*) rsp;
  434   for (int col1 = 0; col1 < 8; col1++) {
  435     tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  436     os::print_location(tty, *dump_sp++);
  437   }
  438   for (int row = 0; row < 16; row++) {
  439     tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  440     for (int col = 0; col < 8; col++) {
  441       tty->print(" 0x%08x", *dump_sp++);
  442     }
  443     tty->cr();
  444   }
  445   // Print some instructions around pc:
  446   Disassembler::decode((address)eip-64, (address)eip);
  447   tty->print_cr("--------");
  448   Disassembler::decode((address)eip, (address)eip+32);
  449 }
  450 
  451 void MacroAssembler::stop(const char* msg) {
  452   // push address of message
  453   ExternalAddress message((address)msg);
  454   pushptr(message.addr(), noreg);
  455   { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  456   pusha();                                            // push registers
  457   call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  458   hlt();
  459 }
  460 
  461 void MacroAssembler::warn(const char* msg) {
  462   push_CPU_state();
  463 
  464   // push address of message
  465   ExternalAddress message((address)msg);
  466   pushptr(message.addr(), noreg);
  467 
  468   call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  469   addl(rsp, wordSize);       // discard argument
  470   pop_CPU_state();
  471 }
  472 
  473 void MacroAssembler::print_state() {
  474   { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  475   pusha();                                            // push registers
  476 
  477   push_CPU_state();
  478   call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  479   pop_CPU_state();
  480 
  481   popa();
  482   addl(rsp, wordSize);
  483 }
  484 
  485 #else // _LP64
  486 
  487 // 64 bit versions
  488 
  489 Address MacroAssembler::as_Address(AddressLiteral adr) {
  490   // amd64 always does this as a pc-rel
  491   // we can be absolute or disp based on the instruction type
  492   // jmp/call are displacements others are absolute
  493   assert(!adr.is_lval(), "must be rval");
  494   assert(reachable(adr), "must be");
  495   return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
  496 
  497 }
  498 
  499 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  500   AddressLiteral base = adr.base();
  501   lea(rscratch, base);
  502   Address index = adr.index();
  503   assert(index._disp == 0, "must not have disp"); // maybe it can?
  504   Address array(rscratch, index._index, index._scale, index._disp);
  505   return array;
  506 }
  507 
  508 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  509   Label L, E;
  510 
  511 #ifdef _WIN64
  // Windows always allocates space for its register args
  513   assert(num_args <= 4, "only register arguments supported");
  514   subq(rsp,  frame::arg_reg_save_area_bytes);
  515 #endif
  516 
  517   // Align stack if necessary
  518   testl(rsp, 15);
  519   jcc(Assembler::zero, L);
  520 
  521   subq(rsp, 8);
  522   call(RuntimeAddress(entry_point));
  523   addq(rsp, 8);
  524   jmp(E);
  525 
  526   bind(L);
  527   call(RuntimeAddress(entry_point));
  528 
  529   bind(E);
  530 
  531 #ifdef _WIN64
  532   // restore stack pointer
  533   addq(rsp, frame::arg_reg_save_area_bytes);
  534 #endif
  535 
  536 }
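// A note on the alignment dance above: the C ABI expects rsp to be 16-byte
// aligned at the point of the call, and at this point rsp is still at least
// 8-byte aligned. testl(rsp, 15) detects the misaligned-by-8 case, in which
// the extra subq/addq pair around the call restores the required alignment.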
  537 
  538 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  539   assert(!src2.is_lval(), "should use cmpptr");
  540   assert(rscratch != noreg || always_reachable(src2), "missing");
  541 
  542   if (reachable(src2)) {
  543     cmpq(src1, as_Address(src2));
  544   } else {
  545     lea(rscratch, src2);
  546     Assembler::cmpq(src1, Address(rscratch, 0));
  547   }
  548 }
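// The reachable()/lea() shape in cmp64 recurs throughout this file. A rough
// sketch of the pattern, for an arbitrary memory-operand instruction op:
//
//   if (reachable(lit)) {
//     op(dst, as_Address(lit));       // target fits in a 32-bit (rip-relative) displacement
//   } else {
//     lea(rscratch, lit);             // materialize the full 64-bit address
//     op(dst, Address(rscratch, 0));  // then go through the scratch register
//   }
//
// Callers must therefore supply a scratch register unless always_reachable()
// holds for the literal, which is what the asserts in these helpers check.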
  549 
  550 int MacroAssembler::corrected_idivq(Register reg) {
  551   // Full implementation of Java ldiv and lrem; checks for special
  552   // case as described in JVM spec., p.243 & p.271.  The function
  // returns the (pc) offset of the idivq instruction - may be needed
  554   // for implicit exceptions.
  555   //
  556   //         normal case                           special case
  557   //
  558   // input : rax: dividend                         min_long
  559   //         reg: divisor   (may not be eax/edx)   -1
  560   //
  561   // output: rax: quotient  (= rax idiv reg)       min_long
  562   //         rdx: remainder (= rax irem reg)       0
  563   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  564   static const int64_t min_long = 0x8000000000000000;
  565   Label normal_case, special_case;
  566 
  567   // check for special case
  568   cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  569   jcc(Assembler::notEqual, normal_case);
  570   xorl(rdx, rdx); // prepare rdx for possible special case (where
  571                   // remainder = 0)
  572   cmpq(reg, -1);
  573   jcc(Assembler::equal, special_case);
  574 
  575   // handle normal case
  576   bind(normal_case);
  577   cdqq();
  578   int idivq_offset = offset();
  579   idivq(reg);
  580 
  581   // normal and special case exit
  582   bind(special_case);
  583 
  584   return idivq_offset;
  585 }
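// To illustrate the special case handled above: Java defines
// Long.MIN_VALUE / -1 == Long.MIN_VALUE with remainder 0, while the hardware
// idivq would raise a divide error (#DE) on that overflow. The code therefore
// pre-clears rdx and skips the divide when rax == min_long and the divisor is
// -1, leaving rax (still min_long) and rdx (0) holding the required results.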
  586 
  587 void MacroAssembler::decrementq(Register reg, int value) {
  588   if (value == min_jint) { subq(reg, value); return; }
  589   if (value <  0) { incrementq(reg, -value); return; }
  590   if (value == 0) {                        ; return; }
  591   if (value == 1 && UseIncDec) { decq(reg) ; return; }
  592   /* else */      { subq(reg, value)       ; return; }
  593 }
  594 
  595 void MacroAssembler::decrementq(Address dst, int value) {
  596   if (value == min_jint) { subq(dst, value); return; }
  597   if (value <  0) { incrementq(dst, -value); return; }
  598   if (value == 0) {                        ; return; }
  599   if (value == 1 && UseIncDec) { decq(dst) ; return; }
  600   /* else */      { subq(dst, value)       ; return; }
  601 }
  602 
  603 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  604   assert(rscratch != noreg || always_reachable(dst), "missing");
  605 
  606   if (reachable(dst)) {
  607     incrementq(as_Address(dst));
  608   } else {
  609     lea(rscratch, dst);
  610     incrementq(Address(rscratch, 0));
  611   }
  612 }
  613 
  614 void MacroAssembler::incrementq(Register reg, int value) {
  615   if (value == min_jint) { addq(reg, value); return; }
  616   if (value <  0) { decrementq(reg, -value); return; }
  617   if (value == 0) {                        ; return; }
  618   if (value == 1 && UseIncDec) { incq(reg) ; return; }
  619   /* else */      { addq(reg, value)       ; return; }
  620 }
  621 
  622 void MacroAssembler::incrementq(Address dst, int value) {
  623   if (value == min_jint) { addq(dst, value); return; }
  624   if (value <  0) { decrementq(dst, -value); return; }
  625   if (value == 0) {                        ; return; }
  626   if (value == 1 && UseIncDec) { incq(dst) ; return; }
  627   /* else */      { addq(dst, value)       ; return; }
  628 }
  629 
// 32bit can do a case table jump in one instruction, but we no longer allow the base
// to be installed in the Address class.
  632 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  633   lea(rscratch, entry.base());
  634   Address dispatch = entry.index();
  635   assert(dispatch._base == noreg, "must be");
  636   dispatch._base = rscratch;
  637   jmp(dispatch);
  638 }
  639 
  640 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  641   ShouldNotReachHere(); // 64bit doesn't use two regs
  642   cmpq(x_lo, y_lo);
  643 }
  644 
  645 void MacroAssembler::lea(Register dst, AddressLiteral src) {
  646   mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  647 }
  648 
  649 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  650   lea(rscratch, adr);
  651   movptr(dst, rscratch);
  652 }
  653 
  654 void MacroAssembler::leave() {
  655   // %%% is this really better? Why not on 32bit too?
  656   emit_int8((unsigned char)0xC9); // LEAVE
  657 }
  658 
  659 void MacroAssembler::lneg(Register hi, Register lo) {
  660   ShouldNotReachHere(); // 64bit doesn't use two regs
  661   negq(lo);
  662 }
  663 
  664 void MacroAssembler::movoop(Register dst, jobject obj) {
  665   mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  666 }
  667 
  668 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  669   mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  670   movq(dst, rscratch);
  671 }
  672 
  673 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  674   mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  675 }
  676 
  677 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  678   mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  679   movq(dst, rscratch);
  680 }
  681 
  682 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  683   if (src.is_lval()) {
  684     mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  685   } else {
  686     if (reachable(src)) {
  687       movq(dst, as_Address(src));
  688     } else {
  689       lea(dst, src);
  690       movq(dst, Address(dst, 0));
  691     }
  692   }
  693 }
  694 
  695 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  696   movq(as_Address(dst, rscratch), src);
  697 }
  698 
  699 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  700   movq(dst, as_Address(src, dst /*rscratch*/));
  701 }
  702 
  703 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
  704 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  705   if (is_simm32(src)) {
  706     movptr(dst, checked_cast<int32_t>(src));
  707   } else {
  708     mov64(rscratch, src);
  709     movq(dst, rscratch);
  710   }
  711 }
  712 
  713 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  714   movoop(rscratch, obj);
  715   push(rscratch);
  716 }
  717 
  718 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  719   mov_metadata(rscratch, obj);
  720   push(rscratch);
  721 }
  722 
  723 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  724   lea(rscratch, src);
  725   if (src.is_lval()) {
  726     push(rscratch);
  727   } else {
  728     pushq(Address(rscratch, 0));
  729   }
  730 }
  731 
  732 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  733   reset_last_Java_frame(r15_thread, clear_fp);
  734 }
  735 
  736 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
  737                                          Register last_java_fp,
  738                                          address  last_java_pc,
  739                                          Register rscratch) {
  740   set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
  741 }
  742 
  743 static void pass_arg0(MacroAssembler* masm, Register arg) {
  744   if (c_rarg0 != arg ) {
  745     masm->mov(c_rarg0, arg);
  746   }
  747 }
  748 
  749 static void pass_arg1(MacroAssembler* masm, Register arg) {
  750   if (c_rarg1 != arg ) {
  751     masm->mov(c_rarg1, arg);
  752   }
  753 }
  754 
  755 static void pass_arg2(MacroAssembler* masm, Register arg) {
  756   if (c_rarg2 != arg ) {
  757     masm->mov(c_rarg2, arg);
  758   }
  759 }
  760 
  761 static void pass_arg3(MacroAssembler* masm, Register arg) {
  762   if (c_rarg3 != arg ) {
  763     masm->mov(c_rarg3, arg);
  764   }
  765 }
  766 
  767 void MacroAssembler::stop(const char* msg) {
  768   if (ShowMessageBoxOnError) {
  769     address rip = pc();
  770     pusha(); // get regs on stack
  771     lea(c_rarg1, InternalAddress(rip));
  772     movq(c_rarg2, rsp); // pass pointer to regs array
  773   }
  774   lea(c_rarg0, ExternalAddress((address) msg));
  775   andq(rsp, -16); // align stack as required by ABI
  776   call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  777   hlt();
  778 }
  779 
  780 void MacroAssembler::warn(const char* msg) {
  781   push(rbp);
  782   movq(rbp, rsp);
  783   andq(rsp, -16);     // align stack as required by push_CPU_state and call
  784   push_CPU_state();   // keeps alignment at 16 bytes
  785 
  786   lea(c_rarg0, ExternalAddress((address) msg));
  787   call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  788 
  789   pop_CPU_state();
  790   mov(rsp, rbp);
  791   pop(rbp);
  792 }
  793 
  794 void MacroAssembler::print_state() {
  795   address rip = pc();
  796   pusha();            // get regs on stack
  797   push(rbp);
  798   movq(rbp, rsp);
  799   andq(rsp, -16);     // align stack as required by push_CPU_state and call
  800   push_CPU_state();   // keeps alignment at 16 bytes
  801 
  802   lea(c_rarg0, InternalAddress(rip));
  803   lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  804   call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
  805 
  806   pop_CPU_state();
  807   mov(rsp, rbp);
  808   pop(rbp);
  809   popa();
  810 }
  811 
  812 #ifndef PRODUCT
  813 extern "C" void findpc(intptr_t x);
  814 #endif
  815 
  816 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  818   if (ShowMessageBoxOnError) {
  819     JavaThread* thread = JavaThread::current();
  820     JavaThreadState saved_state = thread->thread_state();
  821     thread->set_thread_state(_thread_in_vm);
  822 #ifndef PRODUCT
  823     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
  824       ttyLocker ttyl;
  825       BytecodeCounter::print();
  826     }
  827 #endif
  828     // To see where a verify_oop failed, get $ebx+40/X for this frame.
  829     // XXX correct this offset for amd64
  830     // This is the value of eip which points to where verify_oop will return.
  831     if (os::message_box(msg, "Execution stopped, print registers?")) {
  832       print_state64(pc, regs);
  833       BREAKPOINT;
  834     }
  835   }
  836   fatal("DEBUG MESSAGE: %s", msg);
  837 }
  838 
  839 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  840   ttyLocker ttyl;
  841   FlagSetting fs(Debugging, true);
  842   tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
  843 #ifndef PRODUCT
  844   tty->cr();
  845   findpc(pc);
  846   tty->cr();
  847 #endif
  848 #define PRINT_REG(rax, value) \
  849   { tty->print("%s = ", #rax); os::print_location(tty, value); }
  850   PRINT_REG(rax, regs[15]);
  851   PRINT_REG(rbx, regs[12]);
  852   PRINT_REG(rcx, regs[14]);
  853   PRINT_REG(rdx, regs[13]);
  854   PRINT_REG(rdi, regs[8]);
  855   PRINT_REG(rsi, regs[9]);
  856   PRINT_REG(rbp, regs[10]);
  857   // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  858   PRINT_REG(rsp, (intptr_t)(&regs[16]));
  859   PRINT_REG(r8 , regs[7]);
  860   PRINT_REG(r9 , regs[6]);
  861   PRINT_REG(r10, regs[5]);
  862   PRINT_REG(r11, regs[4]);
  863   PRINT_REG(r12, regs[3]);
  864   PRINT_REG(r13, regs[2]);
  865   PRINT_REG(r14, regs[1]);
  866   PRINT_REG(r15, regs[0]);
  867 #undef PRINT_REG
  868   // Print some words near the top of the stack.
  869   int64_t* rsp = &regs[16];
  870   int64_t* dump_sp = rsp;
  871   for (int col1 = 0; col1 < 8; col1++) {
  872     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  873     os::print_location(tty, *dump_sp++);
  874   }
  875   for (int row = 0; row < 25; row++) {
  876     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  877     for (int col = 0; col < 4; col++) {
  878       tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
  879     }
  880     tty->cr();
  881   }
  882   // Print some instructions around pc:
  883   Disassembler::decode((address)pc-64, (address)pc);
  884   tty->print_cr("--------");
  885   Disassembler::decode((address)pc, (address)pc+32);
  886 }
  887 
  888 // The java_calling_convention describes stack locations as ideal slots on
  889 // a frame with no abi restrictions. Since we must observe abi restrictions
  890 // (like the placement of the register window) the slots must be biased by
  891 // the following value.
  892 static int reg2offset_in(VMReg r) {
  893   // Account for saved rbp and return address
  894   // This should really be in_preserve_stack_slots
  895   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
  896 }
  897 
  898 static int reg2offset_out(VMReg r) {
  899   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
  900 }
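// For example, assuming 4-byte VM stack slots, an incoming argument in stack
// slot 0 maps to Address(rbp, 16): the "+ 4" skips the four slots taken by the
// saved rbp and the return address. Outgoing offsets are rsp-relative and only
// biased by out_preserve_stack_slots().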
  901 
  902 // A long move
  903 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  904 
  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  907 
  908   if (src.is_single_phys_reg() ) {
  909     if (dst.is_single_phys_reg()) {
  910       if (dst.first() != src.first()) {
  911         mov(dst.first()->as_Register(), src.first()->as_Register());
  912       }
  913     } else {
  914       assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
  915              src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
  916       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  917     }
  918   } else if (dst.is_single_phys_reg()) {
  919     assert(src.is_single_reg(),  "not a stack pair");
  920     movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  921   } else {
  922     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  923     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  924     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  925   }
  926 }
  927 
  928 // A double move
  929 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  930 
  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  933 
  934   if (src.is_single_phys_reg() ) {
  935     if (dst.is_single_phys_reg()) {
  936       // In theory these overlap but the ordering is such that this is likely a nop
  937       if ( src.first() != dst.first()) {
  938         movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
  939       }
  940     } else {
  941       assert(dst.is_single_reg(), "not a stack pair");
  942       movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  943     }
  944   } else if (dst.is_single_phys_reg()) {
  945     assert(src.is_single_reg(),  "not a stack pair");
  946     movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  947   } else {
  948     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  949     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  950     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  951   }
  952 }
  953 
  954 
  955 // A float arg may have to do float reg int reg conversion
  956 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  957   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
  958 
  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  961 
  962   if (src.first()->is_stack()) {
  963     if (dst.first()->is_stack()) {
  964       movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  965       movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  966     } else {
  967       // stack to reg
  968       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  969       movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  970     }
  971   } else if (dst.first()->is_stack()) {
  972     // reg to stack
  973     assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  974     movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  975   } else {
  976     // reg to reg
  977     // In theory these overlap but the ordering is such that this is likely a nop
  978     if ( src.first() != dst.first()) {
  979       movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
  980     }
  981   }
  982 }
  983 
// On 64 bit we will store integer-like items to the stack as
// 64-bit items (x86_32/64 ABI) even though Java would only store
// 32 bits for a parameter. On 32 bit it will simply be 32 bits.
// So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
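// For example, a negative Java int arriving on the stack is widened with
// movslq on its way into a 64-bit ABI slot or register, rather than copied
// with a plain movl, so the callee sees a properly sign-extended value.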
  988 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  989   if (src.first()->is_stack()) {
  990     if (dst.first()->is_stack()) {
  991       // stack to stack
  992       movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  993       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  994     } else {
  995       // stack to reg
  996       movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  997     }
  998   } else if (dst.first()->is_stack()) {
  999     // reg to stack
 1000     // Do we really have to sign extend???
 1001     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
 1002     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
 1003   } else {
 1004     // Do we really have to sign extend???
 1005     // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
 1006     if (dst.first() != src.first()) {
 1007       movq(dst.first()->as_Register(), src.first()->as_Register());
 1008     }
 1009   }
 1010 }
 1011 
 1012 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
 1013   if (src.first()->is_stack()) {
 1014     if (dst.first()->is_stack()) {
 1015       // stack to stack
 1016       movq(rax, Address(rbp, reg2offset_in(src.first())));
 1017       movq(Address(rsp, reg2offset_out(dst.first())), rax);
 1018     } else {
 1019       // stack to reg
 1020       movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
 1021     }
 1022   } else if (dst.first()->is_stack()) {
 1023     // reg to stack
 1024     movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
 1025   } else {
 1026     if (dst.first() != src.first()) {
 1027       movq(dst.first()->as_Register(), src.first()->as_Register());
 1028     }
 1029   }
 1030 }
 1031 
 1032 // An oop arg. Must pass a handle not the oop itself
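// For illustration: the callee receives a jobject that is really the address
// of a stack slot holding the oop (either the caller's argument slot or one of
// the reserved oop_handle slots), or NULL when the oop itself is NULL; hence
// the cmpptr/cmovptr pairs below that replace the handle address with NULL.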
 1033 void MacroAssembler::object_move(OopMap* map,
 1034                         int oop_handle_offset,
 1035                         int framesize_in_slots,
 1036                         VMRegPair src,
 1037                         VMRegPair dst,
 1038                         bool is_receiver,
 1039                         int* receiver_offset) {
 1040 
 1041   // must pass a handle. First figure out the location we use as a handle
 1042 
 1043   Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
 1044 
  // See if the oop is NULL; if it is, we need no handle
 1046 
 1047   if (src.first()->is_stack()) {
 1048 
 1049     // Oop is already on the stack as an argument
 1050     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 1051     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
 1052     if (is_receiver) {
 1053       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
 1054     }
 1055 
 1056     cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
 1057     lea(rHandle, Address(rbp, reg2offset_in(src.first())));
 1058     // conditionally move a NULL
 1059     cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
 1060   } else {
 1061 
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL.
 1064 
 1065     const Register rOop = src.first()->as_Register();
 1066     int oop_slot;
 1067     if (rOop == j_rarg0)
 1068       oop_slot = 0;
 1069     else if (rOop == j_rarg1)
 1070       oop_slot = 1;
 1071     else if (rOop == j_rarg2)
 1072       oop_slot = 2;
 1073     else if (rOop == j_rarg3)
 1074       oop_slot = 3;
 1075     else if (rOop == j_rarg4)
 1076       oop_slot = 4;
 1077     else {
 1078       assert(rOop == j_rarg5, "wrong register");
 1079       oop_slot = 5;
 1080     }
 1081 
 1082     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
 1083     int offset = oop_slot*VMRegImpl::stack_slot_size;
 1084 
 1085     map->set_oop(VMRegImpl::stack2reg(oop_slot));
 1086     // Store oop in handle area, may be NULL
 1087     movptr(Address(rsp, offset), rOop);
 1088     if (is_receiver) {
 1089       *receiver_offset = offset;
 1090     }
 1091 
 1092     cmpptr(rOop, NULL_WORD);
 1093     lea(rHandle, Address(rsp, offset));
 1094     // conditionally move a NULL from the handle area where it was just stored
 1095     cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
 1096   }
 1097 
  // If the arg is on the stack then place it; otherwise it is already in the correct reg.
 1099   if (dst.first()->is_stack()) {
 1100     movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
 1101   }
 1102 }
 1103 
 1104 #endif // _LP64
 1105 
 1106 // Now versions that are common to 32/64 bit
 1107 
 1108 void MacroAssembler::addptr(Register dst, int32_t imm32) {
 1109   LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
 1110 }
 1111 
 1112 void MacroAssembler::addptr(Register dst, Register src) {
 1113   LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
 1114 }
 1115 
 1116 void MacroAssembler::addptr(Address dst, Register src) {
 1117   LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
 1118 }
 1119 
 1120 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1121   assert(rscratch != noreg || always_reachable(src), "missing");
 1122 
 1123   if (reachable(src)) {
 1124     Assembler::addsd(dst, as_Address(src));
 1125   } else {
 1126     lea(rscratch, src);
 1127     Assembler::addsd(dst, Address(rscratch, 0));
 1128   }
 1129 }
 1130 
 1131 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1132   assert(rscratch != noreg || always_reachable(src), "missing");
 1133 
 1134   if (reachable(src)) {
 1135     addss(dst, as_Address(src));
 1136   } else {
 1137     lea(rscratch, src);
 1138     addss(dst, Address(rscratch, 0));
 1139   }
 1140 }
 1141 
 1142 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1143   assert(rscratch != noreg || always_reachable(src), "missing");
 1144 
 1145   if (reachable(src)) {
 1146     Assembler::addpd(dst, as_Address(src));
 1147   } else {
 1148     lea(rscratch, src);
 1149     Assembler::addpd(dst, Address(rscratch, 0));
 1150   }
 1151 }
 1152 
 1153 // See 8273459.  Function for ensuring 64-byte alignment, intended for stubs only.
 1154 // Stub code is generated once and never copied.
 1155 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
 1156 void MacroAssembler::align64() {
 1157   align(64, (unsigned long long) pc());
 1158 }
 1159 
 1160 void MacroAssembler::align32() {
 1161   align(32, (unsigned long long) pc());
 1162 }
 1163 
 1164 void MacroAssembler::align(int modulus) {
 1165   // 8273459: Ensure alignment is possible with current segment alignment
 1166   assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
 1167   align(modulus, offset());
 1168 }
 1169 
 1170 void MacroAssembler::align(int modulus, int target) {
 1171   if (target % modulus != 0) {
 1172     nop(modulus - (target % modulus));
 1173   }
 1174 }
 1175 
 1176 void MacroAssembler::push_f(XMMRegister r) {
 1177   subptr(rsp, wordSize);
 1178   movflt(Address(rsp, 0), r);
 1179 }
 1180 
 1181 void MacroAssembler::pop_f(XMMRegister r) {
 1182   movflt(r, Address(rsp, 0));
 1183   addptr(rsp, wordSize);
 1184 }
 1185 
 1186 void MacroAssembler::push_d(XMMRegister r) {
 1187   subptr(rsp, 2 * wordSize);
 1188   movdbl(Address(rsp, 0), r);
 1189 }
 1190 
 1191 void MacroAssembler::pop_d(XMMRegister r) {
 1192   movdbl(r, Address(rsp, 0));
 1193   addptr(rsp, 2 * Interpreter::stackElementSize);
 1194 }
 1195 
 1196 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1197   // Used in sign-masking with aligned address.
 1198   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 1199   assert(rscratch != noreg || always_reachable(src), "missing");
 1200 
 1201   if (reachable(src)) {
 1202     Assembler::andpd(dst, as_Address(src));
 1203   } else {
 1204     lea(rscratch, src);
 1205     Assembler::andpd(dst, Address(rscratch, 0));
 1206   }
 1207 }
 1208 
 1209 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1210   // Used in sign-masking with aligned address.
 1211   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 1212   assert(rscratch != noreg || always_reachable(src), "missing");
 1213 
 1214   if (reachable(src)) {
 1215     Assembler::andps(dst, as_Address(src));
 1216   } else {
 1217     lea(rscratch, src);
 1218     Assembler::andps(dst, Address(rscratch, 0));
 1219   }
 1220 }
 1221 
 1222 void MacroAssembler::andptr(Register dst, int32_t imm32) {
 1223   LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
 1224 }
 1225 
 1226 #ifdef _LP64
 1227 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
 1228   assert(rscratch != noreg || always_reachable(src), "missing");
 1229 
 1230   if (reachable(src)) {
 1231     andq(dst, as_Address(src));
 1232   } else {
 1233     lea(rscratch, src);
 1234     andq(dst, Address(rscratch, 0));
 1235   }
 1236 }
 1237 #endif
 1238 
 1239 void MacroAssembler::atomic_incl(Address counter_addr) {
 1240   lock();
 1241   incrementl(counter_addr);
 1242 }
 1243 
 1244 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
 1245   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
 1246 
 1247   if (reachable(counter_addr)) {
 1248     atomic_incl(as_Address(counter_addr));
 1249   } else {
 1250     lea(rscratch, counter_addr);
 1251     atomic_incl(Address(rscratch, 0));
 1252   }
 1253 }
 1254 
 1255 #ifdef _LP64
 1256 void MacroAssembler::atomic_incq(Address counter_addr) {
 1257   lock();
 1258   incrementq(counter_addr);
 1259 }
 1260 
 1261 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
 1262   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
 1263 
 1264   if (reachable(counter_addr)) {
 1265     atomic_incq(as_Address(counter_addr));
 1266   } else {
 1267     lea(rscratch, counter_addr);
 1268     atomic_incq(Address(rscratch, 0));
 1269   }
 1270 }
 1271 #endif
 1272 
// Writes to successive stack pages until the given offset is reached, to check
// for stack overflow + shadow pages.  This clobbers tmp.
 1275 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
 1276   movptr(tmp, rsp);
 1277   // Bang stack for total size given plus shadow page size.
 1278   // Bang one page at a time because large size can bang beyond yellow and
 1279   // red zones.
 1280   Label loop;
 1281   bind(loop);
 1282   movl(Address(tmp, (-os::vm_page_size())), size );
 1283   subptr(tmp, os::vm_page_size());
 1284   subl(size, os::vm_page_size());
 1285   jcc(Assembler::greater, loop);
 1286 
 1287   // Bang down shadow pages too.
 1288   // At this point, (tmp-0) is the last address touched, so don't
 1289   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
 1290   // was post-decremented.)  Skip this address by starting at i=1, and
 1291   // touch a few more pages below.  N.B.  It is important to touch all
 1292   // the way down including all pages in the shadow zone.
 1293   for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); i++) {
    // this could be any sized move, but since it can serve as a debugging crumb,
    // the bigger the better.
 1296     movptr(Address(tmp, (-i*os::vm_page_size())), size );
 1297   }
 1298 }
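// For example, with a 4K page size and size = 20K, the loop above writes one
// word on each page from rsp-4K down to rsp-20K, and the unrolled loop then
// keeps touching every page of the shadow zone below that, so any overflow
// into the guard pages is detected here rather than at some arbitrary later
// store.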
 1299 
 1300 void MacroAssembler::reserved_stack_check() {
 1301   // testing if reserved zone needs to be enabled
 1302   Label no_reserved_zone_enabling;
 1303   Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
 1304   NOT_LP64(get_thread(rsi);)
 1305 
 1306   cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
 1307   jcc(Assembler::below, no_reserved_zone_enabling);
 1308 
 1309   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
 1310   jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
 1311   should_not_reach_here();
 1312 
 1313   bind(no_reserved_zone_enabling);
 1314 }
 1315 
 1316 void MacroAssembler::c2bool(Register x) {
 1317   // implements x == 0 ? 0 : 1
 1318   // note: must only look at least-significant byte of x
 1319   //       since C-style booleans are stored in one byte
 1320   //       only! (was bug)
 1321   andl(x, 0xFF);
 1322   setb(Assembler::notZero, x);
 1323 }
 1324 
 1325 // Wouldn't need if AddressLiteral version had new name
 1326 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
 1327   Assembler::call(L, rtype);
 1328 }
 1329 
 1330 void MacroAssembler::call(Register entry) {
 1331   Assembler::call(entry);
 1332 }
 1333 
 1334 void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
 1335   assert(rscratch != noreg || always_reachable(entry), "missing");
 1336 
 1337   if (reachable(entry)) {
 1338     Assembler::call_literal(entry.target(), entry.rspec());
 1339   } else {
 1340     lea(rscratch, entry);
 1341     Assembler::call(rscratch);
 1342   }
 1343 }
 1344 
 1345 void MacroAssembler::ic_call(address entry, jint method_index) {
 1346   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
 1347   movptr(rax, (intptr_t)Universe::non_oop_word());
 1348   call(AddressLiteral(entry, rh));
 1349 }
 1350 
 1351 void MacroAssembler::emit_static_call_stub() {
 1352   // Static stub relocation also tags the Method* in the code-stream.
 1353   mov_metadata(rbx, (Metadata*) NULL);  // Method is zapped till fixup time.
 1354   // This is recognized as unresolved by relocs/nativeinst/ic code.
 1355   jump(RuntimeAddress(pc()));
 1356 }
 1357 
 1358 // Implementation of call_VM versions
 1359 
 1360 void MacroAssembler::call_VM(Register oop_result,
 1361                              address entry_point,
 1362                              bool check_exceptions) {
 1363   Label C, E;
 1364   call(C, relocInfo::none);
 1365   jmp(E);
 1366 
 1367   bind(C);
 1368   call_VM_helper(oop_result, entry_point, 0, check_exceptions);
 1369   ret(0);
 1370 
 1371   bind(E);
 1372 }
 1373 
 1374 void MacroAssembler::call_VM(Register oop_result,
 1375                              address entry_point,
 1376                              Register arg_1,
 1377                              bool check_exceptions) {
 1378   Label C, E;
 1379   call(C, relocInfo::none);
 1380   jmp(E);
 1381 
 1382   bind(C);
 1383   pass_arg1(this, arg_1);
 1384   call_VM_helper(oop_result, entry_point, 1, check_exceptions);
 1385   ret(0);
 1386 
 1387   bind(E);
 1388 }
 1389 
 1390 void MacroAssembler::call_VM(Register oop_result,
 1391                              address entry_point,
 1392                              Register arg_1,
 1393                              Register arg_2,
 1394                              bool check_exceptions) {
 1395   Label C, E;
 1396   call(C, relocInfo::none);
 1397   jmp(E);
 1398 
 1399   bind(C);
 1400 
 1401   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1402 
 1403   pass_arg2(this, arg_2);
 1404   pass_arg1(this, arg_1);
 1405   call_VM_helper(oop_result, entry_point, 2, check_exceptions);
 1406   ret(0);
 1407 
 1408   bind(E);
 1409 }
 1410 
 1411 void MacroAssembler::call_VM(Register oop_result,
 1412                              address entry_point,
 1413                              Register arg_1,
 1414                              Register arg_2,
 1415                              Register arg_3,
 1416                              bool check_exceptions) {
 1417   Label C, E;
 1418   call(C, relocInfo::none);
 1419   jmp(E);
 1420 
 1421   bind(C);
 1422 
 1423   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1424   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1425   pass_arg3(this, arg_3);
 1426 
 1427   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1428   pass_arg2(this, arg_2);
 1429 
 1430   pass_arg1(this, arg_1);
 1431   call_VM_helper(oop_result, entry_point, 3, check_exceptions);
 1432   ret(0);
 1433 
 1434   bind(E);
 1435 }
 1436 
 1437 void MacroAssembler::call_VM(Register oop_result,
 1438                              Register last_java_sp,
 1439                              address entry_point,
 1440                              int number_of_arguments,
 1441                              bool check_exceptions) {
 1442   Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
 1443   call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
 1444 }
 1445 
 1446 void MacroAssembler::call_VM(Register oop_result,
 1447                              Register last_java_sp,
 1448                              address entry_point,
 1449                              Register arg_1,
 1450                              bool check_exceptions) {
 1451   pass_arg1(this, arg_1);
 1452   call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
 1453 }
 1454 
 1455 void MacroAssembler::call_VM(Register oop_result,
 1456                              Register last_java_sp,
 1457                              address entry_point,
 1458                              Register arg_1,
 1459                              Register arg_2,
 1460                              bool check_exceptions) {
 1461 
 1462   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1463   pass_arg2(this, arg_2);
 1464   pass_arg1(this, arg_1);
 1465   call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
 1466 }
 1467 
 1468 void MacroAssembler::call_VM(Register oop_result,
 1469                              Register last_java_sp,
 1470                              address entry_point,
 1471                              Register arg_1,
 1472                              Register arg_2,
 1473                              Register arg_3,
 1474                              bool check_exceptions) {
 1475   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1476   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1477   pass_arg3(this, arg_3);
 1478   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1479   pass_arg2(this, arg_2);
 1480   pass_arg1(this, arg_1);
 1481   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 1482 }
 1483 
 1484 void MacroAssembler::super_call_VM(Register oop_result,
 1485                                    Register last_java_sp,
 1486                                    address entry_point,
 1487                                    int number_of_arguments,
 1488                                    bool check_exceptions) {
 1489   Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
 1490   MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
 1491 }
 1492 
 1493 void MacroAssembler::super_call_VM(Register oop_result,
 1494                                    Register last_java_sp,
 1495                                    address entry_point,
 1496                                    Register arg_1,
 1497                                    bool check_exceptions) {
 1498   pass_arg1(this, arg_1);
 1499   super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
 1500 }
 1501 
 1502 void MacroAssembler::super_call_VM(Register oop_result,
 1503                                    Register last_java_sp,
 1504                                    address entry_point,
 1505                                    Register arg_1,
 1506                                    Register arg_2,
 1507                                    bool check_exceptions) {
 1508 
 1509   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1510   pass_arg2(this, arg_2);
 1511   pass_arg1(this, arg_1);
 1512   super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
 1513 }
 1514 
 1515 void MacroAssembler::super_call_VM(Register oop_result,
 1516                                    Register last_java_sp,
 1517                                    address entry_point,
 1518                                    Register arg_1,
 1519                                    Register arg_2,
 1520                                    Register arg_3,
 1521                                    bool check_exceptions) {
 1522   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1523   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1524   pass_arg3(this, arg_3);
 1525   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1526   pass_arg2(this, arg_2);
 1527   pass_arg1(this, arg_1);
 1528   super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 1529 }
 1530 
 1531 void MacroAssembler::call_VM_base(Register oop_result,
 1532                                   Register java_thread,
 1533                                   Register last_java_sp,
 1534                                   address  entry_point,
 1535                                   int      number_of_arguments,
 1536                                   bool     check_exceptions) {
 1537   // determine java_thread register
 1538   if (!java_thread->is_valid()) {
 1539 #ifdef _LP64
 1540     java_thread = r15_thread;
 1541 #else
 1542     java_thread = rdi;
 1543     get_thread(java_thread);
 1544 #endif // LP64
 1545   }
 1546   // determine last_java_sp register
 1547   if (!last_java_sp->is_valid()) {
 1548     last_java_sp = rsp;
 1549   }
 1550   // debugging support
 1551   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
 1552   LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
 1553 #ifdef ASSERT
 1554   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
 1555   // r12 is the heapbase.
 1556   LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
 1557 #endif // ASSERT
 1558 
 1559   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
 1560   assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
 1561 
 1562   // push java thread (becomes first argument of C function)
 1563 
 1564   NOT_LP64(push(java_thread); number_of_arguments++);
 1565   LP64_ONLY(mov(c_rarg0, r15_thread));
 1566 
 1567   // set last Java frame before call
 1568   assert(last_java_sp != rbp, "can't use ebp/rbp");
 1569 
 1570   // Only interpreter should have to set fp
 1571   set_last_Java_frame(java_thread, last_java_sp, rbp, NULL, rscratch1);
 1572 
 1573   // do the call, remove parameters
 1574   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
 1575 
 1576   // restore the thread (cannot use the pushed argument since arguments
 1577   // may be overwritten by C code generated by an optimizing compiler);
 1578   // however, we can use the register value directly if it is callee saved.
 1579   if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
 1580     // rdi & rsi (also r15) are callee saved -> nothing to do
 1581 #ifdef ASSERT
 1582     guarantee(java_thread != rax, "change this code");
 1583     push(rax);
 1584     { Label L;
 1585       get_thread(rax);
 1586       cmpptr(java_thread, rax);
 1587       jcc(Assembler::equal, L);
 1588       STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
 1589       bind(L);
 1590     }
 1591     pop(rax);
 1592 #endif
 1593   } else {
 1594     get_thread(java_thread);
 1595   }
 1596   // reset last Java frame
 1597   // Only interpreter should have to clear fp
 1598   reset_last_Java_frame(java_thread, true);
 1599 
 1600   // C++ interp handles this in the interpreter
 1601   check_and_handle_popframe(java_thread);
 1602   check_and_handle_earlyret(java_thread);
 1603 
 1604   if (check_exceptions) {
 1605     // check for pending exceptions (java_thread is set upon return)
 1606     cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
 1607 #ifndef _LP64
 1608     jump_cc(Assembler::notEqual,
 1609             RuntimeAddress(StubRoutines::forward_exception_entry()));
 1610 #else
 1611     // This used to be a conditional jump to forward_exception, but after
 1612     // relocation the target may be out of range for a 32-bit displacement,
 1613     // so we branch around an unconditional jump() that can always reach it.
 1614 
 1615     Label ok;
 1616     jcc(Assembler::equal, ok);
 1617     jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 1618     bind(ok);
 1619 #endif // LP64
 1620   }
 1621 
 1622   // get oop result if there is one and reset the value in the thread
 1623   if (oop_result->is_valid()) {
 1624     get_vm_result(oop_result, java_thread);
 1625   }
 1626 }
 1627 
 1628 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
 1629 
 1630   // Calculate the value for last_Java_sp.
 1631   // This is somewhat subtle: call_VM does an intermediate call which
 1632   // places a return address on the stack just under the stack pointer
 1633   // as the caller last left it. This allows us to retrieve last_Java_pc
 1634   // from last_Java_sp[-1].
 1635   // On 32-bit we then have to push additional args on the stack to accomplish
 1636   // the actual requested call. On 64-bit call_VM can only use register args,
 1637   // so the only extra space is the return address that call_VM created.
 1638   // This hopefully explains the calculations here.
 1639 
 1640 #ifdef _LP64
 1641   // We've pushed one address, correct last_Java_sp
 1642   lea(rax, Address(rsp, wordSize));
 1643 #else
 1644   lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
 1645 #endif // LP64
 1646 
 1647   call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
 1648 
 1649 }
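      // A rough sketch of the layout call_VM_helper relies on, 64-bit case, at the
      // point of the lea above (only the return address pushed by call(C) is extra):
      //
      //   rsp + wordSize  <-- last_Java_sp
      //   rsp + 0         return address pushed by call(C) == last_Java_sp[-1] == last_Java_pc
      //
      // On 32-bit the number_of_arguments argument words pushed by pass_arg* sit
      // between rsp and that return address, hence the
      // (1 + number_of_arguments) * wordSize correction instead.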
 1650 
 1651 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
 1652 void MacroAssembler::call_VM_leaf0(address entry_point) {
 1653   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 1654 }
 1655 
 1656 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
 1657   call_VM_leaf_base(entry_point, number_of_arguments);
 1658 }
 1659 
 1660 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
 1661   pass_arg0(this, arg_0);
 1662   call_VM_leaf(entry_point, 1);
 1663 }
 1664 
 1665 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1666 
 1667   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1668   pass_arg1(this, arg_1);
 1669   pass_arg0(this, arg_0);
 1670   call_VM_leaf(entry_point, 2);
 1671 }
 1672 
 1673 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1674   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1675   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1676   pass_arg2(this, arg_2);
 1677   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1678   pass_arg1(this, arg_1);
 1679   pass_arg0(this, arg_0);
 1680   call_VM_leaf(entry_point, 3);
 1681 }
 1682 
 1683 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1684   LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
 1685   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1686   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1687   pass_arg3(this, arg_3);
 1688   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1689   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1690   pass_arg2(this, arg_2);
 1691   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1692   pass_arg1(this, arg_1);
 1693   pass_arg0(this, arg_0);
 1694   call_VM_leaf(entry_point, 4);
 1695 }
 1696 
 1697 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1698   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 1699 }
 1700 
 1701 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1702   pass_arg0(this, arg_0);
 1703   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1704 }
 1705 
 1706 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1707 
 1708   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1709   pass_arg1(this, arg_1);
 1710   pass_arg0(this, arg_0);
 1711   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1712 }
 1713 
 1714 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1715   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1716   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1717   pass_arg2(this, arg_2);
 1718   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1719   pass_arg1(this, arg_1);
 1720   pass_arg0(this, arg_0);
 1721   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1722 }
 1723 
 1724 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1725   LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
 1726   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
 1727   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
 1728   pass_arg3(this, arg_3);
 1729   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
 1730   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
 1731   pass_arg2(this, arg_2);
 1732   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
 1733   pass_arg1(this, arg_1);
 1734   pass_arg0(this, arg_0);
 1735   MacroAssembler::call_VM_leaf_base(entry_point, 4);
 1736 }
 1737 
 1738 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
 1739   movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
 1740   movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
 1741   verify_oop_msg(oop_result, "broken oop in call_VM_base");
 1742 }
 1743 
 1744 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
 1745   movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
 1746   movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
 1747 }
 1748 
 1749 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
 1750 }
 1751 
 1752 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
 1753 }
 1754 
 1755 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
 1756   assert(rscratch != noreg || always_reachable(src1), "missing");
 1757 
 1758   if (reachable(src1)) {
 1759     cmpl(as_Address(src1), imm);
 1760   } else {
 1761     lea(rscratch, src1);
 1762     cmpl(Address(rscratch, 0), imm);
 1763   }
 1764 }
 1765 
 1766 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
 1767   assert(!src2.is_lval(), "use cmpptr");
 1768   assert(rscratch != noreg || always_reachable(src2), "missing");
 1769 
 1770   if (reachable(src2)) {
 1771     cmpl(src1, as_Address(src2));
 1772   } else {
 1773     lea(rscratch, src2);
 1774     cmpl(src1, Address(rscratch, 0));
 1775   }
 1776 }
 1777 
 1778 void MacroAssembler::cmp32(Register src1, int32_t imm) {
 1779   Assembler::cmpl(src1, imm);
 1780 }
 1781 
 1782 void MacroAssembler::cmp32(Register src1, Address src2) {
 1783   Assembler::cmpl(src1, src2);
 1784 }
 1785 
 1786 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
 1787   ucomisd(opr1, opr2);
 1788 
 1789   Label L;
 1790   if (unordered_is_less) {
 1791     movl(dst, -1);
 1792     jcc(Assembler::parity, L);
 1793     jcc(Assembler::below , L);
 1794     movl(dst, 0);
 1795     jcc(Assembler::equal , L);
 1796     increment(dst);
 1797   } else { // unordered is greater
 1798     movl(dst, 1);
 1799     jcc(Assembler::parity, L);
 1800     jcc(Assembler::above , L);
 1801     movl(dst, 0);
 1802     jcc(Assembler::equal , L);
 1803     decrementl(dst);
 1804   }
 1805   bind(L);
 1806 }
 1807 
 1808 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
 1809   ucomiss(opr1, opr2);
 1810 
 1811   Label L;
 1812   if (unordered_is_less) {
 1813     movl(dst, -1);
 1814     jcc(Assembler::parity, L);
 1815     jcc(Assembler::below , L);
 1816     movl(dst, 0);
 1817     jcc(Assembler::equal , L);
 1818     increment(dst);
 1819   } else { // unordered is greater
 1820     movl(dst, 1);
 1821     jcc(Assembler::parity, L);
 1822     jcc(Assembler::above , L);
 1823     movl(dst, 0);
 1824     jcc(Assembler::equal , L);
 1825     decrementl(dst);
 1826   }
 1827   bind(L);
 1828 }
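      // cmpsd2int/cmpss2int above produce the -1/0/+1 result expected by the Java
      // floating-point compare bytecodes: unordered_is_less corresponds to the
      // fcmpl/dcmpl flavour (an unordered compare, i.e. a NaN operand, yields -1),
      // !unordered_is_less to fcmpg/dcmpg (NaN yields +1). For example, with
      // unordered_is_less and a NaN in opr1, ucomisd sets the parity flag, the
      // jcc(parity) is taken, and dst keeps its preloaded -1.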
 1829 
 1830 
 1831 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
 1832   assert(rscratch != noreg || always_reachable(src1), "missing");
 1833 
 1834   if (reachable(src1)) {
 1835     cmpb(as_Address(src1), imm);
 1836   } else {
 1837     lea(rscratch, src1);
 1838     cmpb(Address(rscratch, 0), imm);
 1839   }
 1840 }
 1841 
 1842 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
 1843 #ifdef _LP64
 1844   assert(rscratch != noreg || always_reachable(src2), "missing");
 1845 
 1846   if (src2.is_lval()) {
 1847     movptr(rscratch, src2);
 1848     Assembler::cmpq(src1, rscratch);
 1849   } else if (reachable(src2)) {
 1850     cmpq(src1, as_Address(src2));
 1851   } else {
 1852     lea(rscratch, src2);
 1853     Assembler::cmpq(src1, Address(rscratch, 0));
 1854   }
 1855 #else
 1856   assert(rscratch == noreg, "not needed");
 1857   if (src2.is_lval()) {
 1858     cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
 1859   } else {
 1860     cmpl(src1, as_Address(src2));
 1861   }
 1862 #endif // _LP64
 1863 }
 1864 
 1865 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
 1866   assert(src2.is_lval(), "not a mem-mem compare");
 1867 #ifdef _LP64
 1868   // moves src2's literal address
 1869   movptr(rscratch, src2);
 1870   Assembler::cmpq(src1, rscratch);
 1871 #else
 1872   assert(rscratch == noreg, "not needed");
 1873   cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
 1874 #endif // _LP64
 1875 }
 1876 
 1877 void MacroAssembler::cmpoop(Register src1, Register src2) {
 1878   cmpptr(src1, src2);
 1879 }
 1880 
 1881 void MacroAssembler::cmpoop(Register src1, Address src2) {
 1882   cmpptr(src1, src2);
 1883 }
 1884 
 1885 #ifdef _LP64
 1886 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
 1887   movoop(rscratch, src2);
 1888   cmpptr(src1, rscratch);
 1889 }
 1890 #endif
 1891 
 1892 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
 1893   assert(rscratch != noreg || always_reachable(adr), "missing");
 1894 
 1895   if (reachable(adr)) {
 1896     lock();
 1897     cmpxchgptr(reg, as_Address(adr));
 1898   } else {
 1899     lea(rscratch, adr);
 1900     lock();
 1901     cmpxchgptr(reg, Address(rscratch, 0));
 1902   }
 1903 }
 1904 
 1905 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
 1906   LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
 1907 }
 1908 
 1909 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1910   assert(rscratch != noreg || always_reachable(src), "missing");
 1911 
 1912   if (reachable(src)) {
 1913     Assembler::comisd(dst, as_Address(src));
 1914   } else {
 1915     lea(rscratch, src);
 1916     Assembler::comisd(dst, Address(rscratch, 0));
 1917   }
 1918 }
 1919 
 1920 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1921   assert(rscratch != noreg || always_reachable(src), "missing");
 1922 
 1923   if (reachable(src)) {
 1924     Assembler::comiss(dst, as_Address(src));
 1925   } else {
 1926     lea(rscratch, src);
 1927     Assembler::comiss(dst, Address(rscratch, 0));
 1928   }
 1929 }
 1930 
 1931 
 1932 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
 1933   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
 1934 
 1935   Condition negated_cond = negate_condition(cond);
 1936   Label L;
 1937   jcc(negated_cond, L);
 1938   pushf(); // Preserve flags
 1939   atomic_incl(counter_addr, rscratch);
 1940   popf();
 1941   bind(L);
 1942 }
 1943 
 1944 int MacroAssembler::corrected_idivl(Register reg) {
 1945   // Full implementation of Java idiv and irem; checks for
 1946   // special case as described in JVM spec., p.243 & p.271.
 1947   // The function returns the (pc) offset of the idivl
 1948   // instruction - may be needed for implicit exceptions.
 1949   //
 1950   //         normal case                            special case
 1951   //
 1952   // input : rax: dividend                           min_int
 1953   //         reg: divisor   (may not be rax or rdx)  -1
 1954   //
 1955   // output: rax: quotient  (= rax idiv reg)         min_int
 1956   //         rdx: remainder (= rax irem reg)         0
 1957   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
 1958   const int min_int = 0x80000000;
 1959   Label normal_case, special_case;
 1960 
 1961   // check for special case
 1962   cmpl(rax, min_int);
 1963   jcc(Assembler::notEqual, normal_case);
 1964   xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
 1965   cmpl(reg, -1);
 1966   jcc(Assembler::equal, special_case);
 1967 
 1968   // handle normal case
 1969   bind(normal_case);
 1970   cdql();
 1971   int idivl_offset = offset();
 1972   idivl(reg);
 1973 
 1974   // normal and special case exit
 1975   bind(special_case);
 1976 
 1977   return idivl_offset;
 1978 }
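      // Concrete instance of the special case above: for min_int / -1 the quotient
      // (2^31) does not fit in 32 bits and idivl would raise a divide error, so the
      // divide is skipped entirely; rax is left holding min_int (the quotient the
      // JVM spec requires) and rdx holds the 0 prepared by the xorl (the remainder).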
 1979 
 1980 
 1981 
 1982 void MacroAssembler::decrementl(Register reg, int value) {
 1983   if (value == min_jint) {subl(reg, value) ; return; }
 1984   if (value <  0) { incrementl(reg, -value); return; }
 1985   if (value == 0) {                        ; return; }
 1986   if (value == 1 && UseIncDec) { decl(reg) ; return; }
 1987   /* else */      { subl(reg, value)       ; return; }
 1988 }
 1989 
 1990 void MacroAssembler::decrementl(Address dst, int value) {
 1991   if (value == min_jint) {subl(dst, value) ; return; }
 1992   if (value <  0) { incrementl(dst, -value); return; }
 1993   if (value == 0) {                        ; return; }
 1994   if (value == 1 && UseIncDec) { decl(dst) ; return; }
 1995   /* else */      { subl(dst, value)       ; return; }
 1996 }
 1997 
 1998 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
 1999   assert(shift_value > 0, "illegal shift value");
 2000   Label _is_positive;
 2001   testl (reg, reg);
 2002   jcc (Assembler::positive, _is_positive);
 2003   int offset = (1 << shift_value) - 1 ;
 2004 
 2005   if (offset == 1) {
 2006     incrementl(reg);
 2007   } else {
 2008     addl(reg, offset);
 2009   }
 2010 
 2011   bind (_is_positive);
 2012   sarl(reg, shift_value);
 2013 }
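      // Example of the bias applied in division_with_shift (signed division by
      // 2^shift_value with truncation toward zero, as Java requires): for
      // reg = -7 and shift_value = 2, offset = 3, so -7 + 3 = -4 and -4 >> 2 = -1,
      // i.e. -7 / 4 == -1, whereas a plain arithmetic shift would give -2.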
 2014 
 2015 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2016   assert(rscratch != noreg || always_reachable(src), "missing");
 2017 
 2018   if (reachable(src)) {
 2019     Assembler::divsd(dst, as_Address(src));
 2020   } else {
 2021     lea(rscratch, src);
 2022     Assembler::divsd(dst, Address(rscratch, 0));
 2023   }
 2024 }
 2025 
 2026 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2027   assert(rscratch != noreg || always_reachable(src), "missing");
 2028 
 2029   if (reachable(src)) {
 2030     Assembler::divss(dst, as_Address(src));
 2031   } else {
 2032     lea(rscratch, src);
 2033     Assembler::divss(dst, Address(rscratch, 0));
 2034   }
 2035 }
 2036 
 2037 void MacroAssembler::enter() {
 2038   push(rbp);
 2039   mov(rbp, rsp);
 2040 }
 2041 
 2042 void MacroAssembler::post_call_nop() {
 2043   if (!Continuations::enabled()) {
 2044     return;
 2045   }
 2046   InstructionMark im(this);
 2047   relocate(post_call_nop_Relocation::spec());
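        // The bytes below encode a single 8-byte NOP: 0F 1F 84 00 imm32,
        // i.e. nopl 0x0(%rax,%rax,1) with a 32-bit displacement.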
 2048   emit_int8((int8_t)0x0f);
 2049   emit_int8((int8_t)0x1f);
 2050   emit_int8((int8_t)0x84);
 2051   emit_int8((int8_t)0x00);
 2052   emit_int32(0x00);
 2053 }
 2054 
 2055 // A 5 byte nop that is safe for patching (see patch_verified_entry)
 2056 void MacroAssembler::fat_nop() {
 2057   if (UseAddressNop) {
 2058     addr_nop_5();
 2059   } else {
 2060     emit_int8((int8_t)0x26); // es:
 2061     emit_int8((int8_t)0x2e); // cs:
 2062     emit_int8((int8_t)0x64); // fs:
 2063     emit_int8((int8_t)0x65); // gs:
 2064     emit_int8((int8_t)0x90);
 2065   }
 2066 }
 2067 
 2068 #ifndef _LP64
 2069 void MacroAssembler::fcmp(Register tmp) {
 2070   fcmp(tmp, 1, true, true);
 2071 }
 2072 
 2073 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
 2074   assert(!pop_right || pop_left, "usage error");
 2075   if (VM_Version::supports_cmov()) {
 2076     assert(tmp == noreg, "unneeded temp");
 2077     if (pop_left) {
 2078       fucomip(index);
 2079     } else {
 2080       fucomi(index);
 2081     }
 2082     if (pop_right) {
 2083       fpop();
 2084     }
 2085   } else {
 2086     assert(tmp != noreg, "need temp");
 2087     if (pop_left) {
 2088       if (pop_right) {
 2089         fcompp();
 2090       } else {
 2091         fcomp(index);
 2092       }
 2093     } else {
 2094       fcom(index);
 2095     }
 2096     // convert FPU condition into eflags condition via rax,
 2097     save_rax(tmp);
 2098     fwait(); fnstsw_ax();
 2099     sahf();
 2100     restore_rax(tmp);
 2101   }
 2102   // condition codes set as follows:
 2103   //
 2104   // CF (corresponds to C0) if x < y
 2105   // PF (corresponds to C2) if unordered
 2106   // ZF (corresponds to C3) if x = y
 2107 }
 2108 
 2109 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
 2110   fcmp2int(dst, unordered_is_less, 1, true, true);
 2111 }
 2112 
 2113 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
 2114   fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
 2115   Label L;
 2116   if (unordered_is_less) {
 2117     movl(dst, -1);
 2118     jcc(Assembler::parity, L);
 2119     jcc(Assembler::below , L);
 2120     movl(dst, 0);
 2121     jcc(Assembler::equal , L);
 2122     increment(dst);
 2123   } else { // unordered is greater
 2124     movl(dst, 1);
 2125     jcc(Assembler::parity, L);
 2126     jcc(Assembler::above , L);
 2127     movl(dst, 0);
 2128     jcc(Assembler::equal , L);
 2129     decrementl(dst);
 2130   }
 2131   bind(L);
 2132 }
 2133 
 2134 void MacroAssembler::fld_d(AddressLiteral src) {
 2135   fld_d(as_Address(src));
 2136 }
 2137 
 2138 void MacroAssembler::fld_s(AddressLiteral src) {
 2139   fld_s(as_Address(src));
 2140 }
 2141 
 2142 void MacroAssembler::fldcw(AddressLiteral src) {
 2143   fldcw(as_Address(src));
 2144 }
 2145 
 2146 void MacroAssembler::fpop() {
 2147   ffree();
 2148   fincstp();
 2149 }
 2150 
 2151 void MacroAssembler::fremr(Register tmp) {
 2152   save_rax(tmp);
 2153   { Label L;
 2154     bind(L);
 2155     fprem();
 2156     fwait(); fnstsw_ax();
 2157     sahf();
 2158     jcc(Assembler::parity, L);
 2159   }
 2160   restore_rax(tmp);
 2161   // Result is in ST0.
 2162   // Note: fxch & fpop to get rid of ST1
 2163   // (otherwise FPU stack could overflow eventually)
 2164   fxch(1);
 2165   fpop();
 2166 }
 2167 
 2168 void MacroAssembler::empty_FPU_stack() {
 2169   if (VM_Version::supports_mmx()) {
 2170     emms();
 2171   } else {
 2172     for (int i = 8; i-- > 0; ) ffree(i);
 2173   }
 2174 }
 2175 #endif // !LP64
 2176 
 2177 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2178   assert(rscratch != noreg || always_reachable(src), "missing");
 2179   if (reachable(src)) {
 2180     Assembler::mulpd(dst, as_Address(src));
 2181   } else {
 2182     lea(rscratch, src);
 2183     Assembler::mulpd(dst, Address(rscratch, 0));
 2184   }
 2185 }
 2186 
 2187 void MacroAssembler::load_float(Address src) {
 2188 #ifdef _LP64
 2189   movflt(xmm0, src);
 2190 #else
 2191   if (UseSSE >= 1) {
 2192     movflt(xmm0, src);
 2193   } else {
 2194     fld_s(src);
 2195   }
 2196 #endif // LP64
 2197 }
 2198 
 2199 void MacroAssembler::store_float(Address dst) {
 2200 #ifdef _LP64
 2201   movflt(dst, xmm0);
 2202 #else
 2203   if (UseSSE >= 1) {
 2204     movflt(dst, xmm0);
 2205   } else {
 2206     fstp_s(dst);
 2207   }
 2208 #endif // LP64
 2209 }
 2210 
 2211 void MacroAssembler::load_double(Address src) {
 2212 #ifdef _LP64
 2213   movdbl(xmm0, src);
 2214 #else
 2215   if (UseSSE >= 2) {
 2216     movdbl(xmm0, src);
 2217   } else {
 2218     fld_d(src);
 2219   }
 2220 #endif // LP64
 2221 }
 2222 
 2223 void MacroAssembler::store_double(Address dst) {
 2224 #ifdef _LP64
 2225   movdbl(dst, xmm0);
 2226 #else
 2227   if (UseSSE >= 2) {
 2228     movdbl(dst, xmm0);
 2229   } else {
 2230     fstp_d(dst);
 2231   }
 2232 #endif // LP64
 2233 }
 2234 
 2235 // dst = c = a * b + c
 2236 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 2237   Assembler::vfmadd231sd(c, a, b);
 2238   if (dst != c) {
 2239     movdbl(dst, c);
 2240   }
 2241 }
 2242 
 2243 // dst = c = a * b + c
 2244 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 2245   Assembler::vfmadd231ss(c, a, b);
 2246   if (dst != c) {
 2247     movflt(dst, c);
 2248   }
 2249 }
 2250 
 2251 // dst = c = a * b + c
 2252 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 2253   Assembler::vfmadd231pd(c, a, b, vector_len);
 2254   if (dst != c) {
 2255     vmovdqu(dst, c);
 2256   }
 2257 }
 2258 
 2259 // dst = c = a * b + c
 2260 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 2261   Assembler::vfmadd231ps(c, a, b, vector_len);
 2262   if (dst != c) {
 2263     vmovdqu(dst, c);
 2264   }
 2265 }
 2266 
 2267 // dst = c = a * b + c
 2268 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 2269   Assembler::vfmadd231pd(c, a, b, vector_len);
 2270   if (dst != c) {
 2271     vmovdqu(dst, c);
 2272   }
 2273 }
 2274 
 2275 // dst = c = a * b + c
 2276 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 2277   Assembler::vfmadd231ps(c, a, b, vector_len);
 2278   if (dst != c) {
 2279     vmovdqu(dst, c);
 2280   }
 2281 }
 2282 
 2283 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
 2284   assert(rscratch != noreg || always_reachable(dst), "missing");
 2285 
 2286   if (reachable(dst)) {
 2287     incrementl(as_Address(dst));
 2288   } else {
 2289     lea(rscratch, dst);
 2290     incrementl(Address(rscratch, 0));
 2291   }
 2292 }
 2293 
 2294 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
 2295   incrementl(as_Address(dst, rscratch));
 2296 }
 2297 
 2298 void MacroAssembler::incrementl(Register reg, int value) {
 2299   if (value == min_jint) {addl(reg, value) ; return; }
 2300   if (value <  0) { decrementl(reg, -value); return; }
 2301   if (value == 0) {                        ; return; }
 2302   if (value == 1 && UseIncDec) { incl(reg) ; return; }
 2303   /* else */      { addl(reg, value)       ; return; }
 2304 }
 2305 
 2306 void MacroAssembler::incrementl(Address dst, int value) {
 2307   if (value == min_jint) {addl(dst, value) ; return; }
 2308   if (value <  0) { decrementl(dst, -value); return; }
 2309   if (value == 0) {                        ; return; }
 2310   if (value == 1 && UseIncDec) { incl(dst) ; return; }
 2311   /* else */      { addl(dst, value)       ; return; }
 2312 }
 2313 
 2314 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
 2315   assert(rscratch != noreg || always_reachable(dst), "missing");
 2316 
 2317   if (reachable(dst)) {
 2318     jmp_literal(dst.target(), dst.rspec());
 2319   } else {
 2320     lea(rscratch, dst);
 2321     jmp(rscratch);
 2322   }
 2323 }
 2324 
 2325 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
 2326   assert(rscratch != noreg || always_reachable(dst), "missing");
 2327 
 2328   if (reachable(dst)) {
 2329     InstructionMark im(this);
 2330     relocate(dst.reloc());
 2331     const int short_size = 2;
 2332     const int long_size = 6;
 2333     int offs = (intptr_t)dst.target() - ((intptr_t)pc());
 2334     if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
 2335       // 0111 tttn #8-bit disp
 2336       emit_int8(0x70 | cc);
 2337       emit_int8((offs - short_size) & 0xFF);
 2338     } else {
 2339       // 0000 1111 1000 tttn #32-bit disp
 2340       emit_int8(0x0F);
 2341       emit_int8((unsigned char)(0x80 | cc));
 2342       emit_int32(offs - long_size);
 2343     }
 2344   } else {
 2345 #ifdef ASSERT
 2346     warning("reversing conditional branch");
 2347 #endif /* ASSERT */
 2348     Label skip;
 2349     jccb(reverse[cc], skip);
 2350     lea(rscratch, dst);
 2351     Assembler::jmp(rscratch);
 2352     bind(skip);
 2353   }
 2354 }
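      // When the target is not reachable with a rel32 displacement, jump_cc falls
      // back to branching around an indirect jump, with the condition reversed so
      // execution skips the indirect jump whenever the original condition does
      // not hold:
      //
      //   jcc  reverse[cc], skip
      //   lea  rscratch, dst
      //   jmp  rscratch
      //   skip: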
 2355 
 2356 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
 2357   assert(rscratch != noreg || always_reachable(src), "missing");
 2358 
 2359   if (reachable(src)) {
 2360     Assembler::ldmxcsr(as_Address(src));
 2361   } else {
 2362     lea(rscratch, src);
 2363     Assembler::ldmxcsr(Address(rscratch, 0));
 2364   }
 2365 }
 2366 
 2367 int MacroAssembler::load_signed_byte(Register dst, Address src) {
 2368   int off;
 2369   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
 2370     off = offset();
 2371     movsbl(dst, src); // movsxb
 2372   } else {
 2373     off = load_unsigned_byte(dst, src);
 2374     shll(dst, 24);
 2375     sarl(dst, 24);
 2376   }
 2377   return off;
 2378 }
 2379 
 2380 // Note: load_signed_short used to be called load_signed_word.
 2381 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
 2382 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
 2383 // The term "word" in HotSpot means a 32- or 64-bit machine word.
 2384 int MacroAssembler::load_signed_short(Register dst, Address src) {
 2385   int off;
 2386   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
 2387     // This is dubious: a signed 16 => 64 bit sign extension would seem just
 2388     // as safe, but a 32-bit movswl is what 64-bit has always done, which
 2389     // implies that callers only rely on the low 32 bits.
 2390     off = offset();
 2391     movswl(dst, src); // movsxw
 2392   } else {
 2393     off = load_unsigned_short(dst, src);
 2394     shll(dst, 16);
 2395     sarl(dst, 16);
 2396   }
 2397   return off;
 2398 }
 2399 
 2400 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
 2401   // According to Intel Doc. AP-526 ("Zero-Extension of Short", p. 16,
 2402   // and "3.9 Partial Register Penalties", p. 22).
 2403   int off;
 2404   if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
 2405     off = offset();
 2406     movzbl(dst, src); // movzxb
 2407   } else {
 2408     xorl(dst, dst);
 2409     off = offset();
 2410     movb(dst, src);
 2411   }
 2412   return off;
 2413 }
 2414 
 2415 // Note: load_unsigned_short used to be called load_unsigned_word.
 2416 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
 2417   // According to Intel Doc. AP-526 ("Zero-Extension of Short", p. 16,
 2418   // and "3.9 Partial Register Penalties", p. 22).
 2419   int off;
 2420   if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
 2421     off = offset();
 2422     movzwl(dst, src); // movzxw
 2423   } else {
 2424     xorl(dst, dst);
 2425     off = offset();
 2426     movw(dst, src);
 2427   }
 2428   return off;
 2429 }
 2430 
 2431 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
 2432   switch (size_in_bytes) {
 2433 #ifndef _LP64
 2434   case  8:
 2435     assert(dst2 != noreg, "second dest register required");
 2436     movl(dst,  src);
 2437     movl(dst2, src.plus_disp(BytesPerInt));
 2438     break;
 2439 #else
 2440   case  8:  movq(dst, src); break;
 2441 #endif
 2442   case  4:  movl(dst, src); break;
 2443   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
 2444   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
 2445   default:  ShouldNotReachHere();
 2446   }
 2447 }
 2448 
 2449 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
 2450   switch (size_in_bytes) {
 2451 #ifndef _LP64
 2452   case  8:
 2453     assert(src2 != noreg, "second source register required");
 2454     movl(dst,                        src);
 2455     movl(dst.plus_disp(BytesPerInt), src2);
 2456     break;
 2457 #else
 2458   case  8:  movq(dst, src); break;
 2459 #endif
 2460   case  4:  movl(dst, src); break;
 2461   case  2:  movw(dst, src); break;
 2462   case  1:  movb(dst, src); break;
 2463   default:  ShouldNotReachHere();
 2464   }
 2465 }
 2466 
 2467 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
 2468   assert(rscratch != noreg || always_reachable(dst), "missing");
 2469 
 2470   if (reachable(dst)) {
 2471     movl(as_Address(dst), src);
 2472   } else {
 2473     lea(rscratch, dst);
 2474     movl(Address(rscratch, 0), src);
 2475   }
 2476 }
 2477 
 2478 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
 2479   if (reachable(src)) {
 2480     movl(dst, as_Address(src));
 2481   } else {
 2482     lea(dst, src);
 2483     movl(dst, Address(dst, 0));
 2484   }
 2485 }
 2486 
 2487 // C++ bool manipulation
 2488 
 2489 void MacroAssembler::movbool(Register dst, Address src) {
 2490   if(sizeof(bool) == 1)
 2491     movb(dst, src);
 2492   else if(sizeof(bool) == 2)
 2493     movw(dst, src);
 2494   else if(sizeof(bool) == 4)
 2495     movl(dst, src);
 2496   else
 2497     // unsupported
 2498     ShouldNotReachHere();
 2499 }
 2500 
 2501 void MacroAssembler::movbool(Address dst, bool boolconst) {
 2502   if(sizeof(bool) == 1)
 2503     movb(dst, (int) boolconst);
 2504   else if(sizeof(bool) == 2)
 2505     movw(dst, (int) boolconst);
 2506   else if(sizeof(bool) == 4)
 2507     movl(dst, (int) boolconst);
 2508   else
 2509     // unsupported
 2510     ShouldNotReachHere();
 2511 }
 2512 
 2513 void MacroAssembler::movbool(Address dst, Register src) {
 2514   if(sizeof(bool) == 1)
 2515     movb(dst, src);
 2516   else if(sizeof(bool) == 2)
 2517     movw(dst, src);
 2518   else if(sizeof(bool) == 4)
 2519     movl(dst, src);
 2520   else
 2521     // unsupported
 2522     ShouldNotReachHere();
 2523 }
 2524 
 2525 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2526   assert(rscratch != noreg || always_reachable(src), "missing");
 2527 
 2528   if (reachable(src)) {
 2529     movdl(dst, as_Address(src));
 2530   } else {
 2531     lea(rscratch, src);
 2532     movdl(dst, Address(rscratch, 0));
 2533   }
 2534 }
 2535 
 2536 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2537   assert(rscratch != noreg || always_reachable(src), "missing");
 2538 
 2539   if (reachable(src)) {
 2540     movq(dst, as_Address(src));
 2541   } else {
 2542     lea(rscratch, src);
 2543     movq(dst, Address(rscratch, 0));
 2544   }
 2545 }
 2546 
 2547 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2548   assert(rscratch != noreg || always_reachable(src), "missing");
 2549 
 2550   if (reachable(src)) {
 2551     if (UseXmmLoadAndClearUpper) {
 2552       movsd (dst, as_Address(src));
 2553     } else {
 2554       movlpd(dst, as_Address(src));
 2555     }
 2556   } else {
 2557     lea(rscratch, src);
 2558     if (UseXmmLoadAndClearUpper) {
 2559       movsd (dst, Address(rscratch, 0));
 2560     } else {
 2561       movlpd(dst, Address(rscratch, 0));
 2562     }
 2563   }
 2564 }
 2565 
 2566 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2567   assert(rscratch != noreg || always_reachable(src), "missing");
 2568 
 2569   if (reachable(src)) {
 2570     movss(dst, as_Address(src));
 2571   } else {
 2572     lea(rscratch, src);
 2573     movss(dst, Address(rscratch, 0));
 2574   }
 2575 }
 2576 
 2577 void MacroAssembler::movptr(Register dst, Register src) {
 2578   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 2579 }
 2580 
 2581 void MacroAssembler::movptr(Register dst, Address src) {
 2582   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 2583 }
 2584 
 2585 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
 2586 void MacroAssembler::movptr(Register dst, intptr_t src) {
 2587   LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
 2588 }
 2589 
 2590 void MacroAssembler::movptr(Address dst, Register src) {
 2591   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 2592 }
 2593 
 2594 void MacroAssembler::movptr(Address dst, int32_t src) {
 2595   LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src));
 2596 }
 2597 
 2598 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
 2599   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2600   Assembler::movdqu(dst, src);
 2601 }
 2602 
 2603 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
 2604   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2605   Assembler::movdqu(dst, src);
 2606 }
 2607 
 2608 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
 2609   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2610   Assembler::movdqu(dst, src);
 2611 }
 2612 
 2613 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2614   assert(rscratch != noreg || always_reachable(src), "missing");
 2615 
 2616   if (reachable(src)) {
 2617     movdqu(dst, as_Address(src));
 2618   } else {
 2619     lea(rscratch, src);
 2620     movdqu(dst, Address(rscratch, 0));
 2621   }
 2622 }
 2623 
 2624 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
 2625   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2626   Assembler::vmovdqu(dst, src);
 2627 }
 2628 
 2629 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
 2630   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2631   Assembler::vmovdqu(dst, src);
 2632 }
 2633 
 2634 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
 2635   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2636   Assembler::vmovdqu(dst, src);
 2637 }
 2638 
 2639 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2640   assert(rscratch != noreg || always_reachable(src), "missing");
 2641 
 2642   if (reachable(src)) {
 2643     vmovdqu(dst, as_Address(src));
 2644   }
 2645   else {
 2646     lea(rscratch, src);
 2647     vmovdqu(dst, Address(rscratch, 0));
 2648   }
 2649 }
 2650 
 2651 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2652   assert(rscratch != noreg || always_reachable(src), "missing");
 2653 
 2654   if (vector_len == AVX_512bit) {
 2655     evmovdquq(dst, src, AVX_512bit, rscratch);
 2656   } else if (vector_len == AVX_256bit) {
 2657     vmovdqu(dst, src, rscratch);
 2658   } else {
 2659     movdqu(dst, src, rscratch);
 2660   }
 2661 }
 2662 
 2663 void MacroAssembler::kmov(KRegister dst, Address src) {
 2664   if (VM_Version::supports_avx512bw()) {
 2665     kmovql(dst, src);
 2666   } else {
 2667     assert(VM_Version::supports_evex(), "");
 2668     kmovwl(dst, src);
 2669   }
 2670 }
 2671 
 2672 void MacroAssembler::kmov(Address dst, KRegister src) {
 2673   if (VM_Version::supports_avx512bw()) {
 2674     kmovql(dst, src);
 2675   } else {
 2676     assert(VM_Version::supports_evex(), "");
 2677     kmovwl(dst, src);
 2678   }
 2679 }
 2680 
 2681 void MacroAssembler::kmov(KRegister dst, KRegister src) {
 2682   if (VM_Version::supports_avx512bw()) {
 2683     kmovql(dst, src);
 2684   } else {
 2685     assert(VM_Version::supports_evex(), "");
 2686     kmovwl(dst, src);
 2687   }
 2688 }
 2689 
 2690 void MacroAssembler::kmov(Register dst, KRegister src) {
 2691   if (VM_Version::supports_avx512bw()) {
 2692     kmovql(dst, src);
 2693   } else {
 2694     assert(VM_Version::supports_evex(), "");
 2695     kmovwl(dst, src);
 2696   }
 2697 }
 2698 
 2699 void MacroAssembler::kmov(KRegister dst, Register src) {
 2700   if (VM_Version::supports_avx512bw()) {
 2701     kmovql(dst, src);
 2702   } else {
 2703     assert(VM_Version::supports_evex(), "");
 2704     kmovwl(dst, src);
 2705   }
 2706 }
 2707 
 2708 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
 2709   assert(rscratch != noreg || always_reachable(src), "missing");
 2710 
 2711   if (reachable(src)) {
 2712     kmovql(dst, as_Address(src));
 2713   } else {
 2714     lea(rscratch, src);
 2715     kmovql(dst, Address(rscratch, 0));
 2716   }
 2717 }
 2718 
 2719 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
 2720   assert(rscratch != noreg || always_reachable(src), "missing");
 2721 
 2722   if (reachable(src)) {
 2723     kmovwl(dst, as_Address(src));
 2724   } else {
 2725     lea(rscratch, src);
 2726     kmovwl(dst, Address(rscratch, 0));
 2727   }
 2728 }
 2729 
 2730 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2731                                int vector_len, Register rscratch) {
 2732   assert(rscratch != noreg || always_reachable(src), "missing");
 2733 
 2734   if (reachable(src)) {
 2735     Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
 2736   } else {
 2737     lea(rscratch, src);
 2738     Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
 2739   }
 2740 }
 2741 
 2742 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2743                                int vector_len, Register rscratch) {
 2744   assert(rscratch != noreg || always_reachable(src), "missing");
 2745 
 2746   if (reachable(src)) {
 2747     Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
 2748   } else {
 2749     lea(rscratch, src);
 2750     Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
 2751   }
 2752 }
 2753 
 2754 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2755   assert(rscratch != noreg || always_reachable(src), "missing");
 2756 
 2757   if (reachable(src)) {
 2758     Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
 2759   } else {
 2760     lea(rscratch, src);
 2761     Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
 2762   }
 2763 }
 2764 
 2765 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2766   assert(rscratch != noreg || always_reachable(src), "missing");
 2767 
 2768   if (reachable(src)) {
 2769     Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
 2770   } else {
 2771     lea(rscratch, src);
 2772     Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2773   }
 2774 }
 2775 
 2776 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2777   assert(rscratch != noreg || always_reachable(src), "missing");
 2778 
 2779   if (reachable(src)) {
 2780     Assembler::evmovdquq(dst, as_Address(src), vector_len);
 2781   } else {
 2782     lea(rscratch, src);
 2783     Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
 2784   }
 2785 }
 2786 
 2787 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2788   assert(rscratch != noreg || always_reachable(src), "missing");
 2789 
 2790   if (reachable(src)) {
 2791     Assembler::movdqa(dst, as_Address(src));
 2792   } else {
 2793     lea(rscratch, src);
 2794     Assembler::movdqa(dst, Address(rscratch, 0));
 2795   }
 2796 }
 2797 
 2798 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2799   assert(rscratch != noreg || always_reachable(src), "missing");
 2800 
 2801   if (reachable(src)) {
 2802     Assembler::movsd(dst, as_Address(src));
 2803   } else {
 2804     lea(rscratch, src);
 2805     Assembler::movsd(dst, Address(rscratch, 0));
 2806   }
 2807 }
 2808 
 2809 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2810   assert(rscratch != noreg || always_reachable(src), "missing");
 2811 
 2812   if (reachable(src)) {
 2813     Assembler::movss(dst, as_Address(src));
 2814   } else {
 2815     lea(rscratch, src);
 2816     Assembler::movss(dst, Address(rscratch, 0));
 2817   }
 2818 }
 2819 
 2820 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2821   assert(rscratch != noreg || always_reachable(src), "missing");
 2822 
 2823   if (reachable(src)) {
 2824     Assembler::movddup(dst, as_Address(src));
 2825   } else {
 2826     lea(rscratch, src);
 2827     Assembler::movddup(dst, Address(rscratch, 0));
 2828   }
 2829 }
 2830 
 2831 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2832   assert(rscratch != noreg || always_reachable(src), "missing");
 2833 
 2834   if (reachable(src)) {
 2835     Assembler::vmovddup(dst, as_Address(src), vector_len);
 2836   } else {
 2837     lea(rscratch, src);
 2838     Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
 2839   }
 2840 }
 2841 
 2842 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2843   assert(rscratch != noreg || always_reachable(src), "missing");
 2844 
 2845   if (reachable(src)) {
 2846     Assembler::mulsd(dst, as_Address(src));
 2847   } else {
 2848     lea(rscratch, src);
 2849     Assembler::mulsd(dst, Address(rscratch, 0));
 2850   }
 2851 }
 2852 
 2853 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2854   assert(rscratch != noreg || always_reachable(src), "missing");
 2855 
 2856   if (reachable(src)) {
 2857     Assembler::mulss(dst, as_Address(src));
 2858   } else {
 2859     lea(rscratch, src);
 2860     Assembler::mulss(dst, Address(rscratch, 0));
 2861   }
 2862 }
 2863 
 2864 void MacroAssembler::null_check(Register reg, int offset) {
 2865   if (needs_explicit_null_check(offset)) {
 2866     // provoke OS NULL exception if reg = NULL by
 2867     // accessing M[reg] w/o changing any (non-CC) registers
 2868     // NOTE: cmpl is plenty here to provoke a segv
 2869     cmpptr(rax, Address(reg, 0));
 2870     // Note: should probably use testl(rax, Address(reg, 0));
 2871     //       may be shorter code (however, this version of
 2872     //       testl needs to be implemented first)
 2873   } else {
 2874     // nothing to do, (later) access of M[reg + offset]
 2875     // will provoke OS NULL exception if reg = NULL
 2876   }
 2877 }
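      // Usage note: for small offsets null_check is a no-op because the later
      // access to M[reg + offset] itself faults inside the protected page when
      // reg == NULL (an implicit null check). Only offsets for which
      // needs_explicit_null_check() returns true get the explicit
      // cmpptr(rax, Address(reg, 0)) probe above.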
 2878 
 2879 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2880   andptr(markword, markWord::inline_type_mask_in_place);
 2881   cmpptr(markword, markWord::inline_type_pattern);
 2882   jcc(Assembler::equal, is_inline_type);
 2883 }
 2884 
 2885 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
 2886   movl(temp_reg, Address(klass, Klass::access_flags_offset()));
 2887   testl(temp_reg, JVM_ACC_VALUE);
 2888   jcc(Assembler::notZero, is_inline_type);
 2889 }
 2890 
 2891 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
 2892   testptr(object, object);
 2893   jcc(Assembler::zero, not_inline_type);
 2894   const int is_inline_type_mask = markWord::inline_type_pattern;
 2895   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2896   andptr(tmp, is_inline_type_mask);
 2897   cmpptr(tmp, is_inline_type_mask);
 2898   jcc(Assembler::notEqual, not_inline_type);
 2899 }
 2900 
 2901 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
 2902 #ifdef ASSERT
 2903   {
 2904     Label done_check;
 2905     test_klass_is_inline_type(klass, temp_reg, done_check);
 2906     stop("test_klass_is_empty_inline_type with non inline type klass");
 2907     bind(done_check);
 2908   }
 2909 #endif
 2910   movl(temp_reg, Address(klass, InstanceKlass::misc_status_offset()));
 2911   testl(temp_reg, InstanceKlassMiscStatus::is_empty_inline_type_value());
 2912   jcc(Assembler::notZero, is_empty_inline_type);
 2913 }
 2914 
 2915 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2916   movl(temp_reg, flags);
 2917   shrl(temp_reg, ConstantPoolCacheEntry::is_null_free_inline_type_shift);
 2918   andl(temp_reg, 0x1);
 2919   testl(temp_reg, temp_reg);
 2920   jcc(Assembler::notZero, is_null_free_inline_type);
 2921 }
 2922 
 2923 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2924   movl(temp_reg, flags);
 2925   shrl(temp_reg, ConstantPoolCacheEntry::is_null_free_inline_type_shift);
 2926   andl(temp_reg, 0x1);
 2927   testl(temp_reg, temp_reg);
 2928   jcc(Assembler::zero, not_null_free_inline_type);
 2929 }
 2930 
 2931 void MacroAssembler::test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined) {
 2932   movl(temp_reg, flags);
 2933   shrl(temp_reg, ConstantPoolCacheEntry::is_inlined_shift);
 2934   andl(temp_reg, 0x1);
 2935   testl(temp_reg, temp_reg);
 2936   jcc(Assembler::notZero, is_inlined);
 2937 }
 2938 
 2939 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2940   Label test_mark_word;
 2941   // load mark word
 2942   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
 2943   // the prototype bits are only valid in an unlocked mark word, so check that first
 2944   testl(temp_reg, markWord::unlocked_value);
 2945   jccb(Assembler::notZero, test_mark_word);
 2946   // slow path: the mark word is displaced, load the bits from the klass prototype header
 2947   push(rscratch1);
 2948   load_prototype_header(temp_reg, oop, rscratch1);
 2949   pop(rscratch1);
 2950 
 2951   bind(test_mark_word);
 2952   testl(temp_reg, test_bit);
 2953   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2954 }
 2955 
 2956 void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg,
 2957                                               Label& is_flattened_array) {
 2958 #ifdef _LP64
 2959   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flattened_array);
 2960 #else
 2961   load_klass(temp_reg, oop, noreg);
 2962   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2963   test_flattened_array_layout(temp_reg, is_flattened_array);
 2964 #endif
 2965 }
 2966 
 2967 void MacroAssembler::test_non_flattened_array_oop(Register oop, Register temp_reg,
 2968                                                   Label& is_non_flattened_array) {
 2969 #ifdef _LP64
 2970   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flattened_array);
 2971 #else
 2972   load_klass(temp_reg, oop, noreg);
 2973   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2974   test_non_flattened_array_layout(temp_reg, is_non_flattened_array);
 2975 #endif
 2976 }
 2977 
 2978 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
 2979 #ifdef _LP64
 2980   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2981 #else
 2982   load_klass(temp_reg, oop, noreg);
 2983   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2984   test_null_free_array_layout(temp_reg, is_null_free_array);
 2985 #endif
 2986 }
 2987 
 2988 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
 2989 #ifdef _LP64
 2990   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2991 #else
 2992   load_klass(temp_reg, oop, noreg);
 2993   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2994   test_non_null_free_array_layout(temp_reg, is_non_null_free_array);
 2995 #endif
 2996 }
 2997 
 2998 void MacroAssembler::test_flattened_array_layout(Register lh, Label& is_flattened_array) {
 2999   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3000   jcc(Assembler::notZero, is_flattened_array);
 3001 }
 3002 
 3003 void MacroAssembler::test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array) {
 3004   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 3005   jcc(Assembler::zero, is_non_flattened_array);
 3006 }
 3007 
 3008 void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
 3009   testl(lh, Klass::_lh_null_free_array_bit_inplace);
 3010   jcc(Assembler::notZero, is_null_free_array);
 3011 }
 3012 
 3013 void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
 3014   testl(lh, Klass::_lh_null_free_array_bit_inplace);
 3015   jcc(Assembler::zero, is_non_null_free_array);
 3016 }
 3017 
 3018 
 3019 void MacroAssembler::os_breakpoint() {
 3020   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 3021   // (e.g., MSVC can't call ps() otherwise)
 3022   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 3023 }
 3024 
 3025 void MacroAssembler::unimplemented(const char* what) {
 3026   const char* buf = NULL;
 3027   {
 3028     ResourceMark rm;
 3029     stringStream ss;
 3030     ss.print("unimplemented: %s", what);
 3031     buf = code_string(ss.as_string());
 3032   }
 3033   stop(buf);
 3034 }
 3035 
 3036 #ifdef _LP64
 3037 #define XSTATE_BV 0x200
 3038 #endif
 3039 
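      // Restore the state saved by push_CPU_state: the FPU/XMM state is popped first,
      // then the integer registers and flags.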
 3040 void MacroAssembler::pop_CPU_state() {
 3041   pop_FPU_state();
 3042   pop_IU_state();
 3043 }
 3044 
 3045 void MacroAssembler::pop_FPU_state() {
 3046 #ifndef _LP64
 3047   frstor(Address(rsp, 0));
 3048 #else
 3049   fxrstor(Address(rsp, 0));
 3050 #endif
 3051   addptr(rsp, FPUStateSizeInWords * wordSize);
 3052 }
 3053 
 3054 void MacroAssembler::pop_IU_state() {
 3055   popa();
 3056   LP64_ONLY(addq(rsp, 8));
 3057   popf();
 3058 }
 3059 
 3060 // Save Integer and Float state
 3061 // Warning: stack must be 16-byte aligned (64-bit)
 3062 void MacroAssembler::push_CPU_state() {
 3063   push_IU_state();
 3064   push_FPU_state();
 3065 }
 3066 
 3067 void MacroAssembler::push_FPU_state() {
 3068   subptr(rsp, FPUStateSizeInWords * wordSize);
 3069 #ifndef _LP64
 3070   fnsave(Address(rsp, 0));
 3071   fwait();
 3072 #else
 3073   fxsave(Address(rsp, 0));
 3074 #endif // LP64
 3075 }
 3076 
 3077 void MacroAssembler::push_IU_state() {
 3078   // Push flags first because pusha kills them
 3079   pushf();
 3080   // Make sure rsp stays 16-byte aligned
 3081   LP64_ONLY(subq(rsp, 8));
 3082   pusha();
 3083 }
 3084 
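      // Record the current SP in JavaThread::cont_fastpath if it lies above the value already
      // recorded there; pop_cont_fastpath below clears the field once unwinding reaches it.
      // Both are no-ops when continuations are disabled.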
 3085 void MacroAssembler::push_cont_fastpath() {
 3086   if (!Continuations::enabled()) return;
 3087 
 3088 #ifndef _LP64
 3089   Register rthread = rax;
 3090   Register rrealsp = rbx;
 3091   push(rthread);
 3092   push(rrealsp);
 3093 
 3094   get_thread(rthread);
 3095 
 3096   // The code below wants the original RSP;
 3097   // recover it by undoing the two pushes above.
 3098   movptr(rrealsp, rsp);
 3099   addptr(rrealsp, 2*wordSize);
 3100 #else
 3101   Register rthread = r15_thread;
 3102   Register rrealsp = rsp;
 3103 #endif
 3104 
 3105   Label done;
 3106   cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset()));
 3107   jccb(Assembler::belowEqual, done);
 3108   movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp);
 3109   bind(done);
 3110 
 3111 #ifndef _LP64
 3112   pop(rrealsp);
 3113   pop(rthread);
 3114 #endif
 3115 }
 3116 
 3117 void MacroAssembler::pop_cont_fastpath() {
 3118   if (!Continuations::enabled()) return;
 3119 
 3120 #ifndef _LP64
 3121   Register rthread = rax;
 3122   Register rrealsp = rbx;
 3123   push(rthread);
 3124   push(rrealsp);
 3125 
 3126   get_thread(rthread);
 3127 
 3128   // The code below wants the original RSP;
 3129   // recover it by undoing the two pushes above.
 3130   movptr(rrealsp, rsp);
 3131   addptr(rrealsp, 2*wordSize);
 3132 #else
 3133   Register rthread = r15_thread;
 3134   Register rrealsp = rsp;
 3135 #endif
 3136 
 3137   Label done;
 3138   cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset()));
 3139   jccb(Assembler::below, done);
 3140   movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0);
 3141   bind(done);
 3142 
 3143 #ifndef _LP64
 3144   pop(rrealsp);
 3145   pop(rthread);
 3146 #endif
 3147 }
 3148 
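      // Adjust the per-thread count of held monitors (JavaThread::held_monitor_count);
      // on 32-bit the current thread is located via a scratch register.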
 3149 void MacroAssembler::inc_held_monitor_count() {
 3150 #ifndef _LP64
 3151   Register thread = rax;
 3152   push(thread);
 3153   get_thread(thread);
 3154   incrementl(Address(thread, JavaThread::held_monitor_count_offset()));
 3155   pop(thread);
 3156 #else // LP64
 3157   incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 3158 #endif
 3159 }
 3160 
 3161 void MacroAssembler::dec_held_monitor_count() {
 3162 #ifndef _LP64
 3163   Register thread = rax;
 3164   push(thread);
 3165   get_thread(thread);
 3166   decrementl(Address(thread, JavaThread::held_monitor_count_offset()));
 3167   pop(thread);
 3168 #else // LP64
 3169   decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 3170 #endif
 3171 }
 3172 
 3173 #ifdef ASSERT
 3174 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
 3175 #ifdef _LP64
 3176   Label no_cont;
 3177   movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
 3178   testl(cont, cont);
 3179   jcc(Assembler::zero, no_cont);
 3180   stop(name);
 3181   bind(no_cont);
 3182 #else
 3183   Unimplemented();
 3184 #endif
 3185 }
 3186 #endif
 3187 
 3188 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
 3189   if (!java_thread->is_valid()) { // determine java_thread register
 3190     java_thread = rdi;
 3191     get_thread(java_thread);
 3192   }
 3193   // we must set sp to zero to clear frame
 3194   movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
 3195   // must clear fp, so that compiled frames are not confused; it is
 3196   // possible that we need it only for debugging
 3197   if (clear_fp) {
 3198     movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
 3199   }
 3200   // Always clear the pc because it could have been set by make_walkable()
 3201   movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 3202   vzeroupper();
 3203 }
 3204 
 3205 void MacroAssembler::restore_rax(Register tmp) {
 3206   if (tmp == noreg) pop(rax);
 3207   else if (tmp != rax) mov(rax, tmp);
 3208 }
 3209 
 3210 void MacroAssembler::round_to(Register reg, int modulus) {
 3211   addptr(reg, modulus - 1);
 3212   andptr(reg, -modulus);
 3213 }
 3214 
 3215 void MacroAssembler::save_rax(Register tmp) {
 3216   if (tmp == noreg) push(rax);
 3217   else if (tmp != rax) mov(tmp, rax);
 3218 }
 3219 
 3220 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) {
 3221   if (at_return) {
 3222     // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
 3223     // we may safely use rsp instead to perform the stack watermark check.
 3224     cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset()));
 3225     jcc(Assembler::above, slow_path);
 3226     return;
 3227   }
 3228   testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
 3229   jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
 3230 }
 3231 
 3232 // Calls to C land
 3233 //
 3234 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
 3235 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
 3236 // has to be reset to 0. This is required to allow proper stack traversal.
 3237 void MacroAssembler::set_last_Java_frame(Register java_thread,
 3238                                          Register last_java_sp,
 3239                                          Register last_java_fp,
 3240                                          address  last_java_pc,
 3241                                          Register rscratch) {
 3242   vzeroupper();
 3243   // determine java_thread register
 3244   if (!java_thread->is_valid()) {
 3245     java_thread = rdi;
 3246     get_thread(java_thread);
 3247   }
 3248   // determine last_java_sp register
 3249   if (!last_java_sp->is_valid()) {
 3250     last_java_sp = rsp;
 3251   }
 3252   // last_java_fp is optional
 3253   if (last_java_fp->is_valid()) {
 3254     movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
 3255   }
 3256   // last_java_pc is optional
 3257   if (last_java_pc != NULL) {
 3258     Address java_pc(java_thread,
 3259                     JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
 3260     lea(java_pc, InternalAddress(last_java_pc), rscratch);
 3261   }
 3262   movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
 3263 }
 3264 
 3265 void MacroAssembler::shlptr(Register dst, int imm8) {
 3266   LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
 3267 }
 3268 
 3269 void MacroAssembler::shrptr(Register dst, int imm8) {
 3270   LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
 3271 }
 3272 
 3273 void MacroAssembler::sign_extend_byte(Register reg) {
 3274   if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
 3275     movsbl(reg, reg); // movsxb
 3276   } else {
 3277     shll(reg, 24);
 3278     sarl(reg, 24);
 3279   }
 3280 }
 3281 
 3282 void MacroAssembler::sign_extend_short(Register reg) {
 3283   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
 3284     movswl(reg, reg); // movsxw
 3285   } else {
 3286     shll(reg, 16);
 3287     sarl(reg, 16);
 3288   }
 3289 }
 3290 
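      // test with an immediate: when the immediate is non-negative and fits a narrower operand,
      // use the shorter encoding (the resulting zero flag matches the wider test).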
 3291 void MacroAssembler::testl(Address dst, int32_t imm32) {
 3292   if (imm32 >= 0 && is8bit(imm32)) {
 3293     testb(dst, imm32);
 3294   } else {
 3295     Assembler::testl(dst, imm32);
 3296   }
 3297 }
 3298 
 3299 void MacroAssembler::testl(Register dst, int32_t imm32) {
 3300   if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
 3301     testb(dst, imm32);
 3302   } else {
 3303     Assembler::testl(dst, imm32);
 3304   }
 3305 }
 3306 
 3307 void MacroAssembler::testl(Register dst, AddressLiteral src) {
 3308   assert(always_reachable(src), "Address should be reachable");
 3309   testl(dst, as_Address(src));
 3310 }
 3311 
 3312 #ifdef _LP64
 3313 
 3314 void MacroAssembler::testq(Address dst, int32_t imm32) {
 3315   if (imm32 >= 0) {
 3316     testl(dst, imm32);
 3317   } else {
 3318     Assembler::testq(dst, imm32);
 3319   }
 3320 }
 3321 
 3322 void MacroAssembler::testq(Register dst, int32_t imm32) {
 3323   if (imm32 >= 0) {
 3324     testl(dst, imm32);
 3325   } else {
 3326     Assembler::testq(dst, imm32);
 3327   }
 3328 }
 3329 
 3330 #endif
 3331 
 3332 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
 3333   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3334   Assembler::pcmpeqb(dst, src);
 3335 }
 3336 
 3337 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
 3338   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3339   Assembler::pcmpeqw(dst, src);
 3340 }
 3341 
 3342 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
 3343   assert((dst->encoding() < 16),"XMM register should be 0-15");
 3344   Assembler::pcmpestri(dst, src, imm8);
 3345 }
 3346 
 3347 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
 3348   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 3349   Assembler::pcmpestri(dst, src, imm8);
 3350 }
 3351 
 3352 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
 3353   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3354   Assembler::pmovzxbw(dst, src);
 3355 }
 3356 
 3357 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
 3358   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3359   Assembler::pmovzxbw(dst, src);
 3360 }
 3361 
 3362 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
 3363   assert((src->encoding() < 16),"XMM register should be 0-15");
 3364   Assembler::pmovmskb(dst, src);
 3365 }
 3366 
 3367 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
 3368   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 3369   Assembler::ptest(dst, src);
 3370 }
 3371 
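      // AddressLiteral wrappers for SSE/AVX instructions: when the literal is directly reachable
      // (RIP-relative on 64-bit) it is used as-is; otherwise its address is materialized in
      // rscratch and a register-indirect operand is used.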
 3372 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3373   assert(rscratch != noreg || always_reachable(src), "missing");
 3374 
 3375   if (reachable(src)) {
 3376     Assembler::sqrtss(dst, as_Address(src));
 3377   } else {
 3378     lea(rscratch, src);
 3379     Assembler::sqrtss(dst, Address(rscratch, 0));
 3380   }
 3381 }
 3382 
 3383 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3384   assert(rscratch != noreg || always_reachable(src), "missing");
 3385 
 3386   if (reachable(src)) {
 3387     Assembler::subsd(dst, as_Address(src));
 3388   } else {
 3389     lea(rscratch, src);
 3390     Assembler::subsd(dst, Address(rscratch, 0));
 3391   }
 3392 }
 3393 
 3394 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
 3395   assert(rscratch != noreg || always_reachable(src), "missing");
 3396 
 3397   if (reachable(src)) {
 3398     Assembler::roundsd(dst, as_Address(src), rmode);
 3399   } else {
 3400     lea(rscratch, src);
 3401     Assembler::roundsd(dst, Address(rscratch, 0), rmode);
 3402   }
 3403 }
 3404 
 3405 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3406   assert(rscratch != noreg || always_reachable(src), "missing");
 3407 
 3408   if (reachable(src)) {
 3409     Assembler::subss(dst, as_Address(src));
 3410   } else {
 3411     lea(rscratch, src);
 3412     Assembler::subss(dst, Address(rscratch, 0));
 3413   }
 3414 }
 3415 
 3416 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3417   assert(rscratch != noreg || always_reachable(src), "missing");
 3418 
 3419   if (reachable(src)) {
 3420     Assembler::ucomisd(dst, as_Address(src));
 3421   } else {
 3422     lea(rscratch, src);
 3423     Assembler::ucomisd(dst, Address(rscratch, 0));
 3424   }
 3425 }
 3426 
 3427 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3428   assert(rscratch != noreg || always_reachable(src), "missing");
 3429 
 3430   if (reachable(src)) {
 3431     Assembler::ucomiss(dst, as_Address(src));
 3432   } else {
 3433     lea(rscratch, src);
 3434     Assembler::ucomiss(dst, Address(rscratch, 0));
 3435   }
 3436 }
 3437 
 3438 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3439   assert(rscratch != noreg || always_reachable(src), "missing");
 3440 
 3441   // Used in sign-bit flipping with aligned address.
 3442   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 3443   if (reachable(src)) {
 3444     Assembler::xorpd(dst, as_Address(src));
 3445   } else {
 3446     lea(rscratch, src);
 3447     Assembler::xorpd(dst, Address(rscratch, 0));
 3448   }
 3449 }
 3450 
 3451 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
 3452   if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
 3453     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 3454   }
 3455   else {
 3456     Assembler::xorpd(dst, src);
 3457   }
 3458 }
 3459 
 3460 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
 3461   if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
 3462     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 3463   } else {
 3464     Assembler::xorps(dst, src);
 3465   }
 3466 }
 3467 
 3468 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3469   assert(rscratch != noreg || always_reachable(src), "missing");
 3470 
 3471   // Used in sign-bit flipping with aligned address.
 3472   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 3473   if (reachable(src)) {
 3474     Assembler::xorps(dst, as_Address(src));
 3475   } else {
 3476     lea(rscratch, src);
 3477     Assembler::xorps(dst, Address(rscratch, 0));
 3478   }
 3479 }
 3480 
 3481 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
 3482   assert(rscratch != noreg || always_reachable(src), "missing");
 3483 
 3484   // Used in sign-bit flipping with aligned address.
 3485   bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
 3486   assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
 3487   if (reachable(src)) {
 3488     Assembler::pshufb(dst, as_Address(src));
 3489   } else {
 3490     lea(rscratch, src);
 3491     Assembler::pshufb(dst, Address(rscratch, 0));
 3492   }
 3493 }
 3494 
 3495 // AVX 3-operands instructions
 3496 
 3497 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3498   assert(rscratch != noreg || always_reachable(src), "missing");
 3499 
 3500   if (reachable(src)) {
 3501     vaddsd(dst, nds, as_Address(src));
 3502   } else {
 3503     lea(rscratch, src);
 3504     vaddsd(dst, nds, Address(rscratch, 0));
 3505   }
 3506 }
 3507 
 3508 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3509   assert(rscratch != noreg || always_reachable(src), "missing");
 3510 
 3511   if (reachable(src)) {
 3512     vaddss(dst, nds, as_Address(src));
 3513   } else {
 3514     lea(rscratch, src);
 3515     vaddss(dst, nds, Address(rscratch, 0));
 3516   }
 3517 }
 3518 
 3519 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3520   assert(UseAVX > 0, "requires some form of AVX");
 3521   assert(rscratch != noreg || always_reachable(src), "missing");
 3522 
 3523   if (reachable(src)) {
 3524     Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
 3525   } else {
 3526     lea(rscratch, src);
 3527     Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
 3528   }
 3529 }
 3530 
 3531 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3532   assert(UseAVX > 0, "requires some form of AVX");
 3533   assert(rscratch != noreg || always_reachable(src), "missing");
 3534 
 3535   if (reachable(src)) {
 3536     Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
 3537   } else {
 3538     lea(rscratch, src);
 3539     Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
 3540   }
 3541 }
 3542 
 3543 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 3544   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3545   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 3546 
 3547   vandps(dst, nds, negate_field, vector_len, rscratch);
 3548 }
 3549 
 3550 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 3551   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3552   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 3553 
 3554   vandpd(dst, nds, negate_field, vector_len, rscratch);
 3555 }
 3556 
 3557 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3558   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3559   Assembler::vpaddb(dst, nds, src, vector_len);
 3560 }
 3561 
 3562 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3563   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3564   Assembler::vpaddb(dst, nds, src, vector_len);
 3565 }
 3566 
 3567 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3568   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3569   Assembler::vpaddw(dst, nds, src, vector_len);
 3570 }
 3571 
 3572 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3573   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3574   Assembler::vpaddw(dst, nds, src, vector_len);
 3575 }
 3576 
 3577 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3578   assert(rscratch != noreg || always_reachable(src), "missing");
 3579 
 3580   if (reachable(src)) {
 3581     Assembler::vpand(dst, nds, as_Address(src), vector_len);
 3582   } else {
 3583     lea(rscratch, src);
 3584     Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
 3585   }
 3586 }
 3587 
 3588 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 3589   assert(rscratch != noreg || always_reachable(src), "missing");
 3590 
 3591   if (reachable(src)) {
 3592     Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
 3593   } else {
 3594     lea(rscratch, src);
 3595     Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
 3596   }
 3597 }
 3598 
 3599 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 3600   assert(rscratch != noreg || always_reachable(src), "missing");
 3601 
 3602   if (reachable(src)) {
 3603     Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
 3604   } else {
 3605     lea(rscratch, src);
 3606     Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
 3607   }
 3608 }
 3609 
 3610 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 3611   assert(rscratch != noreg || always_reachable(src), "missing");
 3612 
 3613   if (reachable(src)) {
 3614     Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
 3615   } else {
 3616     lea(rscratch, src);
 3617     Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
 3618   }
 3619 }
 3620 
 3621 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 3622   assert(rscratch != noreg || always_reachable(src), "missing");
 3623 
 3624   if (reachable(src)) {
 3625     Assembler::vbroadcastss(dst, as_Address(src), vector_len);
 3626   } else {
 3627     lea(rscratch, src);
 3628     Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
 3629   }
 3630 }
 3631 
 3632 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3633   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3634   Assembler::vpcmpeqb(dst, nds, src, vector_len);
 3635 }
 3636 
 3637 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3638   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3639   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 3640 }
 3641 
 3642 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3643   assert(rscratch != noreg || always_reachable(src), "missing");
 3644 
 3645   if (reachable(src)) {
 3646     Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
 3647   } else {
 3648     lea(rscratch, src);
 3649     Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
 3650   }
 3651 }
 3652 
 3653 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3654                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3655   assert(rscratch != noreg || always_reachable(src), "missing");
 3656 
 3657   if (reachable(src)) {
 3658     Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3659   } else {
 3660     lea(rscratch, src);
 3661     Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3662   }
 3663 }
 3664 
 3665 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3666                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3667   assert(rscratch != noreg || always_reachable(src), "missing");
 3668 
 3669   if (reachable(src)) {
 3670     Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3671   } else {
 3672     lea(rscratch, src);
 3673     Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3674   }
 3675 }
 3676 
 3677 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3678                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3679   assert(rscratch != noreg || always_reachable(src), "missing");
 3680 
 3681   if (reachable(src)) {
 3682     Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3683   } else {
 3684     lea(rscratch, src);
 3685     Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3686   }
 3687 }
 3688 
 3689 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3690                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3691   assert(rscratch != noreg || always_reachable(src), "missing");
 3692 
 3693   if (reachable(src)) {
 3694     Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3695   } else {
 3696     lea(rscratch, src);
 3697     Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3698   }
 3699 }
 3700 
 3701 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
 3702   if (width == Assembler::Q) {
 3703     Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
 3704   } else {
 3705     Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
 3706   }
 3707 }
 3708 
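      // Synthesize the requested predicate from the EQ/GT encodings the hardware provides:
      // neq, le and nlt are formed by comparing and then inverting the result via XOR with an
      // all-ones vector.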
 3709 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
 3710   int eq_cond_enc = 0x29;
 3711   int gt_cond_enc = 0x37;
 3712   if (width != Assembler::Q) {
 3713     eq_cond_enc = 0x74 + width;
 3714     gt_cond_enc = 0x64 + width;
 3715   }
 3716   switch (cond) {
 3717   case eq:
 3718     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3719     break;
 3720   case neq:
 3721     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3722     vallones(xtmp, vector_len);
 3723     vpxor(dst, xtmp, dst, vector_len);
 3724     break;
 3725   case le:
 3726     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3727     vallones(xtmp, vector_len);
 3728     vpxor(dst, xtmp, dst, vector_len);
 3729     break;
 3730   case nlt:
 3731     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3732     vallones(xtmp, vector_len);
 3733     vpxor(dst, xtmp, dst, vector_len);
 3734     break;
 3735   case lt:
 3736     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3737     break;
 3738   case nle:
 3739     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3740     break;
 3741   default:
 3742     assert(false, "Should not reach here");
 3743   }
 3744 }
 3745 
 3746 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
 3747   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3748   Assembler::vpmovzxbw(dst, src, vector_len);
 3749 }
 3750 
 3751 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
 3752   assert((src->encoding() < 16),"XMM register should be 0-15");
 3753   Assembler::vpmovmskb(dst, src, vector_len);
 3754 }
 3755 
 3756 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3757   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3758   Assembler::vpmullw(dst, nds, src, vector_len);
 3759 }
 3760 
 3761 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3762   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3763   Assembler::vpmullw(dst, nds, src, vector_len);
 3764 }
 3765 
 3766 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3767   assert((UseAVX > 0), "AVX support is needed");
 3768   assert(rscratch != noreg || always_reachable(src), "missing");
 3769 
 3770   if (reachable(src)) {
 3771     Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
 3772   } else {
 3773     lea(rscratch, src);
 3774     Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
 3775   }
 3776 }
 3777 
 3778 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3779   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3780   Assembler::vpsubb(dst, nds, src, vector_len);
 3781 }
 3782 
 3783 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3784   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3785   Assembler::vpsubb(dst, nds, src, vector_len);
 3786 }
 3787 
 3788 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3789   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3790   Assembler::vpsubw(dst, nds, src, vector_len);
 3791 }
 3792 
 3793 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3794   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3795   Assembler::vpsubw(dst, nds, src, vector_len);
 3796 }
 3797 
 3798 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3799   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3800   Assembler::vpsraw(dst, nds, shift, vector_len);
 3801 }
 3802 
 3803 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3804   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3805   Assembler::vpsraw(dst, nds, shift, vector_len);
 3806 }
 3807 
 3808 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3809   assert(UseAVX > 2,"");
 3810   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3811      vector_len = 2;
 3812   }
 3813   Assembler::evpsraq(dst, nds, shift, vector_len);
 3814 }
 3815 
 3816 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3817   assert(UseAVX > 2,"");
 3818   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3819      vector_len = 2;
 3820   }
 3821   Assembler::evpsraq(dst, nds, shift, vector_len);
 3822 }
 3823 
 3824 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3825   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3826   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3827 }
 3828 
 3829 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3830   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3831   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3832 }
 3833 
 3834 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3835   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3836   Assembler::vpsllw(dst, nds, shift, vector_len);
 3837 }
 3838 
 3839 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3840   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3841   Assembler::vpsllw(dst, nds, shift, vector_len);
 3842 }
 3843 
 3844 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
 3845   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 3846   Assembler::vptest(dst, src);
 3847 }
 3848 
 3849 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
 3850   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3851   Assembler::punpcklbw(dst, src);
 3852 }
 3853 
 3854 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
 3855   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 3856   Assembler::pshufd(dst, src, mode);
 3857 }
 3858 
 3859 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
 3860   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3861   Assembler::pshuflw(dst, src, mode);
 3862 }
 3863 
 3864 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3865   assert(rscratch != noreg || always_reachable(src), "missing");
 3866 
 3867   if (reachable(src)) {
 3868     vandpd(dst, nds, as_Address(src), vector_len);
 3869   } else {
 3870     lea(rscratch, src);
 3871     vandpd(dst, nds, Address(rscratch, 0), vector_len);
 3872   }
 3873 }
 3874 
 3875 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3876   assert(rscratch != noreg || always_reachable(src), "missing");
 3877 
 3878   if (reachable(src)) {
 3879     vandps(dst, nds, as_Address(src), vector_len);
 3880   } else {
 3881     lea(rscratch, src);
 3882     vandps(dst, nds, Address(rscratch, 0), vector_len);
 3883   }
 3884 }
 3885 
 3886 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3887                             bool merge, int vector_len, Register rscratch) {
 3888   assert(rscratch != noreg || always_reachable(src), "missing");
 3889 
 3890   if (reachable(src)) {
 3891     Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
 3892   } else {
 3893     lea(rscratch, src);
 3894     Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 3895   }
 3896 }
 3897 
 3898 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3899   assert(rscratch != noreg || always_reachable(src), "missing");
 3900 
 3901   if (reachable(src)) {
 3902     vdivsd(dst, nds, as_Address(src));
 3903   } else {
 3904     lea(rscratch, src);
 3905     vdivsd(dst, nds, Address(rscratch, 0));
 3906   }
 3907 }
 3908 
 3909 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3910   assert(rscratch != noreg || always_reachable(src), "missing");
 3911 
 3912   if (reachable(src)) {
 3913     vdivss(dst, nds, as_Address(src));
 3914   } else {
 3915     lea(rscratch, src);
 3916     vdivss(dst, nds, Address(rscratch, 0));
 3917   }
 3918 }
 3919 
 3920 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3921   assert(rscratch != noreg || always_reachable(src), "missing");
 3922 
 3923   if (reachable(src)) {
 3924     vmulsd(dst, nds, as_Address(src));
 3925   } else {
 3926     lea(rscratch, src);
 3927     vmulsd(dst, nds, Address(rscratch, 0));
 3928   }
 3929 }
 3930 
 3931 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3932   assert(rscratch != noreg || always_reachable(src), "missing");
 3933 
 3934   if (reachable(src)) {
 3935     vmulss(dst, nds, as_Address(src));
 3936   } else {
 3937     lea(rscratch, src);
 3938     vmulss(dst, nds, Address(rscratch, 0));
 3939   }
 3940 }
 3941 
 3942 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3943   assert(rscratch != noreg || always_reachable(src), "missing");
 3944 
 3945   if (reachable(src)) {
 3946     vsubsd(dst, nds, as_Address(src));
 3947   } else {
 3948     lea(rscratch, src);
 3949     vsubsd(dst, nds, Address(rscratch, 0));
 3950   }
 3951 }
 3952 
 3953 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3954   assert(rscratch != noreg || always_reachable(src), "missing");
 3955 
 3956   if (reachable(src)) {
 3957     vsubss(dst, nds, as_Address(src));
 3958   } else {
 3959     lea(rscratch, src);
 3960     vsubss(dst, nds, Address(rscratch, 0));
 3961   }
 3962 }
 3963 
 3964 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3965   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3966   assert(rscratch != noreg || always_reachable(src), "missing");
 3967 
 3968   vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3969 }
 3970 
 3971 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3972   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3973   assert(rscratch != noreg || always_reachable(src), "missing");
 3974 
 3975   vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3976 }
 3977 
 3978 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3979   assert(rscratch != noreg || always_reachable(src), "missing");
 3980 
 3981   if (reachable(src)) {
 3982     vxorpd(dst, nds, as_Address(src), vector_len);
 3983   } else {
 3984     lea(rscratch, src);
 3985     vxorpd(dst, nds, Address(rscratch, 0), vector_len);
 3986   }
 3987 }
 3988 
 3989 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3990   assert(rscratch != noreg || always_reachable(src), "missing");
 3991 
 3992   if (reachable(src)) {
 3993     vxorps(dst, nds, as_Address(src), vector_len);
 3994   } else {
 3995     lea(rscratch, src);
 3996     vxorps(dst, nds, Address(rscratch, 0), vector_len);
 3997   }
 3998 }
 3999 
 4000 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 4001   assert(rscratch != noreg || always_reachable(src), "missing");
 4002 
 4003   if (UseAVX > 1 || (vector_len < 1)) {
 4004     if (reachable(src)) {
 4005       Assembler::vpxor(dst, nds, as_Address(src), vector_len);
 4006     } else {
 4007       lea(rscratch, src);
 4008       Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
 4009     }
 4010   } else {
 4011     MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
 4012   }
 4013 }
 4014 
 4015 void MacroAssembler::vpermd(XMMRegister dst,  XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 4016   assert(rscratch != noreg || always_reachable(src), "missing");
 4017 
 4018   if (reachable(src)) {
 4019     Assembler::vpermd(dst, nds, as_Address(src), vector_len);
 4020   } else {
 4021     lea(rscratch, src);
 4022     Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
 4023   }
 4024 }
 4025 
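      // Clear the weak-handle tag bit so the value can be dereferenced like an ordinary jobject.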
 4026 void MacroAssembler::clear_jweak_tag(Register possibly_jweak) {
 4027   const int32_t inverted_jweak_mask = ~static_cast<int32_t>(JNIHandles::weak_tag_mask);
 4028   STATIC_ASSERT(inverted_jweak_mask == -2); // otherwise check this code
 4029   // The inverted mask is sign-extended
 4030   andptr(possibly_jweak, inverted_jweak_mask);
 4031 }
 4032 
 4033 void MacroAssembler::resolve_jobject(Register value,
 4034                                      Register thread,
 4035                                      Register tmp) {
 4036   assert_different_registers(value, thread, tmp);
 4037   Label done, not_weak;
 4038   testptr(value, value);
 4039   jcc(Assembler::zero, done);                // Use NULL as-is.
 4040   testptr(value, JNIHandles::weak_tag_mask); // Test for jweak tag.
 4041   jcc(Assembler::zero, not_weak);
 4042   // Resolve jweak.
 4043   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 4044                  value, Address(value, -JNIHandles::weak_tag_value), tmp, thread);
 4045   verify_oop(value);
 4046   jmp(done);
 4047   bind(not_weak);
 4048   // Resolve (untagged) jobject.
 4049   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
 4050   verify_oop(value);
 4051   bind(done);
 4052 }
 4053 
 4054 void MacroAssembler::subptr(Register dst, int32_t imm32) {
 4055   LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
 4056 }
 4057 
 4058 // Force generation of a 4-byte immediate value even if it fits into 8 bits
 4059 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
 4060   LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
 4061 }
 4062 
 4063 void MacroAssembler::subptr(Register dst, Register src) {
 4064   LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
 4065 }
 4066 
 4067 // C++ bool manipulation
 4068 void MacroAssembler::testbool(Register dst) {
 4069   if(sizeof(bool) == 1)
 4070     testb(dst, 0xff);
 4071   else if(sizeof(bool) == 2) {
 4072     // testw implementation needed for two byte bools
 4073     ShouldNotReachHere();
 4074   } else if(sizeof(bool) == 4)
 4075     testl(dst, dst);
 4076   else
 4077     // unsupported
 4078     ShouldNotReachHere();
 4079 }
 4080 
 4081 void MacroAssembler::testptr(Register dst, Register src) {
 4082   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
 4083 }
 4084 
 4085 // Object / value buffer allocation...
 4086 //
 4087 // Kills klass and rsi on LP64
 4088 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 4089                                        Register t1, Register t2,
 4090                                        bool clear_fields, Label& alloc_failed)
 4091 {
 4092   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 4093   Register layout_size = t1;
 4094   assert(new_obj == rax, "needs to be rax");
 4095   assert_different_registers(klass, new_obj, t1, t2);
 4096 
 4097   // get instance_size in InstanceKlass (scaled to a count of bytes)
 4098   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 4099   // test to see if it has a finalizer or is malformed in some way
 4100   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 4101   jcc(Assembler::notZero, slow_case_no_pop);
 4102 
 4103   // Allocate the instance:
 4104   //  If TLAB is enabled:
 4105   //    Try to allocate in the TLAB.
 4106   //    If fails, go to the slow path.
 4107   //  Else If inline contiguous allocations are enabled:
 4108   //    Try to allocate in eden.
 4109   //    If fails due to heap end, go to slow path.
 4110   //
 4111   //  If TLAB is enabled OR inline contiguous is enabled:
 4112   //    Initialize the allocation.
 4113   //    Exit.
 4114   //
 4115   //  Go to slow path.
 4116 
 4117   push(klass);
 4118   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
 4119 #ifndef _LP64
 4120   if (UseTLAB) {
 4121     get_thread(thread);
 4122   }
 4123 #endif // _LP64
 4124 
 4125   if (UseTLAB) {
 4126     tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
 4127     if (ZeroTLAB || (!clear_fields)) {
 4128       // the fields have been already cleared
 4129       jmp(initialize_header);
 4130     } else {
 4131       // initialize both the header and fields
 4132       jmp(initialize_object);
 4133     }
 4134   } else {
 4135     jmp(slow_case);
 4136   }
 4137 
 4138   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
 4139   // Otherwise, skip this and go to the slow path.
 4140   if (UseTLAB) {
 4141     if (clear_fields) {
 4142       // The object is initialized before the header.  If the object size is
 4143       // zero, go directly to the header initialization.
 4144       bind(initialize_object);
 4145       decrement(layout_size, sizeof(oopDesc));
 4146       jcc(Assembler::zero, initialize_header);
 4147 
 4148       // Initialize topmost object field, divide size by 8, check if odd and
 4149       // test if zero.
 4150       Register zero = klass;
 4151       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 4152       shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 4153 
 4154   #ifdef ASSERT
 4155       // make sure instance_size was multiple of 8
 4156       Label L;
 4157       // Ignore partial flag stall after shrl() since it is debug VM
 4158       jcc(Assembler::carryClear, L);
 4159       stop("object size is not multiple of 2 - adjust this code");
 4160       bind(L);
 4161       // must be > 0, no extra check needed here
 4162   #endif
 4163 
 4164       // initialize remaining object fields: instance_size was a multiple of 8
 4165       {
 4166         Label loop;
 4167         bind(loop);
 4168         movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
 4169         NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
 4170         decrement(layout_size);
 4171         jcc(Assembler::notZero, loop);
 4172       }
 4173     } // clear_fields
 4174 
 4175     // initialize object header only.
 4176     bind(initialize_header);
 4177     pop(klass);
 4178     Register mark_word = t2;
 4179     movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 4180     movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 4181 #ifdef _LP64
 4182     xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 4183     store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 4184 #endif
 4185     movptr(t2, klass);         // preserve klass
 4186     store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 4187 
 4188     jmp(done);
 4189   }
 4190 
 4191   bind(slow_case);
 4192   pop(klass);
 4193   bind(slow_case_no_pop);
 4194   jmp(alloc_failed);
 4195 
 4196   bind(done);
 4197 }
 4198 
 4199 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 4200 void MacroAssembler::tlab_allocate(Register thread, Register obj,
 4201                                    Register var_size_in_bytes,
 4202                                    int con_size_in_bytes,
 4203                                    Register t1,
 4204                                    Register t2,
 4205                                    Label& slow_case) {
 4206   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 4207   bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 4208 }
 4209 
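      // The set of general-purpose registers that a C call may clobber on this platform.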
 4210 RegSet MacroAssembler::call_clobbered_gp_registers() {
 4211   RegSet regs;
 4212 #ifdef _LP64
 4213   regs += RegSet::of(rax, rcx, rdx);
 4214 #ifndef WINDOWS
 4215   regs += RegSet::of(rsi, rdi);
 4216 #endif
 4217   regs += RegSet::range(r8, r11);
 4218 #else
 4219   regs += RegSet::of(rax, rcx, rdx);
 4220 #endif
 4221   return regs;
 4222 }
 4223 
 4224 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
 4225   int num_xmm_registers = XMMRegister::available_xmm_registers();
 4226 #if defined(WINDOWS) && defined(_LP64)
 4227   XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
 4228   if (num_xmm_registers > 16) {
 4229      result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
 4230   }
 4231   return result;
 4232 #else
 4233   return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
 4234 #endif
 4235 }
 4236 
 4237 static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor
 4238 
 4239 #ifndef _LP64
 4240 static bool use_x87_registers() { return UseSSE < 2; }
 4241 #endif
 4242 static bool use_xmm_registers() { return UseSSE >= 1; }
 4243 
 4244 // C1 only ever uses the first double/float of the XMM register.
 4245 static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); }
 4246 
 4247 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 4248   if (UseSSE == 1) {
 4249     masm->movflt(Address(rsp, offset), reg);
 4250   } else {
 4251     masm->movdbl(Address(rsp, offset), reg);
 4252   }
 4253 }
 4254 
 4255 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 4256   if (UseSSE == 1) {
 4257     masm->movflt(reg, Address(rsp, offset));
 4258   } else {
 4259     masm->movdbl(reg, Address(rsp, offset));
 4260   }
 4261 }
 4262 
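      // Compute the sizes of the GP, x87 and XMM sections of the register save area used by
      // push/pop_call_clobbered_registers_except(); returns their total in bytes.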
 4263 int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, bool save_fpu,
 4264                            int& gp_area_size, int& fp_area_size, int& xmm_area_size) {
 4265 
 4266   gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
 4267                          StackAlignmentInBytes);
 4268 #ifdef _LP64
 4269   fp_area_size = 0;
 4270 #else
 4271   fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0;
 4272 #endif
 4273   xmm_area_size = (save_fpu && use_xmm_registers()) ? xmm_registers.size() * xmm_save_size() : 0;
 4274 
 4275   return gp_area_size + fp_area_size + xmm_area_size;
 4276 }
 4277 
 4278 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
 4279   block_comment("push_call_clobbered_registers start");
 4280   // Regular registers
 4281   RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
 4282 
 4283   int gp_area_size;
 4284   int fp_area_size;
 4285   int xmm_area_size;
 4286   int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
 4287                                                gp_area_size, fp_area_size, xmm_area_size);
 4288   subptr(rsp, total_save_size);
 4289 
 4290   push_set(gp_registers_to_push, 0);
 4291 
 4292 #ifndef _LP64
 4293   if (save_fpu && use_x87_registers()) {
 4294     fnsave(Address(rsp, gp_area_size));
 4295     fwait();
 4296   }
 4297 #endif
 4298   if (save_fpu && use_xmm_registers()) {
 4299     push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
 4300   }
 4301 
 4302   block_comment("push_call_clobbered_registers end");
 4303 }
 4304 
 4305 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
 4306   block_comment("pop_call_clobbered_registers start");
 4307 
 4308   RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
 4309 
 4310   int gp_area_size;
 4311   int fp_area_size;
 4312   int xmm_area_size;
 4313   int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
 4314                                                gp_area_size, fp_area_size, xmm_area_size);
 4315 
 4316   if (restore_fpu && use_xmm_registers()) {
 4317     pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
 4318   }
 4319 #ifndef _LP64
 4320   if (restore_fpu && use_x87_registers()) {
 4321     frstor(Address(rsp, gp_area_size));
 4322   }
 4323 #endif
 4324 
 4325   pop_set(gp_registers_to_pop, 0);
 4326 
 4327   addptr(rsp, total_save_size);
 4328 
 4329   vzeroupper();
 4330 
 4331   block_comment("pop_call_clobbered_registers end");
 4332 }
 4333 
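      // Spill the given XMM registers to the stack starting at [rsp + offset]; the matching
      // pop_set below restores them in reverse order from the same layout.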
 4334 void MacroAssembler::push_set(XMMRegSet set, int offset) {
 4335   assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
 4336   int spill_offset = offset;
 4337 
 4338   for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
 4339     save_xmm_register(this, spill_offset, *it);
 4340     spill_offset += xmm_save_size();
 4341   }
 4342 }
 4343 
 4344 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
 4345   int restore_size = set.size() * xmm_save_size();
 4346   assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
 4347 
 4348   int restore_offset = offset + restore_size - xmm_save_size();
 4349 
 4350   for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
 4351     restore_xmm_register(this, restore_offset, *it);
 4352     restore_offset -= xmm_save_size();
 4353   }
 4354 }
 4355 
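      // Spill the given GP registers; with offset == -1, an aligned area is first reserved on the
      // stack and later released by the matching pop_set.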
 4356 void MacroAssembler::push_set(RegSet set, int offset) {
 4357   int spill_offset;
 4358   if (offset == -1) {
 4359     int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 4360     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 4361     subptr(rsp, aligned_size);
 4362     spill_offset = 0;
 4363   } else {
 4364     spill_offset = offset;
 4365   }
 4366 
 4367   for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
 4368     movptr(Address(rsp, spill_offset), *it);
 4369     spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 4370   }
 4371 }
 4372 
 4373 void MacroAssembler::pop_set(RegSet set, int offset) {
 4374 
 4375   int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 4376   int restore_size = set.size() * gp_reg_size;
 4377   int aligned_size = align_up(restore_size, StackAlignmentInBytes);
 4378 
 4379   int restore_offset;
 4380   if (offset == -1) {
 4381     restore_offset = restore_size - gp_reg_size;
 4382   } else {
 4383     restore_offset = offset + restore_size - gp_reg_size;
 4384   }
 4385   for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
 4386     movptr(*it, Address(rsp, restore_offset));
 4387     restore_offset -= gp_reg_size;
 4388   }
 4389 
 4390   if (offset == -1) {
 4391     addptr(rsp, aligned_size);
 4392   }
 4393 }
 4394 
 4395 // Preserves the contents of address; destroys the contents of length_in_bytes and temp.
 4396 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
 4397   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
 4398   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
 4399   Label done;
 4400 
 4401   testptr(length_in_bytes, length_in_bytes);
 4402   jcc(Assembler::zero, done);
 4403 
 4404   // initialize topmost word, divide index by 2, check if odd and test if zero
 4405   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 4406 #ifdef ASSERT
 4407   {
 4408     Label L;
 4409     testptr(length_in_bytes, BytesPerWord - 1);
 4410     jcc(Assembler::zero, L);
 4411     stop("length must be a multiple of BytesPerWord");
 4412     bind(L);
 4413   }
 4414 #endif
 4415   Register index = length_in_bytes;
 4416   xorptr(temp, temp);    // use zero reg to clear memory (shorter code)
 4417   if (UseIncDec) {
 4418     shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
 4419   } else {
 4420     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 4421     shrptr(index, 1);
 4422   }
 4423 #ifndef _LP64
 4424   // index might not have been a multiple of 8 (i.e., bit 2 was set)
 4425   {
 4426     Label even;
 4427     // note: if index was a multiple of 8, then it cannot
 4428     //       be 0 now otherwise it must have been 0 before
 4429     //       => if it is even, we don't need to check for 0 again
 4430     jcc(Assembler::carryClear, even);
 4431     // clear topmost word (no jump would be needed if conditional assignment worked here)
 4432     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
 4433     // index could be 0 now, must check again
 4434     jcc(Assembler::zero, done);
 4435     bind(even);
 4436   }
 4437 #endif // !_LP64
 4438   // initialize remaining object fields: index is a multiple of 2 now
 4439   {
 4440     Label loop;
 4441     bind(loop);
 4442     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 4443     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
 4444     decrement(index);
 4445     jcc(Assembler::notZero, loop);
 4446   }
 4447 
 4448   bind(done);
 4449 }
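
// For reference, a minimal C++ sketch of the strategy used above, assuming LP64 and a
// length that is a multiple of BytesPerWord (the helper name is illustrative only):
//
//   static void zero_memory_sketch(char* base, size_t offset_in_bytes, size_t length_in_bytes) {
//     uintptr_t* start = (uintptr_t*)(base + offset_in_bytes);
//     size_t words = length_in_bytes / sizeof(uintptr_t);
//     for (size_t i = words; i > 0; i--) {
//       start[i - 1] = 0;    // clear from the topmost word downwards, like the loop above
//     }
//   }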
 4450 
 4451 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
 4452   movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
 4453 #ifdef ASSERT
 4454   {
 4455     Label done;
 4456     cmpptr(inline_klass, 0);
 4457     jcc(Assembler::notEqual, done);
 4458     stop("get_inline_type_field_klass contains no inline klass");
 4459     bind(done);
 4460   }
 4461 #endif
 4462   movptr(inline_klass, Address(inline_klass, index, Address::times_ptr));
 4463 }
 4464 
 4465 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
 4466 #ifdef ASSERT
 4467   {
 4468     Label done_check;
 4469     test_klass_is_inline_type(inline_klass, temp_reg, done_check);
 4470     stop("get_default_value_oop from non inline type klass");
 4471     bind(done_check);
 4472   }
 4473 #endif
 4474   Register offset = temp_reg;
 4475   // Getting the offset of the pre-allocated default value
 4476   movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
 4477   movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));
 4478 
 4479   // Getting the mirror
 4480   movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
 4481   resolve_oop_handle(obj, inline_klass);
 4482 
 4483   // Getting the pre-allocated default value from the mirror
 4484   Address field(obj, offset, Address::times_1);
 4485   load_heap_oop(obj, field);
 4486 }
 4487 
 4488 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
 4489 #ifdef ASSERT
 4490   {
 4491     Label done_check;
 4492     test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
 4493     stop("get_empty_value from non-empty inline klass");
 4494     bind(done_check);
 4495   }
 4496 #endif
 4497   get_default_value_oop(inline_klass, temp_reg, obj);
 4498 }
 4499 
 4500 
 4501 // Look up the method for a megamorphic invokeinterface call.
 4502 // The target method is determined by <intf_klass, itable_index>.
 4503 // The receiver klass is in recv_klass.
 4504 // On success, the result will be in method_result, and execution falls through.
 4505 // On failure, execution transfers to the given label.
 4506 void MacroAssembler::lookup_interface_method(Register recv_klass,
 4507                                              Register intf_klass,
 4508                                              RegisterOrConstant itable_index,
 4509                                              Register method_result,
 4510                                              Register scan_temp,
 4511                                              Label& L_no_such_interface,
 4512                                              bool return_method) {
 4513   assert_different_registers(recv_klass, intf_klass, scan_temp);
 4514   assert_different_registers(method_result, intf_klass, scan_temp);
 4515   assert(recv_klass != method_result || !return_method,
 4516          "recv_klass can be destroyed when method isn't needed");
 4517 
 4518   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 4519          "caller must use same register for non-constant itable index as for method");
 4520 
 4521   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
 4522   int vtable_base = in_bytes(Klass::vtable_start_offset());
 4523   int itentry_off = itableMethodEntry::method_offset_in_bytes();
 4524   int scan_step   = itableOffsetEntry::size() * wordSize;
 4525   int vte_size    = vtableEntry::size_in_bytes();
 4526   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 4527   assert(vte_size == wordSize, "else adjust times_vte_scale");
 4528 
 4529   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 4530 
 4531   // %%% Could store the aligned, prescaled offset in the klassoop.
 4532   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
 4533 
 4534   if (return_method) {
 4535     // Adjust recv_klass by scaled itable_index, so we can free itable_index.
 4536     assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 4537     lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
 4538   }
 4539 
 4540   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
 4541   //   if (scan->interface() == intf) {
 4542   //     result = (klass + scan->offset() + itable_index);
 4543   //   }
 4544   // }
 4545   Label search, found_method;
 4546 
 4547   for (int peel = 1; peel >= 0; peel--) {
 4548     movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
 4549     cmpptr(intf_klass, method_result);
 4550 
 4551     if (peel) {
 4552       jccb(Assembler::equal, found_method);
 4553     } else {
 4554       jccb(Assembler::notEqual, search);
 4555       // (invert the test to fall through to found_method...)
 4556     }
 4557 
 4558     if (!peel)  break;
 4559 
 4560     bind(search);
 4561 
 4562     // Check that the previous entry is non-null.  A null entry means that
 4563     // the receiver class doesn't implement the interface, and wasn't the
 4564     // same as when the caller was compiled.
 4565     testptr(method_result, method_result);
 4566     jcc(Assembler::zero, L_no_such_interface);
 4567     addptr(scan_temp, scan_step);
 4568   }
 4569 
 4570   bind(found_method);
 4571 
 4572   if (return_method) {
 4573     // Got a hit.
 4574     movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
 4575     movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
 4576   }
 4577 }
 4578 
 4579 
 4580 // virtual method calling
 4581 void MacroAssembler::lookup_virtual_method(Register recv_klass,
 4582                                            RegisterOrConstant vtable_index,
 4583                                            Register method_result) {
 4584   const int base = in_bytes(Klass::vtable_start_offset());
 4585   assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
 4586   Address vtable_entry_addr(recv_klass,
 4587                             vtable_index, Address::times_ptr,
 4588                             base + vtableEntry::method_offset_in_bytes());
 4589   movptr(method_result, vtable_entry_addr);
 4590 }
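
// Equivalently, in C++ terms the load above computes (sketch only):
//
//   Method* method_result = *(Method**)((address)recv_klass
//       + in_bytes(Klass::vtable_start_offset())
//       + vtable_index * vtableEntry::size_in_bytes()
//       + vtableEntry::method_offset_in_bytes());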
 4591 
 4592 
 4593 void MacroAssembler::check_klass_subtype(Register sub_klass,
 4594                            Register super_klass,
 4595                            Register temp_reg,
 4596                            Label& L_success) {
 4597   Label L_failure;
 4598   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
 4599   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
 4600   bind(L_failure);
 4601 }
 4602 
 4603 
 4604 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
 4605                                                    Register super_klass,
 4606                                                    Register temp_reg,
 4607                                                    Label* L_success,
 4608                                                    Label* L_failure,
 4609                                                    Label* L_slow_path,
 4610                                         RegisterOrConstant super_check_offset) {
 4611   assert_different_registers(sub_klass, super_klass, temp_reg);
 4612   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
 4613   if (super_check_offset.is_register()) {
 4614     assert_different_registers(sub_klass, super_klass,
 4615                                super_check_offset.as_register());
 4616   } else if (must_load_sco) {
 4617     assert(temp_reg != noreg, "supply either a temp or a register offset");
 4618   }
 4619 
 4620   Label L_fallthrough;
 4621   int label_nulls = 0;
 4622   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
 4623   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
 4624   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
 4625   assert(label_nulls <= 1, "at most one NULL in the batch");
 4626 
 4627   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4628   int sco_offset = in_bytes(Klass::super_check_offset_offset());
 4629   Address super_check_offset_addr(super_klass, sco_offset);
 4630 
 4631   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
 4632   // range of a jccb.  If this routine grows larger, reconsider at
 4633   // least some of these.
 4634 #define local_jcc(assembler_cond, label)                                \
 4635   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
 4636   else                             jcc( assembler_cond, label) /*omit semi*/
 4637 
 4638   // Hacked jmp, which may only be used just before L_fallthrough.
 4639 #define final_jmp(label)                                                \
 4640   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
 4641   else                            jmp(label)                /*omit semi*/
 4642 
 4643   // If the pointers are equal, we are done (e.g., String[] elements).
 4644   // This self-check enables sharing of secondary supertype arrays among
 4645   // non-primary types such as array-of-interface.  Otherwise, each such
 4646   // type would need its own customized SSA.
 4647   // We move this check to the front of the fast path because many
 4648   // type checks are in fact trivially successful in this manner,
 4649   // so we get a nicely predicted branch right at the start of the check.
 4650   cmpptr(sub_klass, super_klass);
 4651   local_jcc(Assembler::equal, *L_success);
 4652 
 4653   // Check the supertype display:
 4654   if (must_load_sco) {
 4655     // Positive movl does right thing on LP64.
 4656     movl(temp_reg, super_check_offset_addr);
 4657     super_check_offset = RegisterOrConstant(temp_reg);
 4658   }
 4659   Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
 4660   cmpptr(super_klass, super_check_addr); // load displayed supertype
 4661 
 4662   // This check has worked decisively for primary supers.
 4663   // Secondary supers are sought in the super_cache ('super_cache_addr').
 4664   // (Secondary supers are interfaces and very deeply nested subtypes.)
 4665   // This works in the same check above because of a tricky aliasing
 4666   // between the super_cache and the primary super display elements.
 4667   // (The 'super_check_addr' can address either, as the case requires.)
 4668   // Note that the cache is updated below if it does not help us find
 4669   // what we need immediately.
 4670   // So if it was a primary super, we can just fail immediately.
 4671   // Otherwise, it's the slow path for us (no success at this point).
 4672 
 4673   if (super_check_offset.is_register()) {
 4674     local_jcc(Assembler::equal, *L_success);
 4675     cmpl(super_check_offset.as_register(), sc_offset);
 4676     if (L_failure == &L_fallthrough) {
 4677       local_jcc(Assembler::equal, *L_slow_path);
 4678     } else {
 4679       local_jcc(Assembler::notEqual, *L_failure);
 4680       final_jmp(*L_slow_path);
 4681     }
 4682   } else if (super_check_offset.as_constant() == sc_offset) {
 4683     // Need a slow path; fast failure is impossible.
 4684     if (L_slow_path == &L_fallthrough) {
 4685       local_jcc(Assembler::equal, *L_success);
 4686     } else {
 4687       local_jcc(Assembler::notEqual, *L_slow_path);
 4688       final_jmp(*L_success);
 4689     }
 4690   } else {
 4691     // No slow path; it's a fast decision.
 4692     if (L_failure == &L_fallthrough) {
 4693       local_jcc(Assembler::equal, *L_success);
 4694     } else {
 4695       local_jcc(Assembler::notEqual, *L_failure);
 4696       final_jmp(*L_success);
 4697     }
 4698   }
 4699 
 4700   bind(L_fallthrough);
 4701 
 4702 #undef local_jcc
 4703 #undef final_jmp
 4704 }
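
// A rough C++ sketch of the decision the fast path above makes (helper and enum names
// are illustrative, not HotSpot API): the word at sub_klass + super_check_offset is
// either one of the primary-super display slots or, by aliasing, the
// secondary_super_cache, which is why a single compare can classify the result.
//
//   enum FastPathResult { FP_Hit, FP_Miss, FP_SlowPath };
//   static FastPathResult fast_path_sketch(Klass* sub, Klass* super,
//                                          int super_check_offset, int sc_offset) {
//     if (sub == super) return FP_Hit;                                  // trivial self check
//     Klass* displayed = *(Klass**)((address)sub + super_check_offset);
//     if (displayed == super) return FP_Hit;                            // primary super or cached secondary
//     return (super_check_offset == sc_offset) ? FP_SlowPath            // only the cache was consulted
//                                              : FP_Miss;               // a primary display slot missed
//   }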
 4705 
 4706 
 4707 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4708                                                    Register super_klass,
 4709                                                    Register temp_reg,
 4710                                                    Register temp2_reg,
 4711                                                    Label* L_success,
 4712                                                    Label* L_failure,
 4713                                                    bool set_cond_codes) {
 4714   assert_different_registers(sub_klass, super_klass, temp_reg);
 4715   if (temp2_reg != noreg)
 4716     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
 4717 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
 4718 
 4719   Label L_fallthrough;
 4720   int label_nulls = 0;
 4721   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
 4722   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
 4723   assert(label_nulls <= 1, "at most one NULL in the batch");
 4724 
 4725   // a couple of useful fields in sub_klass:
 4726   int ss_offset = in_bytes(Klass::secondary_supers_offset());
 4727   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4728   Address secondary_supers_addr(sub_klass, ss_offset);
 4729   Address super_cache_addr(     sub_klass, sc_offset);
 4730 
 4731   // Do a linear scan of the secondary super-klass chain.
 4732   // This code is rarely used, so simplicity is a virtue here.
 4733   // The repne_scan instruction uses fixed registers, which we must spill.
 4734   // Don't worry too much about pre-existing connections with the input regs.
 4735 
 4736   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
 4737   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
 4738 
 4739   // Get super_klass value into rax (even if it was in rdi or rcx).
 4740   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
 4741   if (super_klass != rax) {
 4742     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
 4743     mov(rax, super_klass);
 4744   }
 4745   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
 4746   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
 4747 
 4748 #ifndef PRODUCT
 4749   int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
 4750   ExternalAddress pst_counter_addr((address) pst_counter);
 4751   NOT_LP64(  incrementl(pst_counter_addr) );
 4752   LP64_ONLY( lea(rcx, pst_counter_addr) );
 4753   LP64_ONLY( incrementl(Address(rcx, 0)) );
 4754 #endif //PRODUCT
 4755 
 4756   // We will consult the secondary-super array.
 4757   movptr(rdi, secondary_supers_addr);
 4758   // Load the array length.  (Positive movl does right thing on LP64.)
 4759   movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
 4760   // Skip to start of data.
 4761   addptr(rdi, Array<Klass*>::base_offset_in_bytes());
 4762 
 4763   // Scan RCX words at [RDI] for an occurrence of RAX.
 4764   // Set NZ/Z based on last compare.
 4765   // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself does
 4766   // not change flags; only the repeated scas instruction sets them.
 4767   // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
 4768 
 4769   testptr(rax, rax); // Set Z = 0
 4770   repne_scan();
 4771 
 4772   // Unspill the temp. registers:
 4773   if (pushed_rdi)  pop(rdi);
 4774   if (pushed_rcx)  pop(rcx);
 4775   if (pushed_rax)  pop(rax);
 4776 
 4777   if (set_cond_codes) {
 4778     // Special hack for the AD files:  rdi is guaranteed non-zero.
 4779     assert(!pushed_rdi, "rdi must be left non-NULL");
 4780     // Also, the condition codes are properly set Z/NZ on succeed/failure.
 4781   }
 4782 
 4783   if (L_failure == &L_fallthrough)
 4784         jccb(Assembler::notEqual, *L_failure);
 4785   else  jcc(Assembler::notEqual, *L_failure);
 4786 
 4787   // Success.  Cache the super we found and proceed in triumph.
 4788   movptr(super_cache_addr, super_klass);
 4789 
 4790   if (L_success != &L_fallthrough) {
 4791     jmp(*L_success);
 4792   }
 4793 
 4794 #undef IS_A_TEMP
 4795 
 4796   bind(L_fallthrough);
 4797 }
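
// The repne_scan sequence above is the assembly form of the following linear scan
// (a sketch; the cache-update accessor name is illustrative):
//
//   static bool slow_path_sketch(Klass* sub, Klass* super) {
//     Array<Klass*>* secondaries = sub->secondary_supers();
//     for (int i = 0; i < secondaries->length(); i++) {
//       if (secondaries->at(i) == super) {
//         sub->set_secondary_super_cache(super);   // remember the hit for future fast-path checks
//         return true;
//       }
//     }
//     return false;
//   }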
 4798 
 4799 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
 4800   assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
 4801 
 4802   Label L_fallthrough;
 4803   if (L_fast_path == NULL) {
 4804     L_fast_path = &L_fallthrough;
 4805   } else if (L_slow_path == NULL) {
 4806     L_slow_path = &L_fallthrough;
 4807   }
 4808 
 4809   // Fast path check: class is fully initialized
 4810   cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
 4811   jcc(Assembler::equal, *L_fast_path);
 4812 
 4813   // Fast path check: current thread is initializer thread
 4814   cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset()));
 4815   if (L_slow_path == &L_fallthrough) {
 4816     jcc(Assembler::equal, *L_fast_path);
 4817     bind(*L_slow_path);
 4818   } else if (L_fast_path == &L_fallthrough) {
 4819     jcc(Assembler::notEqual, *L_slow_path);
 4820     bind(*L_fast_path);
 4821   } else {
 4822     Unimplemented();
 4823   }
 4824 }
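
// In plain C++ terms the fast-path checks above amount to (a sketch; accessor names
// are illustrative):
//
//   static bool clinit_fast_path_sketch(InstanceKlass* klass, Thread* current) {
//     return klass->is_initialized()                  // class already fully initialized, or
//         || klass->init_thread() == current;         // being initialized by this thread (reentrant <clinit>)
//   }
//
// Only when neither check holds does control reach L_slow_path, where the runtime waits
// for (or performs) the initialization.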
 4825 
 4826 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
 4827   if (VM_Version::supports_cmov()) {
 4828     cmovl(cc, dst, src);
 4829   } else {
 4830     Label L;
 4831     jccb(negate_condition(cc), L);
 4832     movl(dst, src);
 4833     bind(L);
 4834   }
 4835 }
 4836 
 4837 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4838   if (VM_Version::supports_cmov()) {
 4839     cmovl(cc, dst, src);
 4840   } else {
 4841     Label L;
 4842     jccb(negate_condition(cc), L);
 4843     movl(dst, src);
 4844     bind(L);
 4845   }
 4846 }
 4847 
 4848 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4849   if (!VerifyOops || VerifyAdapterSharing) {
 4850     // The address of the code string pushed below confuses VerifyAdapterSharing
 4851     // because it may differ between otherwise equivalent adapters.
 4852     return;
 4853   }
 4854 
 4855   BLOCK_COMMENT("verify_oop {");
 4856 #ifdef _LP64
 4857   push(rscratch1);
 4858 #endif
 4859   push(rax);                          // save rax
 4860   push(reg);                          // pass register argument
 4861 
 4862   // Pass register number to verify_oop_subroutine
 4863   const char* b = NULL;
 4864   {
 4865     ResourceMark rm;
 4866     stringStream ss;
 4867     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4868     b = code_string(ss.as_string());
 4869   }
 4870   ExternalAddress buffer((address) b);
 4871   pushptr(buffer.addr(), rscratch1);
 4872 
 4873   // call indirectly to solve generation ordering problem
 4874   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4875   call(rax);
 4876   // Caller pops the arguments (oop, message) and restores rax, r10
 4877   BLOCK_COMMENT("} verify_oop");
 4878 }
 4879 
 4880 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
 4881   if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
 4882     // Only pcmpeq gets dependency-breaking treatment (i.e. execution can begin without
 4883     // waiting for the previous result in dst); vpcmpeqd does not, so just use vpternlog.
 4884     vpternlogd(dst, 0xFF, dst, dst, vector_len);
 4885   } else if (VM_Version::supports_avx()) {
 4886     vpcmpeqd(dst, dst, dst, vector_len);
 4887   } else {
 4888     assert(VM_Version::supports_sse2(), "");
 4889     pcmpeqd(dst, dst);
 4890   }
 4891 }
 4892 
 4893 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
 4894                                          int extra_slot_offset) {
 4895   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4896   int stackElementSize = Interpreter::stackElementSize;
 4897   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4898 #ifdef ASSERT
 4899   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4900   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4901 #endif
 4902   Register             scale_reg    = noreg;
 4903   Address::ScaleFactor scale_factor = Address::no_scale;
 4904   if (arg_slot.is_constant()) {
 4905     offset += arg_slot.as_constant() * stackElementSize;
 4906   } else {
 4907     scale_reg    = arg_slot.as_register();
 4908     scale_factor = Address::times(stackElementSize);
 4909   }
 4910   offset += wordSize;           // return PC is on stack
 4911   return Address(rsp, scale_reg, scale_factor, offset);
 4912 }
 4913 
 4914 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4915   if (!VerifyOops || VerifyAdapterSharing) {
 4916     // The address of the code string pushed below confuses VerifyAdapterSharing
 4917     // because it may differ between otherwise equivalent adapters.
 4918     return;
 4919   }
 4920 
 4921 #ifdef _LP64
 4922   push(rscratch1);
 4923 #endif
 4924   push(rax); // save rax
 4925   // addr may contain rsp, so we will have to adjust it based on the push
 4926   // we just did (and on 64 bit we do two pushes).
 4927   // NOTE: the 64-bit code used to have a bug here: it did movq(addr, rax), which
 4928   // stores rax into addr -- the reverse of what was intended.
 4929   if (addr.uses(rsp)) {
 4930     lea(rax, addr);
 4931     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
 4932   } else {
 4933     pushptr(addr);
 4934   }
 4935 
 4936   // Pass register number to verify_oop_subroutine
 4937   const char* b = NULL;
 4938   {
 4939     ResourceMark rm;
 4940     stringStream ss;
 4941     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
 4942     b = code_string(ss.as_string());
 4943   }
 4944   ExternalAddress buffer((address) b);
 4945   pushptr(buffer.addr(), rscratch1);
 4946 
 4947   // call indirectly to solve generation ordering problem
 4948   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4949   call(rax);
 4950   // Caller pops the arguments (addr, message) and restores rax, r10.
 4951 }
 4952 
 4953 void MacroAssembler::verify_tlab() {
 4954 #ifdef ASSERT
 4955   if (UseTLAB && VerifyOops) {
 4956     Label next, ok;
 4957     Register t1 = rsi;
 4958     Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
 4959 
 4960     push(t1);
 4961     NOT_LP64(push(thread_reg));
 4962     NOT_LP64(get_thread(thread_reg));
 4963 
 4964     movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
 4965     cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
 4966     jcc(Assembler::aboveEqual, next);
 4967     STOP("assert(top >= start)");
 4968     should_not_reach_here();
 4969 
 4970     bind(next);
 4971     movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
 4972     cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
 4973     jcc(Assembler::aboveEqual, ok);
 4974     STOP("assert(top <= end)");
 4975     should_not_reach_here();
 4976 
 4977     bind(ok);
 4978     NOT_LP64(pop(thread_reg));
 4979     pop(t1);
 4980   }
 4981 #endif
 4982 }
 4983 
 4984 class ControlWord {
 4985  public:
 4986   int32_t _value;
 4987 
 4988   int  rounding_control() const        { return  (_value >> 10) & 3      ; }
 4989   int  precision_control() const       { return  (_value >>  8) & 3      ; }
 4990   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 4991   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 4992   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 4993   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 4994   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 4995   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 4996 
 4997   void print() const {
 4998     // rounding control
 4999     const char* rc;
 5000     switch (rounding_control()) {
 5001       case 0: rc = "round near"; break;
 5002       case 1: rc = "round down"; break;
 5003       case 2: rc = "round up  "; break;
 5004       case 3: rc = "chop      "; break;
 5005       default:
 5006         rc = NULL; // silence compiler warnings
 5007         fatal("Unknown rounding control: %d", rounding_control());
 5008     };
 5009     // precision control
 5010     const char* pc;
 5011     switch (precision_control()) {
 5012       case 0: pc = "24 bits "; break;
 5013       case 1: pc = "reserved"; break;
 5014       case 2: pc = "53 bits "; break;
 5015       case 3: pc = "64 bits "; break;
 5016       default:
 5017         pc = NULL; // silence compiler warnings
 5018         fatal("Unknown precision control: %d", precision_control());
 5019     };
 5020     // flags
 5021     char f[9];
 5022     f[0] = ' ';
 5023     f[1] = ' ';
 5024     f[2] = (precision   ()) ? 'P' : 'p';
 5025     f[3] = (underflow   ()) ? 'U' : 'u';
 5026     f[4] = (overflow    ()) ? 'O' : 'o';
 5027     f[5] = (zero_divide ()) ? 'Z' : 'z';
 5028     f[6] = (denormalized()) ? 'D' : 'd';
 5029     f[7] = (invalid     ()) ? 'I' : 'i';
 5030     f[8] = '\x0';
 5031     // output
 5032     printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
 5033   }
 5034 
 5035 };
 5036 
 5037 class StatusWord {
 5038  public:
 5039   int32_t _value;
 5040 
 5041   bool busy() const                    { return ((_value >> 15) & 1) != 0; }
 5042   bool C3() const                      { return ((_value >> 14) & 1) != 0; }
 5043   bool C2() const                      { return ((_value >> 10) & 1) != 0; }
 5044   bool C1() const                      { return ((_value >>  9) & 1) != 0; }
 5045   bool C0() const                      { return ((_value >>  8) & 1) != 0; }
 5046   int  top() const                     { return  (_value >> 11) & 7      ; }
 5047   bool error_status() const            { return ((_value >>  7) & 1) != 0; }
 5048   bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
 5049   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5050   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5051   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5052   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5053   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5054   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5055 
 5056   void print() const {
 5057     // condition codes
 5058     char c[5];
 5059     c[0] = (C3()) ? '3' : '-';
 5060     c[1] = (C2()) ? '2' : '-';
 5061     c[2] = (C1()) ? '1' : '-';
 5062     c[3] = (C0()) ? '0' : '-';
 5063     c[4] = '\x0';
 5064     // flags
 5065     char f[9];
 5066     f[0] = (error_status()) ? 'E' : '-';
 5067     f[1] = (stack_fault ()) ? 'S' : '-';
 5068     f[2] = (precision   ()) ? 'P' : '-';
 5069     f[3] = (underflow   ()) ? 'U' : '-';
 5070     f[4] = (overflow    ()) ? 'O' : '-';
 5071     f[5] = (zero_divide ()) ? 'Z' : '-';
 5072     f[6] = (denormalized()) ? 'D' : '-';
 5073     f[7] = (invalid     ()) ? 'I' : '-';
 5074     f[8] = '\x0';
 5075     // output
 5076     printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
 5077   }
 5078 
 5079 };
 5080 
 5081 class TagWord {
 5082  public:
 5083   int32_t _value;
 5084 
 5085   int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
 5086 
 5087   void print() const {
 5088     printf("%04x", _value & 0xFFFF);
 5089   }
 5090 
 5091 };
 5092 
 5093 class FPU_Register {
 5094  public:
 5095   int32_t _m0;
 5096   int32_t _m1;
 5097   int16_t _ex;
 5098 
 5099   bool is_indefinite() const           {
 5100     return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
 5101   }
 5102 
 5103   void print() const {
 5104     char  sign = (_ex < 0) ? '-' : '+';
 5105     const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
 5106     printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
 5107   };
 5108 
 5109 };
 5110 
 5111 class FPU_State {
 5112  public:
 5113   enum {
 5114     register_size       = 10,
 5115     number_of_registers =  8,
 5116     register_mask       =  7
 5117   };
 5118 
 5119   ControlWord  _control_word;
 5120   StatusWord   _status_word;
 5121   TagWord      _tag_word;
 5122   int32_t      _error_offset;
 5123   int32_t      _error_selector;
 5124   int32_t      _data_offset;
 5125   int32_t      _data_selector;
 5126   int8_t       _register[register_size * number_of_registers];
 5127 
 5128   int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
 5129   FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
 5130 
 5131   const char* tag_as_string(int tag) const {
 5132     switch (tag) {
 5133       case 0: return "valid";
 5134       case 1: return "zero";
 5135       case 2: return "special";
 5136       case 3: return "empty";
 5137     }
 5138     ShouldNotReachHere();
 5139     return NULL;
 5140   }
 5141 
 5142   void print() const {
 5143     // print computation registers
 5144     { int t = _status_word.top();
 5145       for (int i = 0; i < number_of_registers; i++) {
 5146         int j = (i - t) & register_mask;
 5147         printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
 5148         st(j)->print();
 5149         printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
 5150       }
 5151     }
 5152     printf("\n");
 5153     // print control registers
 5154     printf("ctrl = "); _control_word.print(); printf("\n");
 5155     printf("stat = "); _status_word .print(); printf("\n");
 5156     printf("tags = "); _tag_word    .print(); printf("\n");
 5157   }
 5158 
 5159 };
 5160 
 5161 class Flag_Register {
 5162  public:
 5163   int32_t _value;
 5164 
 5165   bool overflow() const                { return ((_value >> 11) & 1) != 0; }
 5166   bool direction() const               { return ((_value >> 10) & 1) != 0; }
 5167   bool sign() const                    { return ((_value >>  7) & 1) != 0; }
 5168   bool zero() const                    { return ((_value >>  6) & 1) != 0; }
 5169   bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
 5170   bool parity() const                  { return ((_value >>  2) & 1) != 0; }
 5171   bool carry() const                   { return ((_value >>  0) & 1) != 0; }
 5172 
 5173   void print() const {
 5174     // flags
 5175     char f[8];
 5176     f[0] = (overflow       ()) ? 'O' : '-';
 5177     f[1] = (direction      ()) ? 'D' : '-';
 5178     f[2] = (sign           ()) ? 'S' : '-';
 5179     f[3] = (zero           ()) ? 'Z' : '-';
 5180     f[4] = (auxiliary_carry()) ? 'A' : '-';
 5181     f[5] = (parity         ()) ? 'P' : '-';
 5182     f[6] = (carry          ()) ? 'C' : '-';
 5183     f[7] = '\x0';
 5184     // output
 5185     printf("%08x  flags = %s", _value, f);
 5186   }
 5187 
 5188 };
 5189 
 5190 class IU_Register {
 5191  public:
 5192   int32_t _value;
 5193 
 5194   void print() const {
 5195     printf("%08x  %11d", _value, _value);
 5196   }
 5197 
 5198 };
 5199 
 5200 class IU_State {
 5201  public:
 5202   Flag_Register _eflags;
 5203   IU_Register   _rdi;
 5204   IU_Register   _rsi;
 5205   IU_Register   _rbp;
 5206   IU_Register   _rsp;
 5207   IU_Register   _rbx;
 5208   IU_Register   _rdx;
 5209   IU_Register   _rcx;
 5210   IU_Register   _rax;
 5211 
 5212   void print() const {
 5213     // computation registers
 5214     printf("rax  = "); _rax.print(); printf("\n");
 5215     printf("rbx,  = "); _rbx.print(); printf("\n");
 5216     printf("rcx  = "); _rcx.print(); printf("\n");
 5217     printf("rdx  = "); _rdx.print(); printf("\n");
 5218     printf("rdi  = "); _rdi.print(); printf("\n");
 5219     printf("rsi  = "); _rsi.print(); printf("\n");
 5220     printf("rbp  = "); _rbp.print(); printf("\n");
 5221     printf("rsp  = "); _rsp.print(); printf("\n");
 5222     printf("\n");
 5223     // control registers
 5224     printf("flgs = "); _eflags.print(); printf("\n");
 5225   }
 5226 };
 5227 
 5228 
 5229 class CPU_State {
 5230  public:
 5231   FPU_State _fpu_state;
 5232   IU_State  _iu_state;
 5233 
 5234   void print() const {
 5235     printf("--------------------------------------------------\n");
 5236     _iu_state .print();
 5237     printf("\n");
 5238     _fpu_state.print();
 5239     printf("--------------------------------------------------\n");
 5240   }
 5241 
 5242 };
 5243 
 5244 
 5245 static void _print_CPU_state(CPU_State* state) {
 5246   state->print();
 5247 }
 5248 
 5249 
 5250 void MacroAssembler::print_CPU_state() {
 5251   push_CPU_state();
 5252   push(rsp);                // pass CPU state
 5253   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
 5254   addptr(rsp, wordSize);       // discard argument
 5255   pop_CPU_state();
 5256 }
 5257 
 5258 
 5259 #ifndef _LP64
 5260 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
 5261   static int counter = 0;
 5262   FPU_State* fs = &state->_fpu_state;
 5263   counter++;
 5264   // For leaf calls, only verify that the top few elements remain empty.
 5265   // We only need 1 empty at the top for C2 code.
 5266   if( stack_depth < 0 ) {
 5267     if( fs->tag_for_st(7) != 3 ) {
 5268       printf("FPR7 not empty\n");
 5269       state->print();
 5270       assert(false, "error");
 5271       return false;
 5272     }
 5273     return true;                // All other stack states do not matter
 5274   }
 5275 
 5276   assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(),
 5277          "bad FPU control word");
 5278 
 5279   // compute stack depth
 5280   int i = 0;
 5281   while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
 5282   int d = i;
 5283   while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
 5284   // verify findings
 5285   if (i != FPU_State::number_of_registers) {
 5286     // stack not contiguous
 5287     printf("%s: stack not contiguous at ST%d\n", s, i);
 5288     state->print();
 5289     assert(false, "error");
 5290     return false;
 5291   }
 5292   // check if computed stack depth corresponds to expected stack depth
 5293   if (stack_depth < 0) {
 5294     // expected stack depth is -stack_depth or less
 5295     if (d > -stack_depth) {
 5296       // too many elements on the stack
 5297       printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
 5298       state->print();
 5299       assert(false, "error");
 5300       return false;
 5301     }
 5302   } else {
 5303     // expected stack depth is stack_depth
 5304     if (d != stack_depth) {
 5305       // wrong stack depth
 5306       printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
 5307       state->print();
 5308       assert(false, "error");
 5309       return false;
 5310     }
 5311   }
 5312   // everything is cool
 5313   return true;
 5314 }
 5315 
 5316 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
 5317   if (!VerifyFPU) return;
 5318   push_CPU_state();
 5319   push(rsp);                // pass CPU state
 5320   ExternalAddress msg((address) s);
 5321   // pass message string s
 5322   pushptr(msg.addr(), noreg);
 5323   push(stack_depth);        // pass stack depth
 5324   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
 5325   addptr(rsp, 3 * wordSize);   // discard arguments
 5326   // check for error
 5327   { Label L;
 5328     testl(rax, rax);
 5329     jcc(Assembler::notZero, L);
 5330     int3();                  // break if error condition
 5331     bind(L);
 5332   }
 5333   pop_CPU_state();
 5334 }
 5335 #endif // _LP64
 5336 
 5337 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
 5338   // Either restore the MXCSR register after returning from the JNI Call
 5339   // or verify that it wasn't changed (with -Xcheck:jni flag).
 5340   if (VM_Version::supports_sse()) {
 5341     if (RestoreMXCSROnJNICalls) {
 5342       ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
 5343     } else if (CheckJNICalls) {
 5344       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
 5345     }
 5346   }
 5347   // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
 5348   vzeroupper();
 5349 
 5350 #ifndef _LP64
 5351   // Either restore the x87 floating pointer control word after returning
 5352   // from the JNI call or verify that it wasn't changed.
 5353   if (CheckJNICalls) {
 5354     call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
 5355   }
 5356 #endif // _LP64
 5357 }
 5358 
 5359 // ((OopHandle)result).resolve();
 5360 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
 5361   assert_different_registers(result, tmp);
 5362 
 5363   // Only 64 bit platforms support GCs that require a tmp register
 5364   // Only IN_HEAP loads require a thread_tmp register
 5365   // OopHandle::resolve is an indirection like jobject.
 5366   access_load_at(T_OBJECT, IN_NATIVE,
 5367                  result, Address(result, 0), tmp, /*tmp_thread*/noreg);
 5368 }
 5369 
 5370 // ((WeakHandle)result).resolve();
 5371 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
 5372   assert_different_registers(rresult, rtmp);
 5373   Label resolved;
 5374 
 5375   // A null weak handle resolves to null.
 5376   cmpptr(rresult, 0);
 5377   jcc(Assembler::equal, resolved);
 5378 
 5379   // Only 64 bit platforms support GCs that require a tmp register
 5380   // Only IN_HEAP loads require a thread_tmp register
 5381   // WeakHandle::resolve is an indirection like jweak.
 5382   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 5383                  rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg);
 5384   bind(resolved);
 5385 }
 5386 
 5387 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5388   // get mirror
 5389   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5390   load_method_holder(mirror, method);
 5391   movptr(mirror, Address(mirror, mirror_offset));
 5392   resolve_oop_handle(mirror, tmp);
 5393 }
 5394 
 5395 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5396   load_method_holder(rresult, rmethod);
 5397   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5398 }
 5399 
 5400 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5401   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5402   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5403   movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
 5404 }
 5405 
 5406 void MacroAssembler::load_metadata(Register dst, Register src) {
 5407   if (UseCompressedClassPointers) {
 5408     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5409   } else {
 5410     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5411   }
 5412 }
 5413 
 5414 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5415   assert_different_registers(src, tmp);
 5416   assert_different_registers(dst, tmp);
 5417 #ifdef _LP64
 5418   if (UseCompressedClassPointers) {
 5419     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5420     decode_klass_not_null(dst, tmp);
 5421   } else
 5422 #endif
 5423   movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5424 }
 5425 
 5426 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5427   load_klass(dst, src, tmp);
 5428   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5429 }
 5430 
 5431 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5432   assert_different_registers(src, tmp);
 5433   assert_different_registers(dst, tmp);
 5434 #ifdef _LP64
 5435   if (UseCompressedClassPointers) {
 5436     encode_klass_not_null(src, tmp);
 5437     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5438   } else
 5439 #endif
 5440     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5441 }
 5442 
 5443 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5444                                     Register tmp1, Register thread_tmp) {
 5445   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5446   decorators = AccessInternal::decorator_fixup(decorators, type);
 5447   bool as_raw = (decorators & AS_RAW) != 0;
 5448   if (as_raw) {
 5449     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5450   } else {
 5451     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
 5452   }
 5453 }
 5454 
 5455 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5456                                      Register tmp1, Register tmp2, Register tmp3) {
 5457   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5458   decorators = AccessInternal::decorator_fixup(decorators, type);
 5459   bool as_raw = (decorators & AS_RAW) != 0;
 5460   if (as_raw) {
 5461     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5462   } else {
 5463     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5464   }
 5465 }
 5466 
 5467 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
 5468                                        Register inline_klass) {
 5469   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5470   bs->value_copy(this, decorators, src, dst, inline_klass);
 5471 }
 5472 
 5473 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
 5474   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5475   movl(offset, Address(offset, InlineKlass::first_field_offset_offset()));
 5476 }
 5477 
 5478 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
 5479   // ((address) (void*) o) + vk->first_field_offset();
 5480   Register offset = (data == oop) ? rscratch1 : data;
 5481   first_field_offset(inline_klass, offset);
 5482   if (data == oop) {
 5483     addptr(data, offset);
 5484   } else {
 5485     lea(data, Address(oop, offset));
 5486   }
 5487 }
 5488 
 5489 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5490                                                 Register index, Register data) {
 5491   assert(index != rcx, "index needs to shift by rcx");
 5492   assert_different_registers(array, array_klass, index);
 5493   assert_different_registers(rcx, array, index);
 5494 
 5495   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5496   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5497 
 5498   // Klass::layout_helper_log2_element_size(lh)
 5499   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5500   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5501   andl(rcx, Klass::_lh_log2_element_size_mask);
 5502   shlptr(index); // index << rcx
 5503 
 5504   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)));
 5505 }
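
// Equivalent address computation in C++ (sketch; 'lh' is the layout helper loaded into rcx above):
//
//   static address element_address_sketch(address array, int lh, int index) {
//     int log2_esize = (lh >> Klass::_lh_log2_element_size_shift) & Klass::_lh_log2_element_size_mask;
//     return array + arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)
//                  + ((size_t)index << log2_esize);
//   }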
 5506 
 5507 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
 5508                                    Register thread_tmp, DecoratorSet decorators) {
 5509   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
 5510 }
 5511 
 5512 // Doesn't do verification, generates fixed size code
 5513 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
 5514                                             Register thread_tmp, DecoratorSet decorators) {
 5515   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
 5516 }
 5517 
 5518 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5519                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5520   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5521 }
 5522 
 5523 // Used for storing NULLs.
 5524 void MacroAssembler::store_heap_oop_null(Address dst) {
 5525   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5526 }
 5527 
 5528 #ifdef _LP64
 5529 void MacroAssembler::store_klass_gap(Register dst, Register src) {
 5530   if (UseCompressedClassPointers) {
 5531     // Store to klass gap in destination
 5532     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
 5533   }
 5534 }
 5535 
 5536 #ifdef ASSERT
 5537 void MacroAssembler::verify_heapbase(const char* msg) {
 5538   assert (UseCompressedOops, "should be compressed");
 5539   assert (Universe::heap() != NULL, "java heap should be initialized");
 5540   if (CheckCompressedOops) {
 5541     Label ok;
 5542     ExternalAddress src2(CompressedOops::ptrs_base_addr());
 5543     const bool is_src2_reachable = reachable(src2);
 5544     if (!is_src2_reachable) {
 5545       push(rscratch1);  // cmpptr trashes rscratch1
 5546     }
 5547     cmpptr(r12_heapbase, src2, rscratch1);
 5548     jcc(Assembler::equal, ok);
 5549     STOP(msg);
 5550     bind(ok);
 5551     if (!is_src2_reachable) {
 5552       pop(rscratch1);
 5553     }
 5554   }
 5555 }
 5556 #endif
 5557 
 5558 // Algorithm must match oop.inline.hpp encode_heap_oop.
 5559 void MacroAssembler::encode_heap_oop(Register r) {
 5560 #ifdef ASSERT
 5561   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
 5562 #endif
 5563   verify_oop_msg(r, "broken oop in encode_heap_oop");
 5564   if (CompressedOops::base() == NULL) {
 5565     if (CompressedOops::shift() != 0) {
 5566       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5567       shrq(r, LogMinObjAlignmentInBytes);
 5568     }
 5569     return;
 5570   }
 5571   testq(r, r);
 5572   cmovq(Assembler::equal, r, r12_heapbase);
 5573   subq(r, r12_heapbase);
 5574   shrq(r, LogMinObjAlignmentInBytes);
 5575 }
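
// Arithmetic reference for the compressed-oop encoding above and the matching decode
// below (a sketch of the formula; 'base' and 'shift' stand for CompressedOops::base()
// and CompressedOops::shift()):
//
//   static uint32_t encode_sketch(uintptr_t oop, uintptr_t base, int shift) {
//     if (oop == 0) return 0;                          // NULL encodes to 0 (the cmov above)
//     return (uint32_t)((oop - base) >> shift);        // offset from heap base, scaled down
//   }
//   static uintptr_t decode_sketch(uint32_t narrow, uintptr_t base, int shift) {
//     return narrow == 0 ? 0 : base + ((uintptr_t)narrow << shift);
//   }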
 5576 
 5577 void MacroAssembler::encode_heap_oop_not_null(Register r) {
 5578 #ifdef ASSERT
 5579   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
 5580   if (CheckCompressedOops) {
 5581     Label ok;
 5582     testq(r, r);
 5583     jcc(Assembler::notEqual, ok);
 5584     STOP("null oop passed to encode_heap_oop_not_null");
 5585     bind(ok);
 5586   }
 5587 #endif
 5588   verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
 5589   if (CompressedOops::base() != NULL) {
 5590     subq(r, r12_heapbase);
 5591   }
 5592   if (CompressedOops::shift() != 0) {
 5593     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5594     shrq(r, LogMinObjAlignmentInBytes);
 5595   }
 5596 }
 5597 
 5598 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
 5599 #ifdef ASSERT
 5600   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
 5601   if (CheckCompressedOops) {
 5602     Label ok;
 5603     testq(src, src);
 5604     jcc(Assembler::notEqual, ok);
 5605     STOP("null oop passed to encode_heap_oop_not_null2");
 5606     bind(ok);
 5607   }
 5608 #endif
 5609   verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
 5610   if (dst != src) {
 5611     movq(dst, src);
 5612   }
 5613   if (CompressedOops::base() != NULL) {
 5614     subq(dst, r12_heapbase);
 5615   }
 5616   if (CompressedOops::shift() != 0) {
 5617     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5618     shrq(dst, LogMinObjAlignmentInBytes);
 5619   }
 5620 }
 5621 
 5622 void  MacroAssembler::decode_heap_oop(Register r) {
 5623 #ifdef ASSERT
 5624   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
 5625 #endif
 5626   if (CompressedOops::base() == NULL) {
 5627     if (CompressedOops::shift() != 0) {
 5628       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5629       shlq(r, LogMinObjAlignmentInBytes);
 5630     }
 5631   } else {
 5632     Label done;
 5633     shlq(r, LogMinObjAlignmentInBytes);
 5634     jccb(Assembler::equal, done);
 5635     addq(r, r12_heapbase);
 5636     bind(done);
 5637   }
 5638   verify_oop_msg(r, "broken oop in decode_heap_oop");
 5639 }
 5640 
 5641 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
 5642   // Note: it will change flags
 5643   assert (UseCompressedOops, "should only be used for compressed headers");
 5644   assert (Universe::heap() != NULL, "java heap should be initialized");
 5645   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5646   // vtableStubs also counts instructions in pd_code_size_limit.
 5647   // Also do not verify_oop as this is called by verify_oop.
 5648   if (CompressedOops::shift() != 0) {
 5649     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5650     shlq(r, LogMinObjAlignmentInBytes);
 5651     if (CompressedOops::base() != NULL) {
 5652       addq(r, r12_heapbase);
 5653     }
 5654   } else {
 5655     assert (CompressedOops::base() == NULL, "sanity");
 5656   }
 5657 }
 5658 
 5659 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
 5660   // Note: it will change flags
 5661   assert (UseCompressedOops, "should only be used for compressed headers");
 5662   assert (Universe::heap() != NULL, "java heap should be initialized");
 5663   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5664   // vtableStubs also counts instructions in pd_code_size_limit.
 5665   // Also do not verify_oop as this is called by verify_oop.
 5666   if (CompressedOops::shift() != 0) {
 5667     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5668     if (LogMinObjAlignmentInBytes == Address::times_8) {
 5669       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
 5670     } else {
 5671       if (dst != src) {
 5672         movq(dst, src);
 5673       }
 5674       shlq(dst, LogMinObjAlignmentInBytes);
 5675       if (CompressedOops::base() != NULL) {
 5676         addq(dst, r12_heapbase);
 5677       }
 5678     }
 5679   } else {
 5680     assert (CompressedOops::base() == NULL, "sanity");
 5681     if (dst != src) {
 5682       movq(dst, src);
 5683     }
 5684   }
 5685 }
 5686 
 5687 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
 5688   assert_different_registers(r, tmp);
 5689   if (CompressedKlassPointers::base() != NULL) {
 5690     mov64(tmp, (int64_t)CompressedKlassPointers::base());
 5691     subq(r, tmp);
 5692   }
 5693   if (CompressedKlassPointers::shift() != 0) {
 5694     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
 5695     shrq(r, LogKlassAlignmentInBytes);
 5696   }
 5697 }
 5698 
 5699 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
 5700   assert_different_registers(src, dst);
 5701   if (CompressedKlassPointers::base() != NULL) {
 5702     mov64(dst, -(int64_t)CompressedKlassPointers::base());
 5703     addq(dst, src);
 5704   } else {
 5705     movptr(dst, src);
 5706   }
 5707   if (CompressedKlassPointers::shift() != 0) {
 5708     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
 5709     shrq(dst, LogKlassAlignmentInBytes);
 5710   }
 5711 }
 5712 
 5713 void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
 5714   assert_different_registers(r, tmp);
 5715   // Note: it will change flags
 5716   assert(UseCompressedClassPointers, "should only be used for compressed headers");
 5717   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5718   // vtableStubs also counts instructions in pd_code_size_limit.
 5719   // Also do not verify_oop as this is called by verify_oop.
 5720   if (CompressedKlassPointers::shift() != 0) {
 5721     assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
 5722     shlq(r, LogKlassAlignmentInBytes);
 5723   }
 5724   if (CompressedKlassPointers::base() != NULL) {
 5725     mov64(tmp, (int64_t)CompressedKlassPointers::base());
 5726     addq(r, tmp);
 5727   }
 5728 }
 5729 
 5730 void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
 5731   assert_different_registers(src, dst);
 5732   // Note: it will change flags
 5733   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5734   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5735   // vtableStubs also counts instructions in pd_code_size_limit.
 5736   // Also do not verify_oop as this is called by verify_oop.
 5737 
 5738   if (CompressedKlassPointers::base() == NULL &&
 5739       CompressedKlassPointers::shift() == 0) {
 5740     // The best case scenario is that there is no base or shift. Then it is already
 5741     // a pointer that needs nothing but a register rename.
 5742     movl(dst, src);
 5743   } else {
 5744     if (CompressedKlassPointers::base() != NULL) {
 5745       mov64(dst, (int64_t)CompressedKlassPointers::base());
 5746     } else {
 5747       xorq(dst, dst);
 5748     }
 5749     if (CompressedKlassPointers::shift() != 0) {
 5750       assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
 5751       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
 5752       leaq(dst, Address(dst, src, Address::times_8, 0));
 5753     } else {
 5754       addq(dst, src);
 5755     }
 5756   }
 5757 }
 5758 
 5759 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
 5760   assert (UseCompressedOops, "should only be used for compressed headers");
 5761   assert (Universe::heap() != NULL, "java heap should be initialized");
 5762   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5763   int oop_index = oop_recorder()->find_index(obj);
 5764   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5765   mov_narrow_oop(dst, oop_index, rspec);
 5766 }
 5767 
 5768 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
 5769   assert (UseCompressedOops, "should only be used for compressed headers");
 5770   assert (Universe::heap() != NULL, "java heap should be initialized");
 5771   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5772   int oop_index = oop_recorder()->find_index(obj);
 5773   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5774   mov_narrow_oop(dst, oop_index, rspec);
 5775 }
 5776 
 5777 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
 5778   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5779   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5780   int klass_index = oop_recorder()->find_index(k);
 5781   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5782   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5783 }
 5784 
 5785 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
 5786   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5787   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5788   int klass_index = oop_recorder()->find_index(k);
 5789   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5790   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5791 }
 5792 
 5793 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
 5794   assert (UseCompressedOops, "should only be used for compressed headers");
 5795   assert (Universe::heap() != NULL, "java heap should be initialized");
 5796   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5797   int oop_index = oop_recorder()->find_index(obj);
 5798   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5799   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5800 }
 5801 
 5802 void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
 5803   assert (UseCompressedOops, "should only be used for compressed headers");
 5804   assert (Universe::heap() != NULL, "java heap should be initialized");
 5805   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5806   int oop_index = oop_recorder()->find_index(obj);
 5807   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5808   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5809 }
 5810 
 5811 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
 5812   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5813   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5814   int klass_index = oop_recorder()->find_index(k);
 5815   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5816   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5817 }
 5818 
 5819 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
 5820   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5821   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
 5822   int klass_index = oop_recorder()->find_index(k);
 5823   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5824   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5825 }
 5826 
 5827 void MacroAssembler::reinit_heapbase() {
 5828   if (UseCompressedOops) {
 5829     if (Universe::heap() != NULL) {
 5830       if (CompressedOops::base() == NULL) {
 5831         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5832       } else {
 5833         mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
 5834       }
 5835     } else {
 5836       movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
 5837     }
 5838   }
 5839 }
 5840 
 5841 #endif // _LP64
 5842 
 5843 #if COMPILER2_OR_JVMCI
 5844 
 5845 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5846 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 5847   // cnt - number of qwords (8-byte words).
 5848   // base - start address, qword aligned.
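        // Conceptually (illustrative sketch only, not VM code):
        //   for (size_t i = 0; i < cnt; i++) ((int64_t*)base)[i] = val;
        // done below with 16/32/64-byte vector stores plus a scalar tail.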
 5849   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5850   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5851   if (use64byteVector) {
 5852     evpbroadcastq(xtmp, val, AVX_512bit);
 5853   } else if (MaxVectorSize >= 32) {
 5854     movdq(xtmp, val);
 5855     punpcklqdq(xtmp, xtmp);
 5856     vinserti128_high(xtmp, xtmp);
 5857   } else {
 5858     movdq(xtmp, val);
 5859     punpcklqdq(xtmp, xtmp);
 5860   }
 5861   jmp(L_zero_64_bytes);
 5862 
 5863   BIND(L_loop);
 5864   if (MaxVectorSize >= 32) {
 5865     fill64(base, 0, xtmp, use64byteVector);
 5866   } else {
 5867     movdqu(Address(base,  0), xtmp);
 5868     movdqu(Address(base, 16), xtmp);
 5869     movdqu(Address(base, 32), xtmp);
 5870     movdqu(Address(base, 48), xtmp);
 5871   }
 5872   addptr(base, 64);
 5873 
 5874   BIND(L_zero_64_bytes);
 5875   subptr(cnt, 8);
 5876   jccb(Assembler::greaterEqual, L_loop);
 5877 
 5878   // Fill any trailing qwords (fewer than 8 remain at this point)
 5879   if (use64byteVector) {
 5880     addptr(cnt, 8);
 5881     jccb(Assembler::equal, L_end);
 5882     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 5883     jmp(L_end);
 5884   } else {
 5885     addptr(cnt, 4);
 5886     jccb(Assembler::less, L_tail);
 5887     if (MaxVectorSize >= 32) {
 5888       vmovdqu(Address(base, 0), xtmp);
 5889     } else {
 5890       movdqu(Address(base,  0), xtmp);
 5891       movdqu(Address(base, 16), xtmp);
 5892     }
 5893   }
 5894   addptr(base, 32);
 5895   subptr(cnt, 4);
 5896 
 5897   BIND(L_tail);
 5898   addptr(cnt, 4);
 5899   jccb(Assembler::lessEqual, L_end);
 5900   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5901     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 5902   } else {
 5903     decrement(cnt);
 5904 
 5905     BIND(L_sloop);
 5906     movq(Address(base, 0), xtmp);
 5907     addptr(base, 8);
 5908     decrement(cnt);
 5909     jccb(Assembler::greaterEqual, L_sloop);
 5910   }
 5911   BIND(L_end);
 5912 }
 5913 
 5914 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
 5915   assert(InlineTypeReturnedAsFields, "should only be used when inline types are returned as fields");
 5916   // An inline type might be returned. If fields are in registers we
 5917   // need to allocate an inline type instance and initialize it with
 5918   // the value of the fields.
 5919   Label skip;
 5920   // A new buffered inline type is only needed if the callee did not already return one (bit 0 of rax set means fields are returned in registers)
 5921   testptr(rax, 1);
 5922   jcc(Assembler::zero, skip);
 5923   int call_offset = -1;
 5924 
 5925 #ifdef _LP64
 5926   // The following code is similar to allocate_instance but has some slight differences,
 5927   // e.g. the object size is never zero and is sometimes a constant, and storing the klass
 5928   // pointer after allocation is unnecessary when vk != NULL. allocate_instance is not aware of these.
 5929   Label slow_case;
 5930   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 5931   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation fails
 5932   if (vk != NULL) {
 5933     // Called from C1, where the return type is statically known.
 5934     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 5935     jint obj_size = vk->layout_helper();
 5936     assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 5937     if (UseTLAB) {
 5938       tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
 5939     } else {
 5940       jmp(slow_case);
 5941     }
 5942   } else {
 5943     // Called from the interpreter. rax contains ((the InlineKlass* of the return type) | 0x01)
 5944     mov(rbx, rax);
 5945     andptr(rbx, -2);
 5946     movl(r14, Address(rbx, Klass::layout_helper_offset()));
 5947     if (UseTLAB) {
 5948       tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
 5949     } else {
 5950       jmp(slow_case);
 5951     }
 5952   }
 5953   if (UseTLAB) {
 5954     // 2. Initialize buffered inline instance header
 5955     Register buffer_obj = rax;
 5956     movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 5957     xorl(r13, r13);
 5958     store_klass_gap(buffer_obj, r13);
 5959     if (vk == NULL) {
 5960       // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 5961       mov(r13, rbx);
 5962     }
 5963     store_klass(buffer_obj, rbx, rscratch1);
 5964     // 3. Initialize its fields with an inline class specific handler
 5965     if (vk != NULL) {
 5966       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 5967     } else {
 5968       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5969       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 5970       call(rbx);
 5971     }
 5972     jmp(skip);
 5973   }
 5974   bind(slow_case);
 5975   // We failed to allocate a new inline type, fall back to a runtime
 5976   // call. Some oop field may be live in some registers but we can't
 5977   // tell. That runtime call will take care of preserving them
 5978   // across a GC if there's one.
 5979   mov(rax, rscratch1);
 5980 #endif
 5981 
 5982   if (from_interpreter) {
 5983     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 5984   } else {
 5985     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 5986     call_offset = offset();
 5987   }
 5988 
 5989   bind(skip);
 5990   return call_offset;
 5991 }
 5992 
 5993 // Move a value between registers/stack slots and update the reg_state
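      // reg_state tracks the shuffle: reg_written means the destination already holds its final
      // value, reg_readonly means it still holds a value that must be read before being clobbered,
      // and reg_writable means it may be overwritten freely.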
 5994 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 5995   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 5996   if (reg_state[to->value()] == reg_written) {
 5997     return true; // Already written
 5998   }
 5999   if (from != to && bt != T_VOID) {
 6000     if (reg_state[to->value()] == reg_readonly) {
 6001       return false; // Not yet writable
 6002     }
 6003     if (from->is_reg()) {
 6004       if (to->is_reg()) {
 6005         if (from->is_XMMRegister()) {
 6006           if (bt == T_DOUBLE) {
 6007             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6008           } else {
 6009             assert(bt == T_FLOAT, "must be float");
 6010             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6011           }
 6012         } else {
 6013           movq(to->as_Register(), from->as_Register());
 6014         }
 6015       } else {
 6016         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6017         Address to_addr = Address(rsp, st_off);
 6018         if (from->is_XMMRegister()) {
 6019           if (bt == T_DOUBLE) {
 6020             movdbl(to_addr, from->as_XMMRegister());
 6021           } else {
 6022             assert(bt == T_FLOAT, "must be float");
 6023             movflt(to_addr, from->as_XMMRegister());
 6024           }
 6025         } else {
 6026           movq(to_addr, from->as_Register());
 6027         }
 6028       }
 6029     } else {
 6030       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6031       if (to->is_reg()) {
 6032         if (to->is_XMMRegister()) {
 6033           if (bt == T_DOUBLE) {
 6034             movdbl(to->as_XMMRegister(), from_addr);
 6035           } else {
 6036             assert(bt == T_FLOAT, "must be float");
 6037             movflt(to->as_XMMRegister(), from_addr);
 6038           }
 6039         } else {
 6040           movq(to->as_Register(), from_addr);
 6041         }
 6042       } else {
 6043         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6044         movq(r13, from_addr);
 6045         movq(Address(rsp, st_off), r13);
 6046       }
 6047     }
 6048   }
 6049   // Update register states
 6050   reg_state[from->value()] = reg_writable;
 6051   reg_state[to->value()] = reg_written;
 6052   return true;
 6053 }
 6054 
 6055 // Calculate the extra stack space required for packing or unpacking inline
 6056 // args and adjust the stack pointer
 6057 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6058   // Two additional slots to account for return address
 6059   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6060   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6061   // Save the return address, adjust the stack (make sure it is properly
 6062   // 16-byte aligned) and copy the return address to the new top of the stack.
 6063   // The stack will be repaired on return (see MacroAssembler::remove_frame).
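        // For example (illustrative only, assuming 4-byte stack slots and 16-byte stack alignment):
        // args_on_stack == 5 gives sp_inc = (5 + 2) * 4 = 28, rounded up to 32.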
 6064   assert(sp_inc > 0, "sanity");
 6065   pop(r13);
 6066   subptr(rsp, sp_inc);
 6067   push(r13);
 6068   return sp_inc;
 6069 }
 6070 
 6071 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
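      // Illustrative sketch (not VM code): for each scalarized field (bt, off) in the signature,
      // load *(buffer + off) into its destination register or stack slot; entries with offset -1
      // carry the null marker (IsInit) and receive the constant 1 (or 0 on the null path).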
 6072 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6073                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6074                                           RegState reg_state[]) {
 6075   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6076   assert(from->is_valid(), "source must be valid");
 6077   bool progress = false;
 6078 #ifdef ASSERT
 6079   const int start_offset = offset();
 6080 #endif
 6081 
 6082   Label L_null, L_notNull;
 6083   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6084   Register tmp1 = r10;
 6085   Register tmp2 = r13;
 6086   Register fromReg = noreg;
 6087   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6088   bool done = true;
 6089   bool mark_done = true;
 6090   VMReg toReg;
 6091   BasicType bt;
 6092   // Check if argument requires a null check
 6093   bool null_check = false;
 6094   VMReg nullCheckReg;
 6095   while (stream.next(nullCheckReg, bt)) {
 6096     if (sig->at(stream.sig_index())._offset == -1) {
 6097       null_check = true;
 6098       break;
 6099     }
 6100   }
 6101   stream.reset(sig_index, to_index);
 6102   while (stream.next(toReg, bt)) {
 6103     assert(toReg->is_valid(), "destination must be valid");
 6104     int idx = (int)toReg->value();
 6105     if (reg_state[idx] == reg_readonly) {
 6106       if (idx != from->value()) {
 6107         mark_done = false;
 6108       }
 6109       done = false;
 6110       continue;
 6111     } else if (reg_state[idx] == reg_written) {
 6112       continue;
 6113     }
 6114     assert(reg_state[idx] == reg_writable, "must be writable");
 6115     reg_state[idx] = reg_written;
 6116     progress = true;
 6117 
 6118     if (fromReg == noreg) {
 6119       if (from->is_reg()) {
 6120         fromReg = from->as_Register();
 6121       } else {
 6122         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6123         movq(tmp1, Address(rsp, st_off));
 6124         fromReg = tmp1;
 6125       }
 6126       if (null_check) {
 6127         // Nullable inline type argument, emit null check
 6128         testptr(fromReg, fromReg);
 6129         jcc(Assembler::zero, L_null);
 6130       }
 6131     }
 6132     int off = sig->at(stream.sig_index())._offset;
 6133     if (off == -1) {
 6134       assert(null_check, "Missing null check");
 6135       if (toReg->is_stack()) {
 6136         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6137         movq(Address(rsp, st_off), 1);
 6138       } else {
 6139         movq(toReg->as_Register(), 1);
 6140       }
 6141       continue;
 6142     }
 6143     assert(off > 0, "offset in object should be positive");
 6144     Address fromAddr = Address(fromReg, off);
 6145     if (!toReg->is_XMMRegister()) {
 6146       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6147       if (is_reference_type(bt)) {
 6148         load_heap_oop(dst, fromAddr);
 6149       } else {
 6150         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6151         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6152       }
 6153       if (toReg->is_stack()) {
 6154         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6155         movq(Address(rsp, st_off), dst);
 6156       }
 6157     } else if (bt == T_DOUBLE) {
 6158       movdbl(toReg->as_XMMRegister(), fromAddr);
 6159     } else {
 6160       assert(bt == T_FLOAT, "must be float");
 6161       movflt(toReg->as_XMMRegister(), fromAddr);
 6162     }
 6163   }
 6164   if (progress && null_check) {
 6165     if (done) {
 6166       jmp(L_notNull);
 6167       bind(L_null);
 6168       // Set IsInit field to zero to signal that the argument is null.
 6169       // Also set all oop fields to zero to make the GC happy.
 6170       stream.reset(sig_index, to_index);
 6171       while (stream.next(toReg, bt)) {
 6172         if (sig->at(stream.sig_index())._offset == -1 ||
 6173             bt == T_OBJECT || bt == T_ARRAY) {
 6174           if (toReg->is_stack()) {
 6175             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6176             movq(Address(rsp, st_off), 0);
 6177           } else {
 6178             xorq(toReg->as_Register(), toReg->as_Register());
 6179           }
 6180         }
 6181       }
 6182       bind(L_notNull);
 6183     } else {
 6184       bind(L_null);
 6185     }
 6186   }
 6187 
 6188   sig_index = stream.sig_index();
 6189   to_index = stream.regs_index();
 6190 
 6191   if (mark_done && reg_state[from->value()] != reg_written) {
 6192     // This is okay because no one else will write to that slot
 6193     reg_state[from->value()] = reg_writable;
 6194   }
 6195   from_index--;
 6196   assert(progress || (start_offset == offset()), "should not emit code");
 6197   return done;
 6198 }
 6199 
 6200 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6201                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6202                                         RegState reg_state[], Register val_array) {
 6203   assert(sig->at(sig_index)._bt == T_PRIMITIVE_OBJECT, "should be at end delimiter");
 6204   assert(to->is_valid(), "destination must be valid");
 6205 
 6206   if (reg_state[to->value()] == reg_written) {
 6207     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6208     return true; // Already written
 6209   }
 6210 
 6211   // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it contains a spilled value?
 6212   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6213   Register val_obj_tmp = r11;
 6214   Register from_reg_tmp = r14;
 6215   Register tmp1 = r10;
 6216   Register tmp2 = r13;
 6217   Register tmp3 = rbx;
 6218   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6219 
 6220   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6221 
 6222   if (reg_state[to->value()] == reg_readonly) {
 6223     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6224       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6225       return false; // Not yet writable
 6226     }
 6227     val_obj = val_obj_tmp;
 6228   }
 6229 
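        // Load the buffered inline type for this argument from the value-object array.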
 6230   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_PRIMITIVE_OBJECT);
 6231   load_heap_oop(val_obj, Address(val_array, index));
 6232 
 6233   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6234   VMReg fromReg;
 6235   BasicType bt;
 6236   Label L_null;
 6237   while (stream.next(fromReg, bt)) {
 6238     assert(fromReg->is_valid(), "source must be valid");
 6239     reg_state[fromReg->value()] = reg_writable;
 6240 
 6241     int off = sig->at(stream.sig_index())._offset;
 6242     if (off == -1) {
 6243       // Nullable inline type argument, emit null check
 6244       Label L_notNull;
 6245       if (fromReg->is_stack()) {
 6246         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6247         testb(Address(rsp, ld_off), 1);
 6248       } else {
 6249         testb(fromReg->as_Register(), 1);
 6250       }
 6251       jcc(Assembler::notZero, L_notNull);
 6252       movptr(val_obj, 0);
 6253       jmp(L_null);
 6254       bind(L_notNull);
 6255       continue;
 6256     }
 6257 
 6258     assert(off > 0, "offset in object should be positive");
 6259     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6260 
 6261     Address dst(val_obj, off);
 6262     if (!fromReg->is_XMMRegister()) {
 6263       Register src;
 6264       if (fromReg->is_stack()) {
 6265         src = from_reg_tmp;
 6266         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6267         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6268       } else {
 6269         src = fromReg->as_Register();
 6270       }
 6271       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6272       if (is_reference_type(bt)) {
 6273         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6274       } else {
 6275         store_sized_value(dst, src, size_in_bytes);
 6276       }
 6277     } else if (bt == T_DOUBLE) {
 6278       movdbl(dst, fromReg->as_XMMRegister());
 6279     } else {
 6280       assert(bt == T_FLOAT, "must be float");
 6281       movflt(dst, fromReg->as_XMMRegister());
 6282     }
 6283   }
 6284   bind(L_null);
 6285   sig_index = stream.sig_index();
 6286   from_index = stream.regs_index();
 6287 
 6288   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6289   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6290   assert(success, "to register must be writeable");
 6291   return true;
 6292 }
 6293 
 6294 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6295   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6296 }
 6297 
 6298 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6299   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6300   if (needs_stack_repair) {
 6301     movq(rbp, Address(rsp, initial_framesize));
 6302     // The stack increment resides just below the saved rbp
 6303     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6304   } else {
 6305     if (initial_framesize > 0) {
 6306       addq(rsp, initial_framesize);
 6307     }
 6308     pop(rbp);
 6309   }
 6310 }
 6311 
 6312 // Clearing constant sized memory using YMM/ZMM registers.
 6313 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6314   assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), "");
 6315   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6316 
 6317   int vector64_count = (cnt & (~0x7)) >> 3;
 6318   cnt = cnt & 0x7;
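        // Example (illustrative): cnt == 27 qwords gives vector64_count == 3 (three 64-byte stores
        // covering 24 qwords) and a 3-qword tail handled by the switch below.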
 6319   const int fill64_per_loop = 4;
 6320   const int max_unrolled_fill64 = 8;
 6321 
 6322   // 64 byte initialization loop.
 6323   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6324   int start64 = 0;
 6325   if (vector64_count > max_unrolled_fill64) {
 6326     Label LOOP;
 6327     Register index = rtmp;
 6328 
 6329     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6330 
 6331     movl(index, 0);
 6332     BIND(LOOP);
 6333     for (int i = 0; i < fill64_per_loop; i++) {
 6334       fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
 6335     }
 6336     addl(index, fill64_per_loop * 64);
 6337     cmpl(index, start64 * 64);
 6338     jccb(Assembler::less, LOOP);
 6339   }
 6340   for (int i = start64; i < vector64_count; i++) {
 6341     fill64(base, i * 64, xtmp, use64byteVector);
 6342   }
 6343 
 6344   // Clear remaining 64 byte tail.
 6345   int disp = vector64_count * 64;
 6346   if (cnt) {
 6347     switch (cnt) {
 6348       case 1:
 6349         movq(Address(base, disp), xtmp);
 6350         break;
 6351       case 2:
 6352         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
 6353         break;
 6354       case 3:
 6355         movl(rtmp, 0x7);
 6356         kmovwl(mask, rtmp);
 6357         evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
 6358         break;
 6359       case 4:
 6360         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6361         break;
 6362       case 5:
 6363         if (use64byteVector) {
 6364           movl(rtmp, 0x1F);
 6365           kmovwl(mask, rtmp);
 6366           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6367         } else {
 6368           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6369           movq(Address(base, disp + 32), xtmp);
 6370         }
 6371         break;
 6372       case 6:
 6373         if (use64byteVector) {
 6374           movl(rtmp, 0x3F);
 6375           kmovwl(mask, rtmp);
 6376           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6377         } else {
 6378           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6379           evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
 6380         }
 6381         break;
 6382       case 7:
 6383         if (use64byteVector) {
 6384           movl(rtmp, 0x7F);
 6385           kmovwl(mask, rtmp);
 6386           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6387         } else {
 6388           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6389           movl(rtmp, 0x7);
 6390           kmovwl(mask, rtmp);
 6391           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6392         }
 6393         break;
 6394       default:
 6395         fatal("Unexpected length : %d\n",cnt);
 6396         break;
 6397     }
 6398   }
 6399 }
 6400 
 6401 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6402                                bool is_large, bool word_copy_only, KRegister mask) {
 6403   // cnt      - number of qwords (8-byte words).
 6404   // base     - start address, qword aligned.
 6405   // is_large - true if the optimizer knows cnt is larger than InitArrayShortSize
 6406   assert(base == rdi, "base register must be rdi for rep stos");
 6407   assert(val  == rax, "val register must be rax for rep stos");
 6408   assert(cnt  == rcx, "cnt register must be rcx for rep stos");
 6409   assert(InitArrayShortSize % BytesPerLong == 0,
 6410     "InitArrayShortSize should be the multiple of BytesPerLong");
 6411 
 6412   Label DONE;
 6413 
 6414   if (!is_large) {
 6415     Label LOOP, LONG;
 6416     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6417     jccb(Assembler::greater, LONG);
 6418 
 6419     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6420 
 6421     decrement(cnt);
 6422     jccb(Assembler::negative, DONE); // Zero length
 6423 
 6424     // Use individual pointer-sized stores for small counts:
 6425     BIND(LOOP);
 6426     movptr(Address(base, cnt, Address::times_ptr), val);
 6427     decrement(cnt);
 6428     jccb(Assembler::greaterEqual, LOOP);
 6429     jmpb(DONE);
 6430 
 6431     BIND(LONG);
 6432   }
 6433 
 6434   // Use longer rep-prefixed ops for non-small counts:
 6435   if (UseFastStosb && !word_copy_only) {
 6436     shlptr(cnt, 3); // convert to number of bytes
 6437     rep_stosb();
 6438   } else if (UseXMMForObjInit) {
 6439     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6440   } else {
 6441     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
 6442     rep_stos();
 6443   }
 6444 
 6445   BIND(DONE);
 6446 }
 6447 
 6448 #endif //COMPILER2_OR_JVMCI
 6449 
 6450 
 6451 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6452                                    Register to, Register value, Register count,
 6453                                    Register rtmp, XMMRegister xtmp) {
 6454   ShortBranchVerifier sbv(this);
 6455   assert_different_registers(to, value, count, rtmp);
 6456   Label L_exit;
 6457   Label L_fill_2_bytes, L_fill_4_bytes;
 6458 
 6459 #if defined(COMPILER2) && defined(_LP64)
 6460   if(MaxVectorSize >=32 &&
 6461      VM_Version::supports_avx512vlbw() &&
 6462      VM_Version::supports_bmi2()) {
 6463     generate_fill_avx3(t, to, value, count, rtmp, xtmp);
 6464     return;
 6465   }
 6466 #endif
 6467 
 6468   int shift = -1;
 6469   switch (t) {
 6470     case T_BYTE:
 6471       shift = 2;
 6472       break;
 6473     case T_SHORT:
 6474       shift = 1;
 6475       break;
 6476     case T_INT:
 6477       shift = 0;
 6478       break;
 6479     default: ShouldNotReachHere();
 6480   }
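        // shift is log2 of the number of elements per 32-bit word, so (1 << shift) elements
        // span 4 bytes and (8 << shift) elements span 32 bytes in the loops below.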
 6481 
 6482   if (t == T_BYTE) {
 6483     andl(value, 0xff);
 6484     movl(rtmp, value);
 6485     shll(rtmp, 8);
 6486     orl(value, rtmp);
 6487   }
 6488   if (t == T_SHORT) {
 6489     andl(value, 0xffff);
 6490   }
 6491   if (t == T_BYTE || t == T_SHORT) {
 6492     movl(rtmp, value);
 6493     shll(rtmp, 16);
 6494     orl(value, rtmp);
 6495   }
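        // value now holds the fill pattern replicated across 32 bits,
        // e.g. byte 0x41 becomes 0x41414141.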
 6496 
 6497   cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
 6498   jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
 6499   if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 6500     Label L_skip_align2;
 6501     // align the destination address to a 4-byte boundary
 6502     if (t == T_BYTE) {
 6503       Label L_skip_align1;
 6504       // One-byte misalignment happens only for byte arrays
 6505       testptr(to, 1);
 6506       jccb(Assembler::zero, L_skip_align1);
 6507       movb(Address(to, 0), value);
 6508       increment(to);
 6509       decrement(count);
 6510       BIND(L_skip_align1);
 6511     }
 6512     // Two-byte misalignment happens only for byte and short (char) arrays
 6513     testptr(to, 2);
 6514     jccb(Assembler::zero, L_skip_align2);
 6515     movw(Address(to, 0), value);
 6516     addptr(to, 2);
 6517     subl(count, 1<<(shift-1));
 6518     BIND(L_skip_align2);
 6519   }
 6520   if (UseSSE < 2) {
 6521     Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
 6522     // Fill 32-byte chunks
 6523     subl(count, 8 << shift);
 6524     jcc(Assembler::less, L_check_fill_8_bytes);
 6525     align(16);
 6526 
 6527     BIND(L_fill_32_bytes_loop);
 6528 
 6529     for (int i = 0; i < 32; i += 4) {
 6530       movl(Address(to, i), value);
 6531     }
 6532 
 6533     addptr(to, 32);
 6534     subl(count, 8 << shift);
 6535     jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
 6536     BIND(L_check_fill_8_bytes);
 6537     addl(count, 8 << shift);
 6538     jccb(Assembler::zero, L_exit);
 6539     jmpb(L_fill_8_bytes);
 6540 
 6541     //
 6542     // length is too short, just fill qwords
 6543     //
 6544     BIND(L_fill_8_bytes_loop);
 6545     movl(Address(to, 0), value);
 6546     movl(Address(to, 4), value);
 6547     addptr(to, 8);
 6548     BIND(L_fill_8_bytes);
 6549     subl(count, 1 << (shift + 1));
 6550     jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
 6551     // fall through to fill 4 bytes
 6552   } else {
 6553     Label L_fill_32_bytes;
 6554     if (!UseUnalignedLoadStores) {
 6555       // align to 8 bytes; we know we are 4-byte aligned to start
 6556       testptr(to, 4);
 6557       jccb(Assembler::zero, L_fill_32_bytes);
 6558       movl(Address(to, 0), value);
 6559       addptr(to, 4);
 6560       subl(count, 1<<shift);
 6561     }
 6562     BIND(L_fill_32_bytes);
 6563     {
 6564       assert(UseSSE >= 2, "supported cpu only");
 6565       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
 6566       movdl(xtmp, value);
 6567       if (UseAVX >= 2 && UseUnalignedLoadStores) {
 6568         Label L_check_fill_32_bytes;
 6569         if (UseAVX > 2) {
 6570           // Fill 64-byte chunks
 6571           Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
 6572 
 6573           // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
 6574           cmpl(count, VM_Version::avx3_threshold());
 6575           jccb(Assembler::below, L_check_fill_64_bytes_avx2);
 6576 
 6577           vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
 6578 
 6579           subl(count, 16 << shift);
 6580           jccb(Assembler::less, L_check_fill_32_bytes);
 6581           align(16);
 6582 
 6583           BIND(L_fill_64_bytes_loop_avx3);
 6584           evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
 6585           addptr(to, 64);
 6586           subl(count, 16 << shift);
 6587           jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
 6588           jmpb(L_check_fill_32_bytes);
 6589 
 6590           BIND(L_check_fill_64_bytes_avx2);
 6591         }
 6592         // Fill 64-byte chunks
 6593         Label L_fill_64_bytes_loop;
 6594         vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
 6595 
 6596         subl(count, 16 << shift);
 6597         jcc(Assembler::less, L_check_fill_32_bytes);
 6598         align(16);
 6599 
 6600         BIND(L_fill_64_bytes_loop);
 6601         vmovdqu(Address(to, 0), xtmp);
 6602         vmovdqu(Address(to, 32), xtmp);
 6603         addptr(to, 64);
 6604         subl(count, 16 << shift);
 6605         jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
 6606 
 6607         BIND(L_check_fill_32_bytes);
 6608         addl(count, 8 << shift);
 6609         jccb(Assembler::less, L_check_fill_8_bytes);
 6610         vmovdqu(Address(to, 0), xtmp);
 6611         addptr(to, 32);
 6612         subl(count, 8 << shift);
 6613 
 6614         BIND(L_check_fill_8_bytes);
 6615         // clean upper bits of YMM registers
 6616         movdl(xtmp, value);
 6617         pshufd(xtmp, xtmp, 0);
 6618       } else {
 6619         // Fill 32-byte chunks
 6620         pshufd(xtmp, xtmp, 0);
 6621 
 6622         subl(count, 8 << shift);
 6623         jcc(Assembler::less, L_check_fill_8_bytes);
 6624         align(16);
 6625 
 6626         BIND(L_fill_32_bytes_loop);
 6627 
 6628         if (UseUnalignedLoadStores) {
 6629           movdqu(Address(to, 0), xtmp);
 6630           movdqu(Address(to, 16), xtmp);
 6631         } else {
 6632           movq(Address(to, 0), xtmp);
 6633           movq(Address(to, 8), xtmp);
 6634           movq(Address(to, 16), xtmp);
 6635           movq(Address(to, 24), xtmp);
 6636         }
 6637 
 6638         addptr(to, 32);
 6639         subl(count, 8 << shift);
 6640         jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
 6641 
 6642         BIND(L_check_fill_8_bytes);
 6643       }
 6644       addl(count, 8 << shift);
 6645       jccb(Assembler::zero, L_exit);
 6646       jmpb(L_fill_8_bytes);
 6647 
 6648       //
 6649       // length is too short, just fill qwords
 6650       //
 6651       BIND(L_fill_8_bytes_loop);
 6652       movq(Address(to, 0), xtmp);
 6653       addptr(to, 8);
 6654       BIND(L_fill_8_bytes);
 6655       subl(count, 1 << (shift + 1));
 6656       jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
 6657     }
 6658   }
 6659   // fill trailing 4 bytes
 6660   BIND(L_fill_4_bytes);
 6661   testl(count, 1<<shift);
 6662   jccb(Assembler::zero, L_fill_2_bytes);
 6663   movl(Address(to, 0), value);
 6664   if (t == T_BYTE || t == T_SHORT) {
 6665     Label L_fill_byte;
 6666     addptr(to, 4);
 6667     BIND(L_fill_2_bytes);
 6668     // fill trailing 2 bytes
 6669     testl(count, 1<<(shift-1));
 6670     jccb(Assembler::zero, L_fill_byte);
 6671     movw(Address(to, 0), value);
 6672     if (t == T_BYTE) {
 6673       addptr(to, 2);
 6674       BIND(L_fill_byte);
 6675       // fill trailing byte
 6676       testl(count, 1);
 6677       jccb(Assembler::zero, L_exit);
 6678       movb(Address(to, 0), value);
 6679     } else {
 6680       BIND(L_fill_byte);
 6681     }
 6682   } else {
 6683     BIND(L_fill_2_bytes);
 6684   }
 6685   BIND(L_exit);
 6686 }
 6687 
 6688 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
 6689   switch(type) {
 6690     case T_BYTE:
 6691     case T_BOOLEAN:
 6692       evpbroadcastb(dst, src, vector_len);
 6693       break;
 6694     case T_SHORT:
 6695     case T_CHAR:
 6696       evpbroadcastw(dst, src, vector_len);
 6697       break;
 6698     case T_INT:
 6699     case T_FLOAT:
 6700       evpbroadcastd(dst, src, vector_len);
 6701       break;
 6702     case T_LONG:
 6703     case T_DOUBLE:
 6704       evpbroadcastq(dst, src, vector_len);
 6705       break;
 6706     default:
 6707       fatal("Unhandled type : %s", type2name(type));
 6708       break;
 6709   }
 6710 }
 6711 
 6712 // encode char[] to byte[] in ISO_8859_1 or ASCII
 6713 // @IntrinsicCandidate
 6714 // private static int implEncodeISOArray(byte[] sa, int sp,
 6715 //                                       byte[] da, int dp, int len) {
 6716 //   int i = 0;
 6717 //   for (; i < len; i++) {
 6718 //     char c = StringUTF16.getChar(sa, sp++);
 6719 //     if (c > '\u00FF')
 6720 //       break;
 6721 //     da[dp++] = (byte)c;
 6722 //   }
 6723 //   return i;
 6724 // }
 6725 //
 6726 // @IntrinsicCandidate
 6727 // private static int implEncodeAsciiArray(char[] sa, int sp,
 6728 //                                         byte[] da, int dp, int len) {
 6729 //   int i = 0;
 6730 //   for (; i < len; i++) {
 6731 //     char c = sa[sp++];
 6732 //     if (c >= '\u0080')
 6733 //       break;
 6734 //     da[dp++] = (byte)c;
 6735 //   }
 6736 //   return i;
 6737 // }
 6738 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
 6739   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 6740   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 6741   Register tmp5, Register result, bool ascii) {
 6742 
 6743   // rsi: src
 6744   // rdi: dst
 6745   // rdx: len
 6746   // rcx: tmp5
 6747   // rax: result
 6748   ShortBranchVerifier sbv(this);
 6749   assert_different_registers(src, dst, len, tmp5, result);
 6750   Label L_done, L_copy_1_char, L_copy_1_char_exit;
 6751 
 6752   int mask = ascii ? 0xff80ff80 : 0xff00ff00;
 6753   int short_mask = ascii ? 0xff80 : 0xff00;
 6754 
 6755   // set result
 6756   xorl(result, result);
 6757   // check for zero length
 6758   testl(len, len);
 6759   jcc(Assembler::zero, L_done);
 6760 
 6761   movl(result, len);
 6762 
 6763   // Setup pointers
 6764   lea(src, Address(src, len, Address::times_2)); // char[]
 6765   lea(dst, Address(dst, len, Address::times_1)); // byte[]
 6766   negptr(len);
 6767 
 6768   if (UseSSE42Intrinsics || UseAVX >= 2) {
 6769     Label L_copy_8_chars, L_copy_8_chars_exit;
 6770     Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
 6771 
 6772     if (UseAVX >= 2) {
 6773       Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
 6774       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6775       movdl(tmp1Reg, tmp5);
 6776       vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
 6777       jmp(L_chars_32_check);
 6778 
 6779       bind(L_copy_32_chars);
 6780       vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
 6781       vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
 6782       vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6783       vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6784       jccb(Assembler::notZero, L_copy_32_chars_exit);
 6785       vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6786       vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
 6787       vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
 6788 
 6789       bind(L_chars_32_check);
 6790       addptr(len, 32);
 6791       jcc(Assembler::lessEqual, L_copy_32_chars);
 6792 
 6793       bind(L_copy_32_chars_exit);
 6794       subptr(len, 16);
 6795       jccb(Assembler::greater, L_copy_16_chars_exit);
 6796 
 6797     } else if (UseSSE42Intrinsics) {
 6798       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6799       movdl(tmp1Reg, tmp5);
 6800       pshufd(tmp1Reg, tmp1Reg, 0);
 6801       jmpb(L_chars_16_check);
 6802     }
 6803 
 6804     bind(L_copy_16_chars);
 6805     if (UseAVX >= 2) {
 6806       vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
 6807       vptest(tmp2Reg, tmp1Reg);
 6808       jcc(Assembler::notZero, L_copy_16_chars_exit);
 6809       vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
 6810       vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
 6811     } else {
 6812       if (UseAVX > 0) {
 6813         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6814         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6815         vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
 6816       } else {
 6817         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6818         por(tmp2Reg, tmp3Reg);
 6819         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6820         por(tmp2Reg, tmp4Reg);
 6821       }
 6822       ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6823       jccb(Assembler::notZero, L_copy_16_chars_exit);
 6824       packuswb(tmp3Reg, tmp4Reg);
 6825     }
 6826     movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
 6827 
 6828     bind(L_chars_16_check);
 6829     addptr(len, 16);
 6830     jcc(Assembler::lessEqual, L_copy_16_chars);
 6831 
 6832     bind(L_copy_16_chars_exit);
 6833     if (UseAVX >= 2) {
 6834       // clean upper bits of YMM registers
 6835       vpxor(tmp2Reg, tmp2Reg);
 6836       vpxor(tmp3Reg, tmp3Reg);
 6837       vpxor(tmp4Reg, tmp4Reg);
 6838       movdl(tmp1Reg, tmp5);
 6839       pshufd(tmp1Reg, tmp1Reg, 0);
 6840     }
 6841     subptr(len, 8);
 6842     jccb(Assembler::greater, L_copy_8_chars_exit);
 6843 
 6844     bind(L_copy_8_chars);
 6845     movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
 6846     ptest(tmp3Reg, tmp1Reg);
 6847     jccb(Assembler::notZero, L_copy_8_chars_exit);
 6848     packuswb(tmp3Reg, tmp1Reg);
 6849     movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
 6850     addptr(len, 8);
 6851     jccb(Assembler::lessEqual, L_copy_8_chars);
 6852 
 6853     bind(L_copy_8_chars_exit);
 6854     subptr(len, 8);
 6855     jccb(Assembler::zero, L_done);
 6856   }
 6857 
 6858   bind(L_copy_1_char);
 6859   load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
 6860   testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
 6861   jccb(Assembler::notZero, L_copy_1_char_exit);
 6862   movb(Address(dst, len, Address::times_1, 0), tmp5);
 6863   addptr(len, 1);
 6864   jccb(Assembler::less, L_copy_1_char);
 6865 
 6866   bind(L_copy_1_char_exit);
 6867   addptr(result, len); // len is the negative count of unprocessed elements
 6868 
 6869   bind(L_done);
 6870 }
 6871 
 6872 #ifdef _LP64
 6873 /**
 6874  * Helper for multiply_to_len().
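       * Computes dest_hi:dest_lo += src1 + src2, treating the register pair as one
       * 128-bit accumulator and propagating carries with adc.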
 6875  */
 6876 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
 6877   addq(dest_lo, src1);
 6878   adcq(dest_hi, 0);
 6879   addq(dest_lo, src2);
 6880   adcq(dest_hi, 0);
 6881 }
 6882 
 6883 /**
 6884  * Multiply 64 bit by 64 bit first loop.
 6885  */
 6886 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
 6887                                            Register y, Register y_idx, Register z,
 6888                                            Register carry, Register product,
 6889                                            Register idx, Register kdx) {
 6890   //
 6891   //  jlong carry, x[], y[], z[];
 6892   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 6893   //    huge_128 product = y[idx] * x[xstart] + carry;
 6894   //    z[kdx] = (jlong)product;
 6895   //    carry  = (jlong)(product >>> 64);
 6896   //  }
 6897   //  z[xstart] = carry;
 6898   //
 6899 
 6900   Label L_first_loop, L_first_loop_exit;
 6901   Label L_one_x, L_one_y, L_multiply;
 6902 
 6903   decrementl(xstart);
 6904   jcc(Assembler::negative, L_one_x);
 6905 
 6906   movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 6907   rorq(x_xstart, 32); // convert big-endian to little-endian
 6908 
 6909   bind(L_first_loop);
 6910   decrementl(idx);
 6911   jcc(Assembler::negative, L_first_loop_exit);
 6912   decrementl(idx);
 6913   jcc(Assembler::negative, L_one_y);
 6914   movq(y_idx, Address(y, idx, Address::times_4,  0));
 6915   rorq(y_idx, 32); // convert big-endian to little-endian
 6916   bind(L_multiply);
 6917   movq(product, x_xstart);
 6918   mulq(y_idx); // product(rax) * y_idx -> rdx:rax
 6919   addq(product, carry);
 6920   adcq(rdx, 0);
 6921   subl(kdx, 2);
 6922   movl(Address(z, kdx, Address::times_4,  4), product);
 6923   shrq(product, 32);
 6924   movl(Address(z, kdx, Address::times_4,  0), product);
 6925   movq(carry, rdx);
 6926   jmp(L_first_loop);
 6927 
 6928   bind(L_one_y);
 6929   movl(y_idx, Address(y,  0));
 6930   jmp(L_multiply);
 6931 
 6932   bind(L_one_x);
 6933   movl(x_xstart, Address(x,  0));
 6934   jmp(L_first_loop);
 6935 
 6936   bind(L_first_loop_exit);
 6937 }
 6938 
 6939 /**
 6940  * Multiply 64 bit by 64 bit and add 128 bit.
 6941  */
 6942 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
 6943                                             Register yz_idx, Register idx,
 6944                                             Register carry, Register product, int offset) {
 6945   //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
 6946   //     z[kdx] = (jlong)product;
 6947 
 6948   movq(yz_idx, Address(y, idx, Address::times_4,  offset));
 6949   rorq(yz_idx, 32); // convert big-endian to little-endian
 6950   movq(product, x_xstart);
 6951   mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
 6952   movq(yz_idx, Address(z, idx, Address::times_4,  offset));
 6953   rorq(yz_idx, 32); // convert big-endian to little-endian
 6954 
 6955   add2_with_carry(rdx, product, carry, yz_idx);
 6956 
 6957   movl(Address(z, idx, Address::times_4,  offset+4), product);
 6958   shrq(product, 32);
 6959   movl(Address(z, idx, Address::times_4,  offset), product);
 6960 
 6961 }
 6962 
 6963 /**
 6964  * Multiply 128 bit by 128 bit. Unrolled inner loop.
 6965  */
 6966 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
 6967                                              Register yz_idx, Register idx, Register jdx,
 6968                                              Register carry, Register product,
 6969                                              Register carry2) {
 6970   //   jlong carry, x[], y[], z[];
 6971   //   int kdx = ystart+1;
 6972   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 6973   //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
 6974   //     z[kdx+idx+1] = (jlong)product;
 6975   //     jlong carry2  = (jlong)(product >>> 64);
 6976   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
 6977   //     z[kdx+idx] = (jlong)product;
 6978   //     carry  = (jlong)(product >>> 64);
 6979   //   }
 6980   //   idx += 2;
 6981   //   if (idx > 0) {
 6982   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
 6983   //     z[kdx+idx] = (jlong)product;
 6984   //     carry  = (jlong)(product >>> 64);
 6985   //   }
 6986   //
 6987 
 6988   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 6989 
 6990   movl(jdx, idx);
 6991   andl(jdx, 0xFFFFFFFC);
 6992   shrl(jdx, 2);
 6993 
 6994   bind(L_third_loop);
 6995   subl(jdx, 1);
 6996   jcc(Assembler::negative, L_third_loop_exit);
 6997   subl(idx, 4);
 6998 
 6999   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
 7000   movq(carry2, rdx);
 7001 
 7002   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
 7003   movq(carry, rdx);
 7004   jmp(L_third_loop);
 7005 
 7006   bind (L_third_loop_exit);
 7007 
 7008   andl (idx, 0x3);
 7009   jcc(Assembler::zero, L_post_third_loop_done);
 7010 
 7011   Label L_check_1;
 7012   subl(idx, 2);
 7013   jcc(Assembler::negative, L_check_1);
 7014 
 7015   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
 7016   movq(carry, rdx);
 7017 
 7018   bind (L_check_1);
 7019   addl (idx, 0x2);
 7020   andl (idx, 0x1);
 7021   subl(idx, 1);
 7022   jcc(Assembler::negative, L_post_third_loop_done);
 7023 
 7024   movl(yz_idx, Address(y, idx, Address::times_4,  0));
 7025   movq(product, x_xstart);
 7026   mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
 7027   movl(yz_idx, Address(z, idx, Address::times_4,  0));
 7028 
 7029   add2_with_carry(rdx, product, yz_idx, carry);
 7030 
 7031   movl(Address(z, idx, Address::times_4,  0), product);
 7032   shrq(product, 32);
 7033 
 7034   shlq(rdx, 32);
 7035   orq(product, rdx);
 7036   movq(carry, product);
 7037 
 7038   bind(L_post_third_loop_done);
 7039 }
 7040 
 7041 /**
 7042  * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 7043  *
 7044  */
 7045 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
 7046                                                   Register carry, Register carry2,
 7047                                                   Register idx, Register jdx,
 7048                                                   Register yz_idx1, Register yz_idx2,
 7049                                                   Register tmp, Register tmp3, Register tmp4) {
 7050   assert(UseBMI2Instructions, "should be used only when BMI2 is available");
 7051 
 7052   //   jlong carry, x[], y[], z[];
 7053   //   int kdx = ystart+1;
 7054   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 7055   //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
 7056   //     jlong carry2  = (jlong)(tmp3 >>> 64);
 7057   //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
 7058   //     carry  = (jlong)(tmp4 >>> 64);
 7059   //     z[kdx+idx+1] = (jlong)tmp3;
 7060   //     z[kdx+idx] = (jlong)tmp4;
 7061   //   }
 7062   //   idx += 2;
 7063   //   if (idx > 0) {
 7064   //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
 7065   //     z[kdx+idx] = (jlong)yz_idx1;
 7066   //     carry  = (jlong)(yz_idx1 >>> 64);
 7067   //   }
 7068   //
 7069 
 7070   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 7071 
 7072   movl(jdx, idx);
 7073   andl(jdx, 0xFFFFFFFC);
 7074   shrl(jdx, 2);
 7075 
 7076   bind(L_third_loop);
 7077   subl(jdx, 1);
 7078   jcc(Assembler::negative, L_third_loop_exit);
 7079   subl(idx, 4);
 7080 
 7081   movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
 7082   rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
 7083   movq(yz_idx2, Address(y, idx, Address::times_4,  0));
 7084   rorxq(yz_idx2, yz_idx2, 32);
 7085 
 7086   mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
 7087   mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
 7088 
 7089   movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
 7090   rorxq(yz_idx1, yz_idx1, 32);
 7091   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 7092   rorxq(yz_idx2, yz_idx2, 32);
 7093 
 7094   if (VM_Version::supports_adx()) {
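          // ADX provides two independent carry chains: adcx uses only CF and adox uses
          // only OF, letting the two accumulations proceed without serializing on one flag.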
 7095     adcxq(tmp3, carry);
 7096     adoxq(tmp3, yz_idx1);
 7097 
 7098     adcxq(tmp4, tmp);
 7099     adoxq(tmp4, yz_idx2);
 7100 
 7101     movl(carry, 0); // does not affect flags
 7102     adcxq(carry2, carry);
 7103     adoxq(carry2, carry);
 7104   } else {
 7105     add2_with_carry(tmp4, tmp3, carry, yz_idx1);
 7106     add2_with_carry(carry2, tmp4, tmp, yz_idx2);
 7107   }
 7108   movq(carry, carry2);
 7109 
 7110   movl(Address(z, idx, Address::times_4, 12), tmp3);
 7111   shrq(tmp3, 32);
 7112   movl(Address(z, idx, Address::times_4,  8), tmp3);
 7113 
 7114   movl(Address(z, idx, Address::times_4,  4), tmp4);
 7115   shrq(tmp4, 32);
 7116   movl(Address(z, idx, Address::times_4,  0), tmp4);
 7117 
 7118   jmp(L_third_loop);
 7119 
 7120   bind (L_third_loop_exit);
 7121 
 7122   andl (idx, 0x3);
 7123   jcc(Assembler::zero, L_post_third_loop_done);
 7124 
 7125   Label L_check_1;
 7126   subl(idx, 2);
 7127   jcc(Assembler::negative, L_check_1);
 7128 
 7129   movq(yz_idx1, Address(y, idx, Address::times_4,  0));
 7130   rorxq(yz_idx1, yz_idx1, 32);
 7131   mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
 7132   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 7133   rorxq(yz_idx2, yz_idx2, 32);
 7134 
 7135   add2_with_carry(tmp4, tmp3, carry, yz_idx2);
 7136 
 7137   movl(Address(z, idx, Address::times_4,  4), tmp3);
 7138   shrq(tmp3, 32);
 7139   movl(Address(z, idx, Address::times_4,  0), tmp3);
 7140   movq(carry, tmp4);
 7141 
 7142   bind (L_check_1);
 7143   addl (idx, 0x2);
 7144   andl (idx, 0x1);
 7145   subl(idx, 1);
 7146   jcc(Assembler::negative, L_post_third_loop_done);
 7147   movl(tmp4, Address(y, idx, Address::times_4,  0));
 7148   mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
 7149   movl(tmp4, Address(z, idx, Address::times_4,  0));
 7150 
 7151   add2_with_carry(carry2, tmp3, tmp4, carry);
 7152 
 7153   movl(Address(z, idx, Address::times_4,  0), tmp3);
 7154   shrq(tmp3, 32);
 7155 
 7156   shlq(carry2, 32);
 7157   orq(tmp3, carry2);
 7158   movq(carry, tmp3);
 7159 
 7160   bind(L_post_third_loop_done);
 7161 }
 7162 
 7163 /**
 7164  * Code for BigInteger::multiplyToLen() intrinsic.
 7165  *
 7166  * rdi: x
 7167  * rax: xlen
 7168  * rsi: y
 7169  * rcx: ylen
 7170  * r8:  z
 7171  * r11: zlen
 7172  * r12: tmp1
 7173  * r13: tmp2
 7174  * r14: tmp3
 7175  * r15: tmp4
 7176  * rbx: tmp5
 7177  *
 7178  */
 7179 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
 7180                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
 7181   ShortBranchVerifier sbv(this);
 7182   assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
 7183 
 7184   push(tmp1);
 7185   push(tmp2);
 7186   push(tmp3);
 7187   push(tmp4);
 7188   push(tmp5);
 7189 
 7190   push(xlen);
 7191   push(zlen);
 7192 
 7193   const Register idx = tmp1;
 7194   const Register kdx = tmp2;
 7195   const Register xstart = tmp3;
 7196 
 7197   const Register y_idx = tmp4;
 7198   const Register carry = tmp5;
 7199   const Register product  = xlen;
 7200   const Register x_xstart = zlen;  // reuse register
 7201 
 7202   // First Loop.
 7203   //
 7204   //  final static long LONG_MASK = 0xffffffffL;
 7205   //  int xstart = xlen - 1;
 7206   //  int ystart = ylen - 1;
 7207   //  long carry = 0;
 7208   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 7209   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
 7210   //    z[kdx] = (int)product;
 7211   //    carry = product >>> 32;
 7212   //  }
 7213   //  z[xstart] = (int)carry;
 7214   //
 7215 
 7216   movl(idx, ylen);      // idx = ylen;
 7217   movl(kdx, zlen);      // kdx = xlen+ylen;
 7218   xorq(carry, carry);   // carry = 0;
 7219 
 7220   Label L_done;
 7221 
 7222   movl(xstart, xlen);
 7223   decrementl(xstart);
 7224   jcc(Assembler::negative, L_done);
 7225 
 7226   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
 7227 
 7228   Label L_second_loop;
 7229   testl(kdx, kdx);
 7230   jcc(Assembler::zero, L_second_loop);
 7231 
 7232   Label L_carry;
 7233   subl(kdx, 1);
 7234   jcc(Assembler::zero, L_carry);
 7235 
 7236   movl(Address(z, kdx, Address::times_4,  0), carry);
 7237   shrq(carry, 32);
 7238   subl(kdx, 1);
 7239 
 7240   bind(L_carry);
 7241   movl(Address(z, kdx, Address::times_4,  0), carry);
 7242 
 7243   // Second and third (nested) loops.
 7244   //
 7245   // for (int i = xstart-1; i >= 0; i--) { // Second loop
 7246   //   carry = 0;
 7247   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
 7248   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
 7249   //                    (z[k] & LONG_MASK) + carry;
 7250   //     z[k] = (int)product;
 7251   //     carry = product >>> 32;
 7252   //   }
 7253   //   z[i] = (int)carry;
 7254   // }
 7255   //
 7256   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
 7257 
 7258   const Register jdx = tmp1;
 7259 
 7260   bind(L_second_loop);
 7261   xorl(carry, carry);    // carry = 0;
 7262   movl(jdx, ylen);       // j = ystart+1
 7263 
 7264   subl(xstart, 1);       // i = xstart-1;
 7265   jcc(Assembler::negative, L_done);
 7266 
 7267   push (z);
 7268 
 7269   Label L_last_x;
 7270   lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
 7271   subl(xstart, 1);       // i = xstart-1;
 7272   jcc(Assembler::negative, L_last_x);
 7273 
 7274   if (UseBMI2Instructions) {
 7275     movq(rdx,  Address(x, xstart, Address::times_4,  0));
 7276     rorxq(rdx, rdx, 32); // convert big-endian to little-endian
 7277   } else {
 7278     movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 7279     rorq(x_xstart, 32);  // convert big-endian to little-endian
 7280   }
 7281 
 7282   Label L_third_loop_prologue;
 7283   bind(L_third_loop_prologue);
 7284 
 7285   push (x);
 7286   push (xstart);
 7287   push (ylen);
 7288 
 7289 
 7290   if (UseBMI2Instructions) {
 7291     multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
 7292   } else { // !UseBMI2Instructions
 7293     multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
 7294   }
 7295 
 7296   pop(ylen);
 7297   pop(xlen);
 7298   pop(x);
 7299   pop(z);
 7300 
 7301   movl(tmp3, xlen);
 7302   addl(tmp3, 1);
 7303   movl(Address(z, tmp3, Address::times_4,  0), carry);
 7304   subl(tmp3, 1);
 7305   jccb(Assembler::negative, L_done);
 7306 
 7307   shrq(carry, 32);
 7308   movl(Address(z, tmp3, Address::times_4,  0), carry);
 7309   jmp(L_second_loop);
 7310 
 7311   // Next infrequent code is moved outside loops.
 7312   bind(L_last_x);
 7313   if (UseBMI2Instructions) {
 7314     movl(rdx, Address(x,  0));
 7315   } else {
 7316     movl(x_xstart, Address(x,  0));
 7317   }
 7318   jmp(L_third_loop_prologue);
 7319 
 7320   bind(L_done);
 7321 
 7322   pop(zlen);
 7323   pop(xlen);
 7324 
 7325   pop(tmp5);
 7326   pop(tmp4);
 7327   pop(tmp3);
 7328   pop(tmp2);
 7329   pop(tmp1);
 7330 }
 7331 
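// An illustrative scalar reference for vectorized_mismatch() below (a reading
// aid only, not the emitted code): the routine returns the element index of the
// first difference between the two ranges, or -1 if they are equal. length is
// given in elements and log2_array_indxscale is log2 of the element size.
//
//   jlong byteLen = length << log2_array_indxscale;
//   for (jlong i = 0; i < byteLen; i++) {
//     if (((jbyte*)obja)[i] != ((jbyte*)objb)[i]) {
//       return i >> log2_array_indxscale;   // first mismatching element
//     }
//   }
//   return -1;                              // ranges are identical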
 7332 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
 7333   Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
 7334   assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
 7335   Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
 7336   Label VECTOR8_TAIL, VECTOR4_TAIL;
 7337   Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
 7338   Label SAME_TILL_END, DONE;
 7339   Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
 7340 
 7341   //scale is in rcx in both Win64 and Unix
 7342   ShortBranchVerifier sbv(this);
 7343 
 7344   shlq(length);
 7345   xorq(result, result);
 7346 
 7347   if ((AVX3Threshold == 0) && (UseAVX > 2) &&
 7348       VM_Version::supports_avx512vlbw()) {
 7349     Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
 7350 
 7351     cmpq(length, 64);
 7352     jcc(Assembler::less, VECTOR32_TAIL);
 7353 
 7354     movq(tmp1, length);
 7355     andq(tmp1, 0x3F);      // tail count
 7356     andq(length, ~(0x3F)); //vector count
 7357 
 7358     bind(VECTOR64_LOOP);
 7359     // AVX512 code to compare 64 byte vectors.
 7360     evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
 7361     evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
 7362     kortestql(k7, k7);
 7363     jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL);     // mismatch
 7364     addq(result, 64);
 7365     subq(length, 64);
 7366     jccb(Assembler::notZero, VECTOR64_LOOP);
 7367 
 7368     //bind(VECTOR64_TAIL);
 7369     testq(tmp1, tmp1);
 7370     jcc(Assembler::zero, SAME_TILL_END);
 7371 
 7372     //bind(VECTOR64_TAIL);
 7373     // AVX512 code to compare up to 63 byte vectors.
 7374     mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
 7375     shlxq(tmp2, tmp2, tmp1);
 7376     notq(tmp2);
 7377     kmovql(k3, tmp2);
 7378 
 7379     evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
 7380     evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
 7381 
 7382     ktestql(k7, k3);
 7383     jcc(Assembler::below, SAME_TILL_END);     // not mismatch
 7384 
 7385     bind(VECTOR64_NOT_EQUAL);
 7386     kmovql(tmp1, k7);
 7387     notq(tmp1);
 7388     tzcntq(tmp1, tmp1);
 7389     addq(result, tmp1);
 7390     shrq(result);
 7391     jmp(DONE);
 7392     bind(VECTOR32_TAIL);
 7393   }
 7394 
 7395   cmpq(length, 8);
 7396   jcc(Assembler::equal, VECTOR8_LOOP);
 7397   jcc(Assembler::less, VECTOR4_TAIL);
 7398 
 7399   if (UseAVX >= 2) {
 7400     Label VECTOR16_TAIL, VECTOR32_LOOP;
 7401 
 7402     cmpq(length, 16);
 7403     jcc(Assembler::equal, VECTOR16_LOOP);
 7404     jcc(Assembler::less, VECTOR8_LOOP);
 7405 
 7406     cmpq(length, 32);
 7407     jccb(Assembler::less, VECTOR16_TAIL);
 7408 
 7409     subq(length, 32);
 7410     bind(VECTOR32_LOOP);
 7411     vmovdqu(rymm0, Address(obja, result));
 7412     vmovdqu(rymm1, Address(objb, result));
 7413     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
 7414     vptest(rymm2, rymm2);
 7415     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
 7416     addq(result, 32);
 7417     subq(length, 32);
 7418     jcc(Assembler::greaterEqual, VECTOR32_LOOP);
 7419     addq(length, 32);
 7420     jcc(Assembler::equal, SAME_TILL_END);
7421     //falling through if less than 32 bytes left; close the branch here.
 7422 
 7423     bind(VECTOR16_TAIL);
 7424     cmpq(length, 16);
 7425     jccb(Assembler::less, VECTOR8_TAIL);
 7426     bind(VECTOR16_LOOP);
 7427     movdqu(rymm0, Address(obja, result));
 7428     movdqu(rymm1, Address(objb, result));
 7429     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
 7430     ptest(rymm2, rymm2);
 7431     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7432     addq(result, 16);
 7433     subq(length, 16);
 7434     jcc(Assembler::equal, SAME_TILL_END);
 7435     //falling through if less than 16 bytes left
 7436   } else {//regular intrinsics
 7437 
 7438     cmpq(length, 16);
 7439     jccb(Assembler::less, VECTOR8_TAIL);
 7440 
 7441     subq(length, 16);
 7442     bind(VECTOR16_LOOP);
 7443     movdqu(rymm0, Address(obja, result));
 7444     movdqu(rymm1, Address(objb, result));
 7445     pxor(rymm0, rymm1);
 7446     ptest(rymm0, rymm0);
 7447     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7448     addq(result, 16);
 7449     subq(length, 16);
 7450     jccb(Assembler::greaterEqual, VECTOR16_LOOP);
 7451     addq(length, 16);
 7452     jcc(Assembler::equal, SAME_TILL_END);
 7453     //falling through if less than 16 bytes left
 7454   }
 7455 
 7456   bind(VECTOR8_TAIL);
 7457   cmpq(length, 8);
 7458   jccb(Assembler::less, VECTOR4_TAIL);
 7459   bind(VECTOR8_LOOP);
 7460   movq(tmp1, Address(obja, result));
 7461   movq(tmp2, Address(objb, result));
 7462   xorq(tmp1, tmp2);
 7463   testq(tmp1, tmp1);
 7464   jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
 7465   addq(result, 8);
 7466   subq(length, 8);
 7467   jcc(Assembler::equal, SAME_TILL_END);
 7468   //falling through if less than 8 bytes left
 7469 
 7470   bind(VECTOR4_TAIL);
 7471   cmpq(length, 4);
 7472   jccb(Assembler::less, BYTES_TAIL);
 7473   bind(VECTOR4_LOOP);
 7474   movl(tmp1, Address(obja, result));
 7475   xorl(tmp1, Address(objb, result));
 7476   testl(tmp1, tmp1);
 7477   jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
 7478   addq(result, 4);
 7479   subq(length, 4);
 7480   jcc(Assembler::equal, SAME_TILL_END);
 7481   //falling through if less than 4 bytes left
 7482 
 7483   bind(BYTES_TAIL);
 7484   bind(BYTES_LOOP);
 7485   load_unsigned_byte(tmp1, Address(obja, result));
 7486   load_unsigned_byte(tmp2, Address(objb, result));
 7487   xorl(tmp1, tmp2);
 7488   testl(tmp1, tmp1);
 7489   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7490   decq(length);
 7491   jcc(Assembler::zero, SAME_TILL_END);
 7492   incq(result);
 7493   load_unsigned_byte(tmp1, Address(obja, result));
 7494   load_unsigned_byte(tmp2, Address(objb, result));
 7495   xorl(tmp1, tmp2);
 7496   testl(tmp1, tmp1);
 7497   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7498   decq(length);
 7499   jcc(Assembler::zero, SAME_TILL_END);
 7500   incq(result);
 7501   load_unsigned_byte(tmp1, Address(obja, result));
 7502   load_unsigned_byte(tmp2, Address(objb, result));
 7503   xorl(tmp1, tmp2);
 7504   testl(tmp1, tmp1);
 7505   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7506   jmp(SAME_TILL_END);
 7507 
 7508   if (UseAVX >= 2) {
 7509     bind(VECTOR32_NOT_EQUAL);
 7510     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
 7511     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
 7512     vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
 7513     vpmovmskb(tmp1, rymm0);
 7514     bsfq(tmp1, tmp1);
 7515     addq(result, tmp1);
 7516     shrq(result);
 7517     jmp(DONE);
 7518   }
 7519 
 7520   bind(VECTOR16_NOT_EQUAL);
 7521   if (UseAVX >= 2) {
 7522     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
 7523     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
 7524     pxor(rymm0, rymm2);
 7525   } else {
 7526     pcmpeqb(rymm2, rymm2);
 7527     pxor(rymm0, rymm1);
 7528     pcmpeqb(rymm0, rymm1);
 7529     pxor(rymm0, rymm2);
 7530   }
 7531   pmovmskb(tmp1, rymm0);
 7532   bsfq(tmp1, tmp1);
 7533   addq(result, tmp1);
 7534   shrq(result);
 7535   jmpb(DONE);
 7536 
 7537   bind(VECTOR8_NOT_EQUAL);
 7538   bind(VECTOR4_NOT_EQUAL);
 7539   bsfq(tmp1, tmp1);
 7540   shrq(tmp1, 3);
 7541   addq(result, tmp1);
 7542   bind(BYTES_NOT_EQUAL);
 7543   shrq(result);
 7544   jmpb(DONE);
 7545 
 7546   bind(SAME_TILL_END);
 7547   mov64(result, -1);
 7548 
 7549   bind(DONE);
 7550 }
 7551 
 7552 //Helper functions for square_to_len()
 7553 
 7554 /**
 7555  * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 7556  * Preserves x and z and modifies rest of the registers.
 7557  */
 7558 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7559   // Perform square and right shift by 1
 7560   // Handle odd xlen case first, then for even xlen do the following
 7561   // jlong carry = 0;
 7562   // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
 7563   //     huge_128 product = x[j:j+1] * x[j:j+1];
 7564   //     z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
 7565   //     z[i+2:i+3] = (jlong)(product >>> 1);
 7566   //     carry = (jlong)product;
 7567   // }
 7568 
 7569   xorq(tmp5, tmp5);     // carry
 7570   xorq(rdxReg, rdxReg);
 7571   xorl(tmp1, tmp1);     // index for x
 7572   xorl(tmp4, tmp4);     // index for z
 7573 
 7574   Label L_first_loop, L_first_loop_exit;
 7575 
 7576   testl(xlen, 1);
 7577   jccb(Assembler::zero, L_first_loop); //jump if xlen is even
 7578 
 7579   // Square and right shift by 1 the odd element using 32 bit multiply
 7580   movl(raxReg, Address(x, tmp1, Address::times_4, 0));
 7581   imulq(raxReg, raxReg);
 7582   shrq(raxReg, 1);
 7583   adcq(tmp5, 0);
 7584   movq(Address(z, tmp4, Address::times_4, 0), raxReg);
 7585   incrementl(tmp1);
 7586   addl(tmp4, 2);
 7587 
 7588   // Square and  right shift by 1 the rest using 64 bit multiply
 7589   bind(L_first_loop);
 7590   cmpptr(tmp1, xlen);
 7591   jccb(Assembler::equal, L_first_loop_exit);
 7592 
 7593   // Square
 7594   movq(raxReg, Address(x, tmp1, Address::times_4,  0));
 7595   rorq(raxReg, 32);    // convert big-endian to little-endian
 7596   mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax
 7597 
 7598   // Right shift by 1 and save carry
 7599   shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
 7600   rcrq(rdxReg, 1);
 7601   rcrq(raxReg, 1);
 7602   adcq(tmp5, 0);
 7603 
 7604   // Store result in z
 7605   movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
 7606   movq(Address(z, tmp4, Address::times_4, 8), raxReg);
 7607 
 7608   // Update indices for x and z
 7609   addl(tmp1, 2);
 7610   addl(tmp4, 4);
 7611   jmp(L_first_loop);
 7612 
 7613   bind(L_first_loop_exit);
 7614 }
 7615 
 7616 
 7617 /**
 7618  * Perform the following multiply add operation using BMI2 instructions
 7619  * carry:sum = sum + op1*op2 + carry
 7620  * op2 should be in rdx
 7621  * op2 is preserved, all other registers are modified
 7622  */
 7623 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
 7624   // assert op2 is rdx
 7625   mulxq(tmp2, op1, op1);  //  op1 * op2 -> tmp2:op1
 7626   addq(sum, carry);
 7627   adcq(tmp2, 0);
 7628   addq(sum, op1);
 7629   adcq(tmp2, 0);
 7630   movq(carry, tmp2);
 7631 }
 7632 
 7633 /**
 7634  * Perform the following multiply add operation:
 7635  * carry:sum = sum + op1*op2 + carry
 7636  * Preserves op1, op2 and modifies rest of registers
 7637  */
 7638 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
 7639   // rdx:rax = op1 * op2
 7640   movq(raxReg, op2);
 7641   mulq(op1);
 7642 
 7643   //  rdx:rax = sum + carry + rdx:rax
 7644   addq(sum, carry);
 7645   adcq(rdxReg, 0);
 7646   addq(sum, raxReg);
 7647   adcq(rdxReg, 0);
 7648 
 7649   // carry:sum = rdx:sum
 7650   movq(carry, rdxReg);
 7651 }
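// For reference, the carry:sum update implemented by both multiply_add_64
// variants above can be written with a 128-bit temporary (an illustrative
// sketch, not the emitted code):
//
//   unsigned __int128 t = (unsigned __int128)op1 * op2 + sum + carry;
//   sum   = (julong)t;          // low 64 bits
//   carry = (julong)(t >> 64);  // high 64 bits, fed into the next step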
 7652 
 7653 /**
 7654  * Add 64 bit long carry into z[] with carry propagation.
 7655  * Preserves z and carry register values and modifies rest of registers.
 7656  *
 7657  */
 7658 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
 7659   Label L_fourth_loop, L_fourth_loop_exit;
 7660 
 7661   movl(tmp1, 1);
 7662   subl(zlen, 2);
 7663   addq(Address(z, zlen, Address::times_4, 0), carry);
 7664 
 7665   bind(L_fourth_loop);
 7666   jccb(Assembler::carryClear, L_fourth_loop_exit);
 7667   subl(zlen, 2);
 7668   jccb(Assembler::negative, L_fourth_loop_exit);
 7669   addq(Address(z, zlen, Address::times_4, 0), tmp1);
 7670   jmp(L_fourth_loop);
 7671   bind(L_fourth_loop_exit);
 7672 }
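// An illustrative sketch of add_one_64() above (not the emitted code): the
// incoming 64-bit carry is added into the current word pair of z[] and any
// resulting carry ripples toward lower indices (more significant words):
//
//   zlen -= 2;
//   z[zlen .. zlen+1] += carry;          // 64-bit add
//   while (that addition carried out && (zlen -= 2) >= 0) {
//     z[zlen .. zlen+1] += 1;            // propagate the carry
//   }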
 7673 
 7674 /**
 7675  * Shift z[] left by 1 bit.
 7676  * Preserves x, len, z and zlen registers and modifies rest of the registers.
 7677  *
 7678  */
 7679 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
 7680 
 7681   Label L_fifth_loop, L_fifth_loop_exit;
 7682 
 7683   // Fifth loop
 7684   // Perform primitiveLeftShift(z, zlen, 1)
 7685 
 7686   const Register prev_carry = tmp1;
 7687   const Register new_carry = tmp4;
 7688   const Register value = tmp2;
 7689   const Register zidx = tmp3;
 7690 
 7691   // int zidx, carry;
 7692   // long value;
 7693   // carry = 0;
 7694   // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
7695   //    (carry:value) = (z[zidx] << 1) | carry;
7696   //    z[zidx] = value;
 7697   // }
 7698 
 7699   movl(zidx, zlen);
 7700   xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
 7701 
 7702   bind(L_fifth_loop);
 7703   decl(zidx);  // Use decl to preserve carry flag
 7704   decl(zidx);
 7705   jccb(Assembler::negative, L_fifth_loop_exit);
 7706 
 7707   if (UseBMI2Instructions) {
 7708      movq(value, Address(z, zidx, Address::times_4, 0));
 7709      rclq(value, 1);
 7710      rorxq(value, value, 32);
 7711      movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7712   }
 7713   else {
 7714     // clear new_carry
 7715     xorl(new_carry, new_carry);
 7716 
 7717     // Shift z[i] by 1, or in previous carry and save new carry
 7718     movq(value, Address(z, zidx, Address::times_4, 0));
 7719     shlq(value, 1);
 7720     adcl(new_carry, 0);
 7721 
 7722     orq(value, prev_carry);
 7723     rorq(value, 0x20);
 7724     movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7725 
 7726     // Set previous carry = new carry
 7727     movl(prev_carry, new_carry);
 7728   }
 7729   jmp(L_fifth_loop);
 7730 
 7731   bind(L_fifth_loop_exit);
 7732 }
 7733 
 7734 
 7735 /**
 7736  * Code for BigInteger::squareToLen() intrinsic
 7737  *
 7738  * rdi: x
 7739  * rsi: len
 7740  * r8:  z
 7741  * rcx: zlen
 7742  * r12: tmp1
 7743  * r13: tmp2
 7744  * r14: tmp3
 7745  * r15: tmp4
 7746  * rbx: tmp5
 7747  *
 7748  */
 7749 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7750 
 7751   Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
 7752   push(tmp1);
 7753   push(tmp2);
 7754   push(tmp3);
 7755   push(tmp4);
 7756   push(tmp5);
 7757 
 7758   // First loop
 7759   // Store the squares, right shifted one bit (i.e., divided by 2).
 7760   square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
 7761 
 7762   // Add in off-diagonal sums.
 7763   //
 7764   // Second, third (nested) and fourth loops.
 7765   // zlen +=2;
 7766   // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
 7767   //    carry = 0;
 7768   //    long op2 = x[xidx:xidx+1];
 7769   //    for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
 7770   //       k -= 2;
 7771   //       long op1 = x[j:j+1];
 7772   //       long sum = z[k:k+1];
 7773   //       carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
 7774   //       z[k:k+1] = sum;
 7775   //    }
 7776   //    add_one_64(z, k, carry, tmp_regs);
 7777   // }
 7778 
 7779   const Register carry = tmp5;
 7780   const Register sum = tmp3;
 7781   const Register op1 = tmp4;
 7782   Register op2 = tmp2;
 7783 
 7784   push(zlen);
 7785   push(len);
 7786   addl(zlen,2);
 7787   bind(L_second_loop);
 7788   xorq(carry, carry);
 7789   subl(zlen, 4);
 7790   subl(len, 2);
 7791   push(zlen);
 7792   push(len);
 7793   cmpl(len, 0);
 7794   jccb(Assembler::lessEqual, L_second_loop_exit);
 7795 
 7796   // Multiply an array by one 64 bit long.
 7797   if (UseBMI2Instructions) {
 7798     op2 = rdxReg;
 7799     movq(op2, Address(x, len, Address::times_4,  0));
 7800     rorxq(op2, op2, 32);
 7801   }
 7802   else {
 7803     movq(op2, Address(x, len, Address::times_4,  0));
 7804     rorq(op2, 32);
 7805   }
 7806 
 7807   bind(L_third_loop);
 7808   decrementl(len);
 7809   jccb(Assembler::negative, L_third_loop_exit);
 7810   decrementl(len);
 7811   jccb(Assembler::negative, L_last_x);
 7812 
 7813   movq(op1, Address(x, len, Address::times_4,  0));
 7814   rorq(op1, 32);
 7815 
 7816   bind(L_multiply);
 7817   subl(zlen, 2);
 7818   movq(sum, Address(z, zlen, Address::times_4,  0));
 7819 
 7820   // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
 7821   if (UseBMI2Instructions) {
 7822     multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
 7823   }
 7824   else {
 7825     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7826   }
 7827 
 7828   movq(Address(z, zlen, Address::times_4, 0), sum);
 7829 
 7830   jmp(L_third_loop);
 7831   bind(L_third_loop_exit);
 7832 
 7833   // Fourth loop
 7834   // Add 64 bit long carry into z with carry propagation.
7835   // Uses the adjusted zlen.
 7836   add_one_64(z, zlen, carry, tmp1);
 7837 
 7838   pop(len);
 7839   pop(zlen);
 7840   jmp(L_second_loop);
 7841 
 7842   // Next infrequent code is moved outside loops.
 7843   bind(L_last_x);
 7844   movl(op1, Address(x, 0));
 7845   jmp(L_multiply);
 7846 
 7847   bind(L_second_loop_exit);
 7848   pop(len);
 7849   pop(zlen);
 7850   pop(len);
 7851   pop(zlen);
 7852 
 7853   // Fifth loop
 7854   // Shift z left 1 bit.
 7855   lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
 7856 
 7857   // z[zlen-1] |= x[len-1] & 1;
 7858   movl(tmp3, Address(x, len, Address::times_4, -4));
 7859   andl(tmp3, 1);
 7860   orl(Address(z, zlen, Address::times_4,  -4), tmp3);
 7861 
 7862   pop(tmp5);
 7863   pop(tmp4);
 7864   pop(tmp3);
 7865   pop(tmp2);
 7866   pop(tmp1);
 7867 }
 7868 
 7869 /**
 7870  * Helper function for mul_add()
7871  * Multiply in[] by int k and add to out[] starting at offset offs, using a
7872  * 128-bit by 32-bit multiply, and return the carry in tmp5.
7873  * Only the quad-int-aligned portion of in[] is processed by this function.
7874  * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
7875  * This function preserves the out, in and k registers.
7876  * len and offset point to the appropriate indices in "in" and "out" respectively.
7877  * tmp5 holds the carry.
7878  * The other registers are temporary and are modified.
 7879  *
 7880  */
 7881 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
 7882   Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
 7883   Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7884 
 7885   Label L_first_loop, L_first_loop_exit;
 7886 
 7887   movl(tmp1, len);
 7888   shrl(tmp1, 2);
 7889 
 7890   bind(L_first_loop);
 7891   subl(tmp1, 1);
 7892   jccb(Assembler::negative, L_first_loop_exit);
 7893 
 7894   subl(len, 4);
 7895   subl(offset, 4);
 7896 
 7897   Register op2 = tmp2;
 7898   const Register sum = tmp3;
 7899   const Register op1 = tmp4;
 7900   const Register carry = tmp5;
 7901 
 7902   if (UseBMI2Instructions) {
 7903     op2 = rdxReg;
 7904   }
 7905 
 7906   movq(op1, Address(in, len, Address::times_4,  8));
 7907   rorq(op1, 32);
 7908   movq(sum, Address(out, offset, Address::times_4,  8));
 7909   rorq(sum, 32);
 7910   if (UseBMI2Instructions) {
 7911     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7912   }
 7913   else {
 7914     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7915   }
 7916   // Store back in big endian from little endian
 7917   rorq(sum, 0x20);
 7918   movq(Address(out, offset, Address::times_4,  8), sum);
 7919 
 7920   movq(op1, Address(in, len, Address::times_4,  0));
 7921   rorq(op1, 32);
 7922   movq(sum, Address(out, offset, Address::times_4,  0));
 7923   rorq(sum, 32);
 7924   if (UseBMI2Instructions) {
 7925     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7926   }
 7927   else {
 7928     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7929   }
 7930   // Store back in big endian from little endian
 7931   rorq(sum, 0x20);
 7932   movq(Address(out, offset, Address::times_4,  0), sum);
 7933 
 7934   jmp(L_first_loop);
 7935   bind(L_first_loop_exit);
 7936 }
 7937 
 7938 /**
 7939  * Code for BigInteger::mulAdd() intrinsic
 7940  *
 7941  * rdi: out
 7942  * rsi: in
 7943  * r11: offs (out.length - offset)
 7944  * rcx: len
 7945  * r8:  k
 7946  * r12: tmp1
 7947  * r13: tmp2
 7948  * r14: tmp3
 7949  * r15: tmp4
 7950  * rbx: tmp5
 7951  * Multiply the in[] by word k and add to out[], return the carry in rax
 7952  */
 7953 void MacroAssembler::mul_add(Register out, Register in, Register offs,
 7954    Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
 7955    Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7956 
 7957   Label L_carry, L_last_in, L_done;
 7958 
 7959 // carry = 0;
 7960 // for (int j=len-1; j >= 0; j--) {
 7961 //    long product = (in[j] & LONG_MASK) * kLong +
 7962 //                   (out[offs] & LONG_MASK) + carry;
 7963 //    out[offs--] = (int)product;
 7964 //    carry = product >>> 32;
 7965 // }
 7966 //
 7967   push(tmp1);
 7968   push(tmp2);
 7969   push(tmp3);
 7970   push(tmp4);
 7971   push(tmp5);
 7972 
 7973   Register op2 = tmp2;
 7974   const Register sum = tmp3;
 7975   const Register op1 = tmp4;
 7976   const Register carry =  tmp5;
 7977 
 7978   if (UseBMI2Instructions) {
 7979     op2 = rdxReg;
 7980     movl(op2, k);
 7981   }
 7982   else {
 7983     movl(op2, k);
 7984   }
 7985 
 7986   xorq(carry, carry);
 7987 
 7988   //First loop
 7989 
 7990   //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
 7991   //The carry is in tmp5
 7992   mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
 7993 
 7994   //Multiply the trailing in[] entry using 64 bit by 32 bit, if any
 7995   decrementl(len);
 7996   jccb(Assembler::negative, L_carry);
 7997   decrementl(len);
 7998   jccb(Assembler::negative, L_last_in);
 7999 
 8000   movq(op1, Address(in, len, Address::times_4,  0));
 8001   rorq(op1, 32);
 8002 
 8003   subl(offs, 2);
 8004   movq(sum, Address(out, offs, Address::times_4,  0));
 8005   rorq(sum, 32);
 8006 
 8007   if (UseBMI2Instructions) {
 8008     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 8009   }
 8010   else {
 8011     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 8012   }
 8013 
 8014   // Store back in big endian from little endian
 8015   rorq(sum, 0x20);
 8016   movq(Address(out, offs, Address::times_4,  0), sum);
 8017 
 8018   testl(len, len);
 8019   jccb(Assembler::zero, L_carry);
 8020 
 8021   //Multiply the last in[] entry, if any
 8022   bind(L_last_in);
 8023   movl(op1, Address(in, 0));
 8024   movl(sum, Address(out, offs, Address::times_4,  -4));
 8025 
 8026   movl(raxReg, k);
 8027   mull(op1); //tmp4 * eax -> edx:eax
 8028   addl(sum, carry);
 8029   adcl(rdxReg, 0);
 8030   addl(sum, raxReg);
 8031   adcl(rdxReg, 0);
 8032   movl(carry, rdxReg);
 8033 
 8034   movl(Address(out, offs, Address::times_4,  -4), sum);
 8035 
 8036   bind(L_carry);
 8037   //return tmp5/carry as carry in rax
 8038   movl(rax, carry);
 8039 
 8040   bind(L_done);
 8041   pop(tmp5);
 8042   pop(tmp4);
 8043   pop(tmp3);
 8044   pop(tmp2);
 8045   pop(tmp1);
 8046 }
 8047 #endif
 8048 
 8049 /**
 8050  * Emits code to update CRC-32 with a byte value according to constants in table
 8051  *
8052  * @param [in,out] crc   Register containing the crc.
8053  * @param [in] val       Register containing the byte to fold into the CRC.
8054  * @param [in] table     Register containing the table of crc constants.
 8055  *
 8056  * uint32_t crc;
 8057  * val = crc_table[(val ^ crc) & 0xFF];
 8058  * crc = val ^ (crc >> 8);
 8059  *
 8060  */
 8061 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
 8062   xorl(val, crc);
 8063   andl(val, 0xFF);
 8064   shrl(crc, 8); // unsigned shift
 8065   xorl(crc, Address(table, val, Address::times_4, 0));
 8066 }
 8067 
 8068 /**
 8069  * Fold 128-bit data chunk
 8070  */
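// Informally, with clmul(a, b) denoting a 64x64 -> 128 bit carry-less multiply,
// the fold below advances the 128-bit CRC state over 16 bytes of input roughly
// as (a sketch, not a formal specification):
//
//   xcrc' = clmul(xcrc.hi64, xK.hi64) ^ clmul(xcrc.lo64, xK.lo64) ^ data[0..15]
//
// where xK holds the precomputed constants for a 128-bit shift of the state.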
 8071 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
 8072   if (UseAVX > 0) {
 8073     vpclmulhdq(xtmp, xK, xcrc); // [123:64]
 8074     vpclmulldq(xcrc, xK, xcrc); // [63:0]
 8075     vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
 8076     pxor(xcrc, xtmp);
 8077   } else {
 8078     movdqa(xtmp, xcrc);
 8079     pclmulhdq(xtmp, xK);   // [123:64]
 8080     pclmulldq(xcrc, xK);   // [63:0]
 8081     pxor(xcrc, xtmp);
 8082     movdqu(xtmp, Address(buf, offset));
 8083     pxor(xcrc, xtmp);
 8084   }
 8085 }
 8086 
 8087 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
 8088   if (UseAVX > 0) {
 8089     vpclmulhdq(xtmp, xK, xcrc);
 8090     vpclmulldq(xcrc, xK, xcrc);
 8091     pxor(xcrc, xbuf);
 8092     pxor(xcrc, xtmp);
 8093   } else {
 8094     movdqa(xtmp, xcrc);
 8095     pclmulhdq(xtmp, xK);
 8096     pclmulldq(xcrc, xK);
 8097     pxor(xcrc, xbuf);
 8098     pxor(xcrc, xtmp);
 8099   }
 8100 }
 8101 
 8102 /**
 8103  * 8-bit folds to compute 32-bit CRC
 8104  *
 8105  * uint64_t xcrc;
 8106  * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 8107  */
 8108 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
 8109   movdl(tmp, xcrc);
 8110   andl(tmp, 0xFF);
 8111   movdl(xtmp, Address(table, tmp, Address::times_4, 0));
 8112   psrldq(xcrc, 1); // unsigned shift one byte
 8113   pxor(xcrc, xtmp);
 8114 }
 8115 
 8116 /**
 8117  * uint32_t crc;
 8118  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 8119  */
 8120 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
 8121   movl(tmp, crc);
 8122   andl(tmp, 0xFF);
 8123   shrl(crc, 8);
 8124   xorl(crc, Address(table, tmp, Address::times_4, 0));
 8125 }
 8126 
 8127 /**
 8128  * @param crc   register containing existing CRC (32-bit)
 8129  * @param buf   register pointing to input byte buffer (byte*)
 8130  * @param len   register containing number of bytes
 8131  * @param table register that will contain address of CRC table
 8132  * @param tmp   scratch register
 8133  */
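// Informal flow (a summary, not a contract): consume bytes one at a time until
// buf is 16-byte aligned, then fold 512 bits per iteration across four 128-bit
// streams with carry-less multiplies, reduce the four streams to one 128-bit
// value, fold that down to 32 bits with 8-bit folds, and finish the remaining
// tail bytes with the table-driven byte update.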
 8134 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
 8135   assert_different_registers(crc, buf, len, table, tmp, rax);
 8136 
 8137   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8138   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8139 
 8140   // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
 8141   // context for the registers used, where all instructions below are using 128-bit mode
 8142   // On EVEX without VL and BW, these instructions will all be AVX.
 8143   lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
 8144   notl(crc); // ~crc
 8145   cmpl(len, 16);
 8146   jcc(Assembler::less, L_tail);
 8147 
 8148   // Align buffer to 16 bytes
 8149   movl(tmp, buf);
 8150   andl(tmp, 0xF);
 8151   jccb(Assembler::zero, L_aligned);
 8152   subl(tmp,  16);
 8153   addl(len, tmp);
 8154 
 8155   align(4);
 8156   BIND(L_align_loop);
 8157   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 8158   update_byte_crc32(crc, rax, table);
 8159   increment(buf);
 8160   incrementl(tmp);
 8161   jccb(Assembler::less, L_align_loop);
 8162 
 8163   BIND(L_aligned);
 8164   movl(tmp, len); // save
 8165   shrl(len, 4);
 8166   jcc(Assembler::zero, L_tail_restore);
 8167 
 8168   // Fold crc into first bytes of vector
 8169   movdqa(xmm1, Address(buf, 0));
 8170   movdl(rax, xmm1);
 8171   xorl(crc, rax);
 8172   if (VM_Version::supports_sse4_1()) {
 8173     pinsrd(xmm1, crc, 0);
 8174   } else {
 8175     pinsrw(xmm1, crc, 0);
 8176     shrl(crc, 16);
 8177     pinsrw(xmm1, crc, 1);
 8178   }
 8179   addptr(buf, 16);
 8180   subl(len, 4); // len > 0
 8181   jcc(Assembler::less, L_fold_tail);
 8182 
 8183   movdqa(xmm2, Address(buf,  0));
 8184   movdqa(xmm3, Address(buf, 16));
 8185   movdqa(xmm4, Address(buf, 32));
 8186   addptr(buf, 48);
 8187   subl(len, 3);
 8188   jcc(Assembler::lessEqual, L_fold_512b);
 8189 
 8190   // Fold total 512 bits of polynomial on each iteration,
 8191   // 128 bits per each of 4 parallel streams.
 8192   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
 8193 
 8194   align32();
 8195   BIND(L_fold_512b_loop);
 8196   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 8197   fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
 8198   fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
 8199   fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
 8200   addptr(buf, 64);
 8201   subl(len, 4);
 8202   jcc(Assembler::greater, L_fold_512b_loop);
 8203 
 8204   // Fold 512 bits to 128 bits.
 8205   BIND(L_fold_512b);
 8206   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 8207   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
 8208   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
 8209   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
 8210 
 8211   // Fold the rest of 128 bits data chunks
 8212   BIND(L_fold_tail);
 8213   addl(len, 3);
 8214   jccb(Assembler::lessEqual, L_fold_128b);
 8215   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 8216 
 8217   BIND(L_fold_tail_loop);
 8218   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 8219   addptr(buf, 16);
 8220   decrementl(len);
 8221   jccb(Assembler::greater, L_fold_tail_loop);
 8222 
 8223   // Fold 128 bits in xmm1 down into 32 bits in crc register.
 8224   BIND(L_fold_128b);
 8225   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
 8226   if (UseAVX > 0) {
 8227     vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
 8228     vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
 8229     vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
 8230   } else {
 8231     movdqa(xmm2, xmm0);
 8232     pclmulqdq(xmm2, xmm1, 0x1);
 8233     movdqa(xmm3, xmm0);
 8234     pand(xmm3, xmm2);
 8235     pclmulqdq(xmm0, xmm3, 0x1);
 8236   }
 8237   psrldq(xmm1, 8);
 8238   psrldq(xmm2, 4);
 8239   pxor(xmm0, xmm1);
 8240   pxor(xmm0, xmm2);
 8241 
 8242   // 8 8-bit folds to compute 32-bit CRC.
 8243   for (int j = 0; j < 4; j++) {
 8244     fold_8bit_crc32(xmm0, table, xmm1, rax);
 8245   }
 8246   movdl(crc, xmm0); // mov 32 bits to general register
 8247   for (int j = 0; j < 4; j++) {
 8248     fold_8bit_crc32(crc, table, rax);
 8249   }
 8250 
 8251   BIND(L_tail_restore);
 8252   movl(len, tmp); // restore
 8253   BIND(L_tail);
 8254   andl(len, 0xf);
 8255   jccb(Assembler::zero, L_exit);
 8256 
 8257   // Fold the rest of bytes
 8258   align(4);
 8259   BIND(L_tail_loop);
 8260   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 8261   update_byte_crc32(crc, rax, table);
 8262   increment(buf);
 8263   decrementl(len);
 8264   jccb(Assembler::greater, L_tail_loop);
 8265 
 8266   BIND(L_exit);
8267   notl(crc); // ~crc
 8268 }
 8269 
 8270 #ifdef _LP64
 8271 // Helper function for AVX 512 CRC32
 8272 // Fold 512-bit data chunks
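// Informally (a sketch): each 128-bit lane of xcrc is folded forward by 64 bytes
// of input; two 64x64 carry-less multiplies of the lane with the constants in xK
// are XORed together and with the corresponding 16 bytes loaded from buf.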
 8273 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
 8274                                              Register pos, int offset) {
 8275   evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
 8276   evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
 8277   evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
 8278   evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
 8279   evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
 8280 }
 8281 
 8282 // Helper function for AVX 512 CRC32
 8283 // Compute CRC32 for < 256B buffers
 8284 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
 8285                                               Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
 8286                                               Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
 8287 
 8288   Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
 8289   Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
 8290   Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
 8291 
 8292   // check if there is enough buffer to be able to fold 16B at a time
 8293   cmpl(len, 32);
 8294   jcc(Assembler::less, L_less_than_32);
 8295 
 8296   // if there is, load the constants
 8297   movdqu(xmm10, Address(table, 1 * 16));    //rk1 and rk2 in xmm10
 8298   movdl(xmm0, crc);                        // get the initial crc value
 8299   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 8300   pxor(xmm7, xmm0);
 8301 
 8302   // update the buffer pointer
 8303   addl(pos, 16);
8304   // update the counter. Subtract 32 instead of 16 to save one instruction from the loop.
 8305   subl(len, 32);
 8306   jmp(L_16B_reduction_loop);
 8307 
 8308   bind(L_less_than_32);
8309   // mov initial crc to the return value. This is necessary for zero-length buffers.
 8310   movl(rax, crc);
 8311   testl(len, len);
 8312   jcc(Assembler::equal, L_cleanup);
 8313 
 8314   movdl(xmm0, crc);                        //get the initial crc value
 8315 
 8316   cmpl(len, 16);
 8317   jcc(Assembler::equal, L_exact_16_left);
 8318   jcc(Assembler::less, L_less_than_16_left);
 8319 
 8320   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 8321   pxor(xmm7, xmm0);                       //xor the initial crc value
 8322   addl(pos, 16);
 8323   subl(len, 16);
 8324   movdqu(xmm10, Address(table, 1 * 16));    // rk1 and rk2 in xmm10
 8325   jmp(L_get_last_two_xmms);
 8326 
 8327   bind(L_less_than_16_left);
8328   // use stack space to load data less than 16 bytes; zero out the 16B in memory first.
 8329   pxor(xmm1, xmm1);
 8330   movptr(tmp1, rsp);
 8331   movdqu(Address(tmp1, 0 * 16), xmm1);
 8332 
 8333   cmpl(len, 4);
 8334   jcc(Assembler::less, L_only_less_than_4);
 8335 
 8336   //backup the counter value
 8337   movl(tmp2, len);
 8338   cmpl(len, 8);
 8339   jcc(Assembler::less, L_less_than_8_left);
 8340 
 8341   //load 8 Bytes
 8342   movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
 8343   movq(Address(tmp1, 0 * 16), rax);
 8344   addptr(tmp1, 8);
 8345   subl(len, 8);
 8346   addl(pos, 8);
 8347 
 8348   bind(L_less_than_8_left);
 8349   cmpl(len, 4);
 8350   jcc(Assembler::less, L_less_than_4_left);
 8351 
 8352   //load 4 Bytes
 8353   movl(rax, Address(buf, pos, Address::times_1, 0));
 8354   movl(Address(tmp1, 0 * 16), rax);
 8355   addptr(tmp1, 4);
 8356   subl(len, 4);
 8357   addl(pos, 4);
 8358 
 8359   bind(L_less_than_4_left);
 8360   cmpl(len, 2);
 8361   jcc(Assembler::less, L_less_than_2_left);
 8362 
 8363   // load 2 Bytes
 8364   movw(rax, Address(buf, pos, Address::times_1, 0));
 8365   movl(Address(tmp1, 0 * 16), rax);
 8366   addptr(tmp1, 2);
 8367   subl(len, 2);
 8368   addl(pos, 2);
 8369 
 8370   bind(L_less_than_2_left);
 8371   cmpl(len, 1);
 8372   jcc(Assembler::less, L_zero_left);
 8373 
 8374   // load 1 Byte
 8375   movb(rax, Address(buf, pos, Address::times_1, 0));
 8376   movb(Address(tmp1, 0 * 16), rax);
 8377 
 8378   bind(L_zero_left);
 8379   movdqu(xmm7, Address(rsp, 0));
 8380   pxor(xmm7, xmm0);                       //xor the initial crc value
 8381 
 8382   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8383   movdqu(xmm0, Address(rax, tmp2));
 8384   pshufb(xmm7, xmm0);
 8385   jmp(L_128_done);
 8386 
 8387   bind(L_exact_16_left);
 8388   movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
 8389   pxor(xmm7, xmm0);                       //xor the initial crc value
 8390   jmp(L_128_done);
 8391 
 8392   bind(L_only_less_than_4);
 8393   cmpl(len, 3);
 8394   jcc(Assembler::less, L_only_less_than_3);
 8395 
 8396   // load 3 Bytes
 8397   movb(rax, Address(buf, pos, Address::times_1, 0));
 8398   movb(Address(tmp1, 0), rax);
 8399 
 8400   movb(rax, Address(buf, pos, Address::times_1, 1));
 8401   movb(Address(tmp1, 1), rax);
 8402 
 8403   movb(rax, Address(buf, pos, Address::times_1, 2));
 8404   movb(Address(tmp1, 2), rax);
 8405 
 8406   movdqu(xmm7, Address(rsp, 0));
 8407   pxor(xmm7, xmm0);                     //xor the initial crc value
 8408 
 8409   pslldq(xmm7, 0x5);
 8410   jmp(L_barrett);
 8411   bind(L_only_less_than_3);
 8412   cmpl(len, 2);
 8413   jcc(Assembler::less, L_only_less_than_2);
 8414 
 8415   // load 2 Bytes
 8416   movb(rax, Address(buf, pos, Address::times_1, 0));
 8417   movb(Address(tmp1, 0), rax);
 8418 
 8419   movb(rax, Address(buf, pos, Address::times_1, 1));
 8420   movb(Address(tmp1, 1), rax);
 8421 
 8422   movdqu(xmm7, Address(rsp, 0));
 8423   pxor(xmm7, xmm0);                     //xor the initial crc value
 8424 
 8425   pslldq(xmm7, 0x6);
 8426   jmp(L_barrett);
 8427 
 8428   bind(L_only_less_than_2);
 8429   //load 1 Byte
 8430   movb(rax, Address(buf, pos, Address::times_1, 0));
 8431   movb(Address(tmp1, 0), rax);
 8432 
 8433   movdqu(xmm7, Address(rsp, 0));
 8434   pxor(xmm7, xmm0);                     //xor the initial crc value
 8435 
 8436   pslldq(xmm7, 0x7);
 8437 }
 8438 
 8439 /**
 8440 * Compute CRC32 using AVX512 instructions
8441 * @param crc   register containing existing CRC (32-bit)
8442 * @param buf   register pointing to input byte buffer (byte*)
8443 * @param len   register containing number of bytes
8444 * @param table address of crc or crc32c table
8445 * @param tmp1  scratch register
8446 * @param tmp2  scratch register
8447 * @return rax  result register
 8448 *
 8449 * This routine is identical for crc32c with the exception of the precomputed constant
 8450 * table which will be passed as the table argument.  The calculation steps are
 8451 * the same for both variants.
 8452 */
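// Informal flow (a summary, not a contract): buffers of at least 256 bytes are
// folded with up to four 512-bit accumulators (256 bytes per iteration), reduced
// to a 128-byte-per-iteration loop, then to a single 128-bit value, and finally
// to 32 bits via a Barrett reduction. Buffers under 256 bytes are handled by
// kernel_crc32_avx512_256B().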
 8453 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
 8454   assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
 8455 
 8456   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8457   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8458   Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
 8459   Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
 8460   Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
 8461 
 8462   const Register pos = r12;
 8463   push(r12);
 8464   subptr(rsp, 16 * 2 + 8);
 8465 
 8466   // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
 8467   // context for the registers used, where all instructions below are using 128-bit mode
 8468   // On EVEX without VL and BW, these instructions will all be AVX.
 8469   movl(pos, 0);
 8470 
 8471   // check if smaller than 256B
 8472   cmpl(len, 256);
 8473   jcc(Assembler::less, L_less_than_256);
 8474 
 8475   // load the initial crc value
 8476   movdl(xmm10, crc);
 8477 
 8478   // receive the initial 64B data, xor the initial crc value
 8479   evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
 8480   evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
 8481   evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
 8482   evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
 8483 
 8484   subl(len, 256);
 8485   cmpl(len, 256);
 8486   jcc(Assembler::less, L_fold_128_B_loop);
 8487 
 8488   evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
 8489   evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
 8490   evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
 8491   subl(len, 256);
 8492 
 8493   bind(L_fold_256_B_loop);
 8494   addl(pos, 256);
 8495   fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
 8496   fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
 8497   fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
 8498   fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
 8499 
 8500   subl(len, 256);
 8501   jcc(Assembler::greaterEqual, L_fold_256_B_loop);
 8502 
 8503   // Fold 256 into 128
 8504   addl(pos, 256);
 8505   evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
 8506   evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
 8507   vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
 8508 
 8509   evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
 8510   evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
 8511   vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
 8512 
 8513   evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
 8514   evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
 8515 
 8516   addl(len, 128);
 8517   jmp(L_fold_128_B_register);
 8518 
8519   // At this point in the code there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
8520   // loop will fold 128B at a time until we have 128 + y bytes of buffer.
 8521 
8522   // fold 128B at a time. This section of the code folds two 512-bit registers (128B) in parallel
 8523   bind(L_fold_128_B_loop);
 8524   addl(pos, 128);
 8525   fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
 8526   fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
 8527 
 8528   subl(len, 128);
 8529   jcc(Assembler::greaterEqual, L_fold_128_B_loop);
 8530 
 8531   addl(pos, 128);
 8532 
8533   // at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128
8534   // the 128B of folded data is in two 512-bit registers: xmm0 and xmm4
 8535   bind(L_fold_128_B_register);
 8536   evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
 8537   evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
 8538   evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
 8539   evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
 8540   // save last that has no multiplicand
 8541   vextracti64x2(xmm7, xmm4, 3);
 8542 
 8543   evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
 8544   evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
 8545   // Needed later in reduction loop
 8546   movdqu(xmm10, Address(table, 1 * 16));
 8547   vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
 8548   vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
 8549 
 8550   // Swap 1,0,3,2 - 01 00 11 10
 8551   evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
 8552   evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
 8553   vextracti128(xmm5, xmm8, 1);
 8554   evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
 8555 
 8556   // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
 8557   // instead of a cmp instruction, we use the negative flag with the jl instruction
 8558   addl(len, 128 - 16);
 8559   jcc(Assembler::less, L_final_reduction_for_128);
 8560 
 8561   bind(L_16B_reduction_loop);
 8562   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8563   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8564   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8565   movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
 8566   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8567   addl(pos, 16);
 8568   subl(len, 16);
 8569   jcc(Assembler::greaterEqual, L_16B_reduction_loop);
 8570 
 8571   bind(L_final_reduction_for_128);
 8572   addl(len, 16);
 8573   jcc(Assembler::equal, L_128_done);
 8574 
 8575   bind(L_get_last_two_xmms);
 8576   movdqu(xmm2, xmm7);
 8577   addl(pos, len);
 8578   movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
 8579   subl(pos, len);
 8580 
 8581   // get rid of the extra data that was loaded before
 8582   // load the shift constant
 8583   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8584   movdqu(xmm0, Address(rax, len));
 8585   addl(rax, len);
 8586 
 8587   vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8588   //Change mask to 512
 8589   vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
 8590   vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
 8591 
 8592   blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
 8593   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8594   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8595   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8596   vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
 8597 
 8598   bind(L_128_done);
 8599   // compute crc of a 128-bit value
 8600   movdqu(xmm10, Address(table, 3 * 16));
 8601   movdqu(xmm0, xmm7);
 8602 
 8603   // 64b fold
 8604   vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
 8605   vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
 8606   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8607 
 8608   // 32b fold
 8609   movdqu(xmm0, xmm7);
 8610   vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
 8611   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8612   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8613   jmp(L_barrett);
 8614 
 8615   bind(L_less_than_256);
 8616   kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
 8617 
 8618   //barrett reduction
 8619   bind(L_barrett);
 8620   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
 8621   movdqu(xmm1, xmm7);
 8622   movdqu(xmm2, xmm7);
 8623   movdqu(xmm10, Address(table, 4 * 16));
 8624 
 8625   pclmulqdq(xmm7, xmm10, 0x0);
 8626   pxor(xmm7, xmm2);
 8627   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
 8628   movdqu(xmm2, xmm7);
 8629   pclmulqdq(xmm7, xmm10, 0x10);
 8630   pxor(xmm7, xmm2);
 8631   pxor(xmm7, xmm1);
 8632   pextrd(crc, xmm7, 2);
 8633 
 8634   bind(L_cleanup);
 8635   addptr(rsp, 16 * 2 + 8);
 8636   pop(r12);
 8637 }
 8638 
 8639 // S. Gueron / Information Processing Letters 112 (2012) 184
 8640 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
 8641 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
 8642 // Output: the 64-bit carry-less product of B * CONST
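// Illustrative C sketch of the lookup performed below (a reading aid, not the
// emitted code; TABLE stands for the precomputed table addressed through
// StubRoutines::crc32c_table_addr()):
//
//   julong Q1 = TABLE[n][ B        & 0xFF];
//   julong Q2 = TABLE[n][(B >>  8) & 0xFF];
//   julong Q3 = TABLE[n][(B >> 16) & 0xFF];
//   julong Q4 = TABLE[n][(B >> 24) & 0xFF];
//   return Q1 ^ (Q2 << 8) ^ (Q3 << 16) ^ (Q4 << 24);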
 8643 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
 8644                                      Register tmp1, Register tmp2, Register tmp3) {
 8645   lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
 8646   if (n > 0) {
 8647     addq(tmp3, n * 256 * 8);
 8648   }
 8649   //    Q1 = TABLEExt[n][B & 0xFF];
 8650   movl(tmp1, in);
 8651   andl(tmp1, 0x000000FF);
 8652   shll(tmp1, 3);
 8653   addq(tmp1, tmp3);
 8654   movq(tmp1, Address(tmp1, 0));
 8655 
 8656   //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
 8657   movl(tmp2, in);
 8658   shrl(tmp2, 8);
 8659   andl(tmp2, 0x000000FF);
 8660   shll(tmp2, 3);
 8661   addq(tmp2, tmp3);
 8662   movq(tmp2, Address(tmp2, 0));
 8663 
 8664   shlq(tmp2, 8);
 8665   xorq(tmp1, tmp2);
 8666 
 8667   //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
 8668   movl(tmp2, in);
 8669   shrl(tmp2, 16);
 8670   andl(tmp2, 0x000000FF);
 8671   shll(tmp2, 3);
 8672   addq(tmp2, tmp3);
 8673   movq(tmp2, Address(tmp2, 0));
 8674 
 8675   shlq(tmp2, 16);
 8676   xorq(tmp1, tmp2);
 8677 
 8678   //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
 8679   shrl(in, 24);
 8680   andl(in, 0x000000FF);
 8681   shll(in, 3);
 8682   addq(in, tmp3);
 8683   movq(in, Address(in, 0));
 8684 
 8685   shlq(in, 24);
 8686   xorq(in, tmp1);
 8687   //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
 8688 }
 8689 
 8690 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
 8691                                       Register in_out,
 8692                                       uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
 8693                                       XMMRegister w_xtmp2,
 8694                                       Register tmp1,
 8695                                       Register n_tmp2, Register n_tmp3) {
 8696   if (is_pclmulqdq_supported) {
 8697     movdl(w_xtmp1, in_out); // modified blindly
 8698 
 8699     movl(tmp1, const_or_pre_comp_const_index);
 8700     movdl(w_xtmp2, tmp1);
 8701     pclmulqdq(w_xtmp1, w_xtmp2, 0);
 8702 
 8703     movdq(in_out, w_xtmp1);
 8704   } else {
 8705     crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
 8706   }
 8707 }
 8708 
 8709 // Recombination Alternative 2: No bit-reflections
 8710 // T1 = (CRC_A * U1) << 1
 8711 // T2 = (CRC_B * U2) << 1
 8712 // C1 = T1 >> 32
 8713 // C2 = T2 >> 32
 8714 // T1 = T1 & 0xFFFFFFFF
 8715 // T2 = T2 & 0xFFFFFFFF
 8716 // T1 = CRC32(0, T1)
 8717 // T2 = CRC32(0, T2)
 8718 // C1 = C1 ^ T1
 8719 // C2 = C2 ^ T2
 8720 // CRC = C1 ^ C2 ^ CRC_C
 8721 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
 8722                                      XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8723                                      Register tmp1, Register tmp2,
 8724                                      Register n_tmp3) {
 8725   crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8726   crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8727   shlq(in_out, 1);
 8728   movl(tmp1, in_out);
 8729   shrq(in_out, 32);
 8730   xorl(tmp2, tmp2);
 8731   crc32(tmp2, tmp1, 4);
 8732   xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
 8733   shlq(in1, 1);
 8734   movl(tmp1, in1);
 8735   shrq(in1, 32);
 8736   xorl(tmp2, tmp2);
 8737   crc32(tmp2, tmp1, 4);
 8738   xorl(in1, tmp2);
 8739   xorl(in_out, in1);
 8740   xorl(in_out, in2);
 8741 }
 8742 
 8743 // Set N to predefined value
8744 // Subtract it from the length of the buffer
 8745 // execute in a loop:
 8746 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
 8747 // for i = 1 to N do
 8748 //  CRC_A = CRC32(CRC_A, A[i])
 8749 //  CRC_B = CRC32(CRC_B, B[i])
 8750 //  CRC_C = CRC32(CRC_C, C[i])
 8751 // end for
 8752 // Recombine
 8753 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
 8754                                        Register in_out1, Register in_out2, Register in_out3,
 8755                                        Register tmp1, Register tmp2, Register tmp3,
 8756                                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8757                                        Register tmp4, Register tmp5,
 8758                                        Register n_tmp6) {
 8759   Label L_processPartitions;
 8760   Label L_processPartition;
 8761   Label L_exit;
 8762 
 8763   bind(L_processPartitions);
 8764   cmpl(in_out1, 3 * size);
 8765   jcc(Assembler::less, L_exit);
 8766     xorl(tmp1, tmp1);
 8767     xorl(tmp2, tmp2);
 8768     movq(tmp3, in_out2);
 8769     addq(tmp3, size);
 8770 
 8771     bind(L_processPartition);
 8772       crc32(in_out3, Address(in_out2, 0), 8);
 8773       crc32(tmp1, Address(in_out2, size), 8);
 8774       crc32(tmp2, Address(in_out2, size * 2), 8);
 8775       addq(in_out2, 8);
 8776       cmpq(in_out2, tmp3);
 8777       jcc(Assembler::less, L_processPartition);
 8778     crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
 8779             w_xtmp1, w_xtmp2, w_xtmp3,
 8780             tmp4, tmp5,
 8781             n_tmp6);
 8782     addq(in_out2, 2 * size);
 8783     subl(in_out1, 3 * size);
 8784     jmp(L_processPartitions);
 8785 
 8786   bind(L_exit);
 8787 }
 8788 #else
 8789 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n,
 8790                                      Register tmp1, Register tmp2, Register tmp3,
 8791                                      XMMRegister xtmp1, XMMRegister xtmp2) {
 8792   lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
 8793   if (n > 0) {
 8794     addl(tmp3, n * 256 * 8);
 8795   }
 8796   //    Q1 = TABLEExt[n][B & 0xFF];
 8797   movl(tmp1, in_out);
 8798   andl(tmp1, 0x000000FF);
 8799   shll(tmp1, 3);
 8800   addl(tmp1, tmp3);
 8801   movq(xtmp1, Address(tmp1, 0));
 8802 
 8803   //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
 8804   movl(tmp2, in_out);
 8805   shrl(tmp2, 8);
 8806   andl(tmp2, 0x000000FF);
 8807   shll(tmp2, 3);
 8808   addl(tmp2, tmp3);
 8809   movq(xtmp2, Address(tmp2, 0));
 8810 
 8811   psllq(xtmp2, 8);
 8812   pxor(xtmp1, xtmp2);
 8813 
 8814   //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
 8815   movl(tmp2, in_out);
 8816   shrl(tmp2, 16);
 8817   andl(tmp2, 0x000000FF);
 8818   shll(tmp2, 3);
 8819   addl(tmp2, tmp3);
 8820   movq(xtmp2, Address(tmp2, 0));
 8821 
 8822   psllq(xtmp2, 16);
 8823   pxor(xtmp1, xtmp2);
 8824 
 8825   //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
 8826   shrl(in_out, 24);
 8827   andl(in_out, 0x000000FF);
 8828   shll(in_out, 3);
 8829   addl(in_out, tmp3);
 8830   movq(xtmp2, Address(in_out, 0));
 8831 
 8832   psllq(xtmp2, 24);
 8833   pxor(xtmp1, xtmp2); // Result in CXMM
 8834   //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
 8835 }
 8836 
 8837 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
 8838                                       Register in_out,
 8839                                       uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
 8840                                       XMMRegister w_xtmp2,
 8841                                       Register tmp1,
 8842                                       Register n_tmp2, Register n_tmp3) {
 8843   if (is_pclmulqdq_supported) {
 8844     movdl(w_xtmp1, in_out);
 8845 
 8846     movl(tmp1, const_or_pre_comp_const_index);
 8847     movdl(w_xtmp2, tmp1);
 8848     pclmulqdq(w_xtmp1, w_xtmp2, 0);
    // Keep the result in XMM since the GPR is only 32 bits wide
 8850   } else {
 8851     crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2);
 8852   }
 8853 }
 8854 
 8855 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
 8856                                      XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8857                                      Register tmp1, Register tmp2,
 8858                                      Register n_tmp3) {
 8859   crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8860   crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8861 
 8862   psllq(w_xtmp1, 1);
 8863   movdl(tmp1, w_xtmp1);
 8864   psrlq(w_xtmp1, 32);
 8865   movdl(in_out, w_xtmp1);
 8866 
 8867   xorl(tmp2, tmp2);
 8868   crc32(tmp2, tmp1, 4);
 8869   xorl(in_out, tmp2);
 8870 
 8871   psllq(w_xtmp2, 1);
 8872   movdl(tmp1, w_xtmp2);
 8873   psrlq(w_xtmp2, 32);
 8874   movdl(in1, w_xtmp2);
 8875 
 8876   xorl(tmp2, tmp2);
 8877   crc32(tmp2, tmp1, 4);
 8878   xorl(in1, tmp2);
 8879   xorl(in_out, in1);
 8880   xorl(in_out, in2);
 8881 }
 8882 
 8883 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
 8884                                        Register in_out1, Register in_out2, Register in_out3,
 8885                                        Register tmp1, Register tmp2, Register tmp3,
 8886                                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8887                                        Register tmp4, Register tmp5,
 8888                                        Register n_tmp6) {
 8889   Label L_processPartitions;
 8890   Label L_processPartition;
 8891   Label L_exit;
 8892 
 8893   bind(L_processPartitions);
 8894   cmpl(in_out1, 3 * size);
 8895   jcc(Assembler::less, L_exit);
 8896     xorl(tmp1, tmp1);
 8897     xorl(tmp2, tmp2);
 8898     movl(tmp3, in_out2);
 8899     addl(tmp3, size);
 8900 
 8901     bind(L_processPartition);
 8902       crc32(in_out3, Address(in_out2, 0), 4);
 8903       crc32(tmp1, Address(in_out2, size), 4);
 8904       crc32(tmp2, Address(in_out2, size*2), 4);
 8905       crc32(in_out3, Address(in_out2, 0+4), 4);
 8906       crc32(tmp1, Address(in_out2, size+4), 4);
 8907       crc32(tmp2, Address(in_out2, size*2+4), 4);
 8908       addl(in_out2, 8);
 8909       cmpl(in_out2, tmp3);
 8910       jcc(Assembler::less, L_processPartition);
 8911 
 8912         push(tmp3);
 8913         push(in_out1);
 8914         push(in_out2);
 8915         tmp4 = tmp3;
 8916         tmp5 = in_out1;
 8917         n_tmp6 = in_out2;
 8918 
 8919       crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
 8920             w_xtmp1, w_xtmp2, w_xtmp3,
 8921             tmp4, tmp5,
 8922             n_tmp6);
 8923 
 8924         pop(in_out2);
 8925         pop(in_out1);
 8926         pop(tmp3);
 8927 
 8928     addl(in_out2, 2 * size);
 8929     subl(in_out1, 3 * size);
 8930     jmp(L_processPartitions);
 8931 
 8932   bind(L_exit);
 8933 }
 8934 #endif //LP64
 8935 
 8936 #ifdef _LP64
 8937 // Algorithm 2: Pipelined usage of the CRC32 instruction.
 8938 // Input: A buffer I of L bytes.
 8939 // Output: the CRC32C value of the buffer.
 8940 // Notations:
 8941 // Write L = 24N + r, with N = floor (L/24).
 8942 // r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation of A|B|C|R, where A, B and C each consist
// of N quadwords, and R consists of r bytes.
// A[j] = I [8j+7 : 8j],             j = 0, 1, ..., N-1
// B[j] = I [8N + 8j+7 : 8N + 8j],   j = 0, 1, ..., N-1
// C[j] = I [16N + 8j+7 : 16N + 8j], j = 0, 1, ..., N-1
// if r > 0, R[j] = I [24N + j],     j = 0, 1, ..., r-1
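//
// A rough scalar sketch of the scheme above (illustrative only; the SSE4.2
// intrinsic spellings and the combine() helper are assumptions of this sketch,
// not code used by this file):
//
//   uint64_t crcA = crc0, crcB = 0, crcC = 0;   // crc0 = incoming CRC state
//   for (size_t j = 0; j < N; j++) {            // three independent CRC streams
//     crcA = _mm_crc32_u64(crcA, A[j]);         // that the CPU can pipeline
//     crcB = _mm_crc32_u64(crcB, B[j]);
//     crcC = _mm_crc32_u64(crcC, C[j]);
//   }
//   uint32_t crc = combine(crcA, crcB, crcC);   // the crc32c_rec_alt2 step
//   for (uint32_t j = 0; j < r; j++) {
//     crc = _mm_crc32_u8(crc, R[j]);            // byte-by-byte tail
//   }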
 8949 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
 8950                                           Register tmp1, Register tmp2, Register tmp3,
 8951                                           Register tmp4, Register tmp5, Register tmp6,
 8952                                           XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8953                                           bool is_pclmulqdq_supported) {
 8954   uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
 8955   Label L_wordByWord;
 8956   Label L_byteByByteProlog;
 8957   Label L_byteByByte;
 8958   Label L_exit;
 8959 
 8960   if (is_pclmulqdq_supported ) {
 8961     const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
 8962     const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1);
 8963 
 8964     const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
 8965     const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
 8966 
 8967     const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
 8968     const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
 8969     assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
 8970   } else {
 8971     const_or_pre_comp_const_index[0] = 1;
 8972     const_or_pre_comp_const_index[1] = 0;
 8973 
 8974     const_or_pre_comp_const_index[2] = 3;
 8975     const_or_pre_comp_const_index[3] = 2;
 8976 
 8977     const_or_pre_comp_const_index[4] = 5;
 8978     const_or_pre_comp_const_index[5] = 4;
 8979    }
 8980   crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
 8981                     in2, in1, in_out,
 8982                     tmp1, tmp2, tmp3,
 8983                     w_xtmp1, w_xtmp2, w_xtmp3,
 8984                     tmp4, tmp5,
 8985                     tmp6);
 8986   crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
 8987                     in2, in1, in_out,
 8988                     tmp1, tmp2, tmp3,
 8989                     w_xtmp1, w_xtmp2, w_xtmp3,
 8990                     tmp4, tmp5,
 8991                     tmp6);
 8992   crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
 8993                     in2, in1, in_out,
 8994                     tmp1, tmp2, tmp3,
 8995                     w_xtmp1, w_xtmp2, w_xtmp3,
 8996                     tmp4, tmp5,
 8997                     tmp6);
 8998   movl(tmp1, in2);
 8999   andl(tmp1, 0x00000007);
 9000   negl(tmp1);
 9001   addl(tmp1, in2);
 9002   addq(tmp1, in1);
 9003 
 9004   cmpq(in1, tmp1);
 9005   jccb(Assembler::greaterEqual, L_byteByByteProlog);
 9006   align(16);
 9007   BIND(L_wordByWord);
 9008     crc32(in_out, Address(in1, 0), 8);
 9009     addq(in1, 8);
 9010     cmpq(in1, tmp1);
 9011     jcc(Assembler::less, L_wordByWord);
 9012 
 9013   BIND(L_byteByByteProlog);
 9014   andl(in2, 0x00000007);
 9015   movl(tmp2, 1);
 9016 
 9017   cmpl(tmp2, in2);
 9018   jccb(Assembler::greater, L_exit);
 9019   BIND(L_byteByByte);
 9020     crc32(in_out, Address(in1, 0), 1);
 9021     incq(in1);
 9022     incl(tmp2);
 9023     cmpl(tmp2, in2);
 9024     jcc(Assembler::lessEqual, L_byteByByte);
 9025 
 9026   BIND(L_exit);
 9027 }
 9028 #else
 9029 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
 9030                                           Register tmp1, Register  tmp2, Register tmp3,
 9031                                           Register tmp4, Register  tmp5, Register tmp6,
 9032                                           XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 9033                                           bool is_pclmulqdq_supported) {
 9034   uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
 9035   Label L_wordByWord;
 9036   Label L_byteByByteProlog;
 9037   Label L_byteByByte;
 9038   Label L_exit;
 9039 
 9040   if (is_pclmulqdq_supported) {
 9041     const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
 9042     const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);
 9043 
 9044     const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
 9045     const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
 9046 
 9047     const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
 9048     const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
 9049   } else {
 9050     const_or_pre_comp_const_index[0] = 1;
 9051     const_or_pre_comp_const_index[1] = 0;
 9052 
 9053     const_or_pre_comp_const_index[2] = 3;
 9054     const_or_pre_comp_const_index[3] = 2;
 9055 
 9056     const_or_pre_comp_const_index[4] = 5;
 9057     const_or_pre_comp_const_index[5] = 4;
 9058   }
 9059   crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
 9060                     in2, in1, in_out,
 9061                     tmp1, tmp2, tmp3,
 9062                     w_xtmp1, w_xtmp2, w_xtmp3,
 9063                     tmp4, tmp5,
 9064                     tmp6);
 9065   crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
 9066                     in2, in1, in_out,
 9067                     tmp1, tmp2, tmp3,
 9068                     w_xtmp1, w_xtmp2, w_xtmp3,
 9069                     tmp4, tmp5,
 9070                     tmp6);
 9071   crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
 9072                     in2, in1, in_out,
 9073                     tmp1, tmp2, tmp3,
 9074                     w_xtmp1, w_xtmp2, w_xtmp3,
 9075                     tmp4, tmp5,
 9076                     tmp6);
 9077   movl(tmp1, in2);
 9078   andl(tmp1, 0x00000007);
 9079   negl(tmp1);
 9080   addl(tmp1, in2);
 9081   addl(tmp1, in1);
 9082 
 9083   BIND(L_wordByWord);
 9084   cmpl(in1, tmp1);
 9085   jcc(Assembler::greaterEqual, L_byteByByteProlog);
 9086     crc32(in_out, Address(in1,0), 4);
 9087     addl(in1, 4);
 9088     jmp(L_wordByWord);
 9089 
 9090   BIND(L_byteByByteProlog);
 9091   andl(in2, 0x00000007);
 9092   movl(tmp2, 1);
 9093 
 9094   BIND(L_byteByByte);
 9095   cmpl(tmp2, in2);
 9096   jccb(Assembler::greater, L_exit);
 9097     movb(tmp1, Address(in1, 0));
 9098     crc32(in_out, tmp1, 1);
 9099     incl(in1);
 9100     incl(tmp2);
 9101     jmp(L_byteByByte);
 9102 
 9103   BIND(L_exit);
 9104 }
 9105 #endif // LP64
 9106 #undef BIND
 9107 #undef BLOCK_COMMENT
 9108 
 9109 // Compress char[] array to byte[].
 9110 //   ..\jdk\src\java.base\share\classes\java\lang\StringUTF16.java
 9111 //   @IntrinsicCandidate
 9112 //   private static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
 9113 //     for (int i = 0; i < len; i++) {
 9114 //       int c = src[srcOff++];
 9115 //       if (c >>> 8 != 0) {
 9116 //         return 0;
 9117 //       }
 9118 //       dst[dstOff++] = (byte)c;
 9119 //     }
 9120 //     return len;
 9121 //   }
 9122 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
 9123   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 9124   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 9125   Register tmp5, Register result, KRegister mask1, KRegister mask2) {
 9126   Label copy_chars_loop, return_length, return_zero, done;
 9127 
 9128   // rsi: src
 9129   // rdi: dst
 9130   // rdx: len
 9131   // rcx: tmp5
 9132   // rax: result
 9133 
 9134   // rsi holds start addr of source char[] to be compressed
 9135   // rdi holds start addr of destination byte[]
 9136   // rdx holds length
 9137 
 9138   assert(len != result, "");
 9139 
 9140   // save length for return
 9141   push(len);
 9142 
 9143   if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
 9144     VM_Version::supports_avx512vlbw() &&
 9145     VM_Version::supports_bmi2()) {
 9146 
 9147     Label copy_32_loop, copy_loop_tail, below_threshold;
 9148 
 9149     // alignment
 9150     Label post_alignment;
 9151 
    // if the length of the string is less than 32, handle it the old-fashioned way
 9153     testl(len, -32);
 9154     jcc(Assembler::zero, below_threshold);
 9155 
 9156     // First check whether a character is compressible ( <= 0xFF).
 9157     // Create mask to test for Unicode chars inside zmm vector
 9158     movl(result, 0x00FF);
 9159     evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit);
 9160 
 9161     testl(len, -64);
 9162     jcc(Assembler::zero, post_alignment);
 9163 
 9164     movl(tmp5, dst);
 9165     andl(tmp5, (32 - 1));
 9166     negl(tmp5);
 9167     andl(tmp5, (32 - 1));
 9168 
 9169     // bail out when there is nothing to be done
 9170     testl(tmp5, 0xFFFFFFFF);
 9171     jcc(Assembler::zero, post_alignment);
 9172 
 9173     // ~(~0 << len), where len is the # of remaining elements to process
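    // e.g. tmp5 == 5: ~(~0 << 5) = 0x1F, i.e. a mask that enables the low five
    // 16-bit lanes for the masked load/compare/store below.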
 9174     movl(result, 0xFFFFFFFF);
 9175     shlxl(result, result, tmp5);
 9176     notl(result);
 9177     kmovdl(mask2, result);
 9178 
 9179     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 9180     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 9181     ktestd(mask1, mask2);
 9182     jcc(Assembler::carryClear, return_zero);
 9183 
 9184     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 9185 
 9186     addptr(src, tmp5);
 9187     addptr(src, tmp5);
 9188     addptr(dst, tmp5);
 9189     subl(len, tmp5);
 9190 
 9191     bind(post_alignment);
 9192     // end of alignment
 9193 
 9194     movl(tmp5, len);
 9195     andl(tmp5, (32 - 1));    // tail count (in chars)
 9196     andl(len, ~(32 - 1));    // vector count (in chars)
 9197     jcc(Assembler::zero, copy_loop_tail);
 9198 
 9199     lea(src, Address(src, len, Address::times_2));
 9200     lea(dst, Address(dst, len, Address::times_1));
 9201     negptr(len);
 9202 
 9203     bind(copy_32_loop);
 9204     evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
 9205     evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
 9206     kortestdl(mask1, mask1);
 9207     jcc(Assembler::carryClear, return_zero);
 9208 
    // All elements in the current chunk are valid candidates for compression.
    // Write the truncated byte elements to memory.
 9211     evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
 9212     addptr(len, 32);
 9213     jcc(Assembler::notZero, copy_32_loop);
 9214 
 9215     bind(copy_loop_tail);
 9216     // bail out when there is nothing to be done
 9217     testl(tmp5, 0xFFFFFFFF);
 9218     jcc(Assembler::zero, return_length);
 9219 
 9220     movl(len, tmp5);
 9221 
 9222     // ~(~0 << len), where len is the # of remaining elements to process
 9223     movl(result, 0xFFFFFFFF);
 9224     shlxl(result, result, len);
 9225     notl(result);
 9226 
 9227     kmovdl(mask2, result);
 9228 
 9229     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 9230     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 9231     ktestd(mask1, mask2);
 9232     jcc(Assembler::carryClear, return_zero);
 9233 
 9234     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 9235     jmp(return_length);
 9236 
 9237     bind(below_threshold);
 9238   }
 9239 
 9240   if (UseSSE42Intrinsics) {
 9241     Label copy_32_loop, copy_16, copy_tail;
 9242 
 9243     movl(result, len);
 9244 
 9245     movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors
 9246 
 9247     // vectored compression
 9248     andl(len, 0xfffffff0);    // vector count (in chars)
 9249     andl(result, 0x0000000f);    // tail count (in chars)
 9250     testl(len, len);
 9251     jcc(Assembler::zero, copy_16);
 9252 
 9253     // compress 16 chars per iter
 9254     movdl(tmp1Reg, tmp5);
 9255     pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
 9256     pxor(tmp4Reg, tmp4Reg);
 9257 
 9258     lea(src, Address(src, len, Address::times_2));
 9259     lea(dst, Address(dst, len, Address::times_1));
 9260     negptr(len);
 9261 
 9262     bind(copy_32_loop);
 9263     movdqu(tmp2Reg, Address(src, len, Address::times_2));     // load 1st 8 characters
 9264     por(tmp4Reg, tmp2Reg);
 9265     movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
 9266     por(tmp4Reg, tmp3Reg);
 9267     ptest(tmp4Reg, tmp1Reg);       // check for Unicode chars in next vector
 9268     jcc(Assembler::notZero, return_zero);
 9269     packuswb(tmp2Reg, tmp3Reg);    // only ASCII chars; compress each to 1 byte
 9270     movdqu(Address(dst, len, Address::times_1), tmp2Reg);
 9271     addptr(len, 16);
 9272     jcc(Assembler::notZero, copy_32_loop);
 9273 
 9274     // compress next vector of 8 chars (if any)
 9275     bind(copy_16);
 9276     movl(len, result);
 9277     andl(len, 0xfffffff8);    // vector count (in chars)
 9278     andl(result, 0x00000007);    // tail count (in chars)
 9279     testl(len, len);
 9280     jccb(Assembler::zero, copy_tail);
 9281 
 9282     movdl(tmp1Reg, tmp5);
 9283     pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
 9284     pxor(tmp3Reg, tmp3Reg);
 9285 
 9286     movdqu(tmp2Reg, Address(src, 0));
 9287     ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
 9288     jccb(Assembler::notZero, return_zero);
 9289     packuswb(tmp2Reg, tmp3Reg);    // only LATIN1 chars; compress each to 1 byte
 9290     movq(Address(dst, 0), tmp2Reg);
 9291     addptr(src, 16);
 9292     addptr(dst, 8);
 9293 
 9294     bind(copy_tail);
 9295     movl(len, result);
 9296   }
 9297   // compress 1 char per iter
 9298   testl(len, len);
 9299   jccb(Assembler::zero, return_length);
 9300   lea(src, Address(src, len, Address::times_2));
 9301   lea(dst, Address(dst, len, Address::times_1));
 9302   negptr(len);
 9303 
 9304   bind(copy_chars_loop);
 9305   load_unsigned_short(result, Address(src, len, Address::times_2));
 9306   testl(result, 0xff00);      // check if Unicode char
 9307   jccb(Assembler::notZero, return_zero);
 9308   movb(Address(dst, len, Address::times_1), result);  // ASCII char; compress to 1 byte
 9309   increment(len);
 9310   jcc(Assembler::notZero, copy_chars_loop);
 9311 
 9312   // if compression succeeded, return length
 9313   bind(return_length);
 9314   pop(result);
 9315   jmpb(done);
 9316 
 9317   // if compression failed, return 0
 9318   bind(return_zero);
 9319   xorl(result, result);
 9320   addptr(rsp, wordSize);
 9321 
 9322   bind(done);
 9323 }
 9324 
 9325 // Inflate byte[] array to char[].
 9326 //   ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
 9327 //   @IntrinsicCandidate
 9328 //   private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
 9329 //     for (int i = 0; i < len; i++) {
 9330 //       dst[dstOff++] = (char)(src[srcOff++] & 0xff);
 9331 //     }
 9332 //   }
 9333 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
 9334   XMMRegister tmp1, Register tmp2, KRegister mask) {
 9335   Label copy_chars_loop, done, below_threshold, avx3_threshold;
 9336   // rsi: src
 9337   // rdi: dst
 9338   // rdx: len
 9339   // rcx: tmp2
 9340 
 9341   // rsi holds start addr of source byte[] to be inflated
 9342   // rdi holds start addr of destination char[]
 9343   // rdx holds length
 9344   assert_different_registers(src, dst, len, tmp2);
 9345   movl(tmp2, len);
 9346   if ((UseAVX > 2) && // AVX512
 9347     VM_Version::supports_avx512vlbw() &&
 9348     VM_Version::supports_bmi2()) {
 9349 
 9350     Label copy_32_loop, copy_tail;
 9351     Register tmp3_aliased = len;
 9352 
    // if the length of the string is less than 16, handle it the old-fashioned way
 9354     testl(len, -16);
 9355     jcc(Assembler::zero, below_threshold);
 9356 
 9357     testl(len, -1 * AVX3Threshold);
 9358     jcc(Assembler::zero, avx3_threshold);
 9359 
    // Pre-calculate the tail and vector counts so that the main loop needs only
    // one arithmetic operation per iteration.
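    // e.g. len == 100: tmp2 = 100 & 31 = 4 tail chars, len = 100 & ~31 = 96
    // chars handled by the 32-char vector loop.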
 9362     andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
 9363     andl(len, -32);     // vector count
 9364     jccb(Assembler::zero, copy_tail);
 9365 
 9366     lea(src, Address(src, len, Address::times_1));
 9367     lea(dst, Address(dst, len, Address::times_2));
 9368     negptr(len);
 9369 
 9370 
 9371     // inflate 32 chars per iter
 9372     bind(copy_32_loop);
 9373     vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
 9374     evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
 9375     addptr(len, 32);
 9376     jcc(Assembler::notZero, copy_32_loop);
 9377 
 9378     bind(copy_tail);
 9379     // bail out when there is nothing to be done
 9380     testl(tmp2, -1); // we don't destroy the contents of tmp2 here
 9381     jcc(Assembler::zero, done);
 9382 
 9383     // ~(~0 << length), where length is the # of remaining elements to process
 9384     movl(tmp3_aliased, -1);
 9385     shlxl(tmp3_aliased, tmp3_aliased, tmp2);
 9386     notl(tmp3_aliased);
 9387     kmovdl(mask, tmp3_aliased);
 9388     evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
 9389     evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
 9390 
 9391     jmp(done);
 9392     bind(avx3_threshold);
 9393   }
 9394   if (UseSSE42Intrinsics) {
 9395     Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
 9396 
 9397     if (UseAVX > 1) {
 9398       andl(tmp2, (16 - 1));
 9399       andl(len, -16);
 9400       jccb(Assembler::zero, copy_new_tail);
 9401     } else {
 9402       andl(tmp2, 0x00000007);   // tail count (in chars)
 9403       andl(len, 0xfffffff8);    // vector count (in chars)
 9404       jccb(Assembler::zero, copy_tail);
 9405     }
 9406 
 9407     // vectored inflation
 9408     lea(src, Address(src, len, Address::times_1));
 9409     lea(dst, Address(dst, len, Address::times_2));
 9410     negptr(len);
 9411 
 9412     if (UseAVX > 1) {
 9413       bind(copy_16_loop);
 9414       vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
 9415       vmovdqu(Address(dst, len, Address::times_2), tmp1);
 9416       addptr(len, 16);
 9417       jcc(Assembler::notZero, copy_16_loop);
 9418 
 9419       bind(below_threshold);
 9420       bind(copy_new_tail);
 9421       movl(len, tmp2);
 9422       andl(tmp2, 0x00000007);
 9423       andl(len, 0xFFFFFFF8);
 9424       jccb(Assembler::zero, copy_tail);
 9425 
 9426       pmovzxbw(tmp1, Address(src, 0));
 9427       movdqu(Address(dst, 0), tmp1);
 9428       addptr(src, 8);
 9429       addptr(dst, 2 * 8);
 9430 
 9431       jmp(copy_tail, true);
 9432     }
 9433 
 9434     // inflate 8 chars per iter
 9435     bind(copy_8_loop);
 9436     pmovzxbw(tmp1, Address(src, len, Address::times_1));  // unpack to 8 words
 9437     movdqu(Address(dst, len, Address::times_2), tmp1);
 9438     addptr(len, 8);
 9439     jcc(Assembler::notZero, copy_8_loop);
 9440 
 9441     bind(copy_tail);
 9442     movl(len, tmp2);
 9443 
 9444     cmpl(len, 4);
 9445     jccb(Assembler::less, copy_bytes);
 9446 
 9447     movdl(tmp1, Address(src, 0));  // load 4 byte chars
 9448     pmovzxbw(tmp1, tmp1);
 9449     movq(Address(dst, 0), tmp1);
 9450     subptr(len, 4);
 9451     addptr(src, 4);
 9452     addptr(dst, 8);
 9453 
 9454     bind(copy_bytes);
 9455   } else {
 9456     bind(below_threshold);
 9457   }
 9458 
 9459   testl(len, len);
 9460   jccb(Assembler::zero, done);
 9461   lea(src, Address(src, len, Address::times_1));
 9462   lea(dst, Address(dst, len, Address::times_2));
 9463   negptr(len);
 9464 
 9465   // inflate 1 char per iter
 9466   bind(copy_chars_loop);
 9467   load_unsigned_byte(tmp2, Address(src, len, Address::times_1));  // load byte char
 9468   movw(Address(dst, len, Address::times_2), tmp2);  // inflate byte char to word
 9469   increment(len);
 9470   jcc(Assembler::notZero, copy_chars_loop);
 9471 
 9472   bind(done);
 9473 }
 9474 
 9475 
 9476 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
 9477   switch(type) {
 9478     case T_BYTE:
 9479     case T_BOOLEAN:
 9480       evmovdqub(dst, kmask, src, merge, vector_len);
 9481       break;
 9482     case T_CHAR:
 9483     case T_SHORT:
 9484       evmovdquw(dst, kmask, src, merge, vector_len);
 9485       break;
 9486     case T_INT:
 9487     case T_FLOAT:
 9488       evmovdqul(dst, kmask, src, merge, vector_len);
 9489       break;
 9490     case T_LONG:
 9491     case T_DOUBLE:
 9492       evmovdquq(dst, kmask, src, merge, vector_len);
 9493       break;
 9494     default:
 9495       fatal("Unexpected type argument %s", type2name(type));
 9496       break;
 9497   }
 9498 }
 9499 
 9500 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
 9501   switch(type) {
 9502     case T_BYTE:
 9503     case T_BOOLEAN:
 9504       evmovdqub(dst, kmask, src, merge, vector_len);
 9505       break;
 9506     case T_CHAR:
 9507     case T_SHORT:
 9508       evmovdquw(dst, kmask, src, merge, vector_len);
 9509       break;
 9510     case T_INT:
 9511     case T_FLOAT:
 9512       evmovdqul(dst, kmask, src, merge, vector_len);
 9513       break;
 9514     case T_LONG:
 9515     case T_DOUBLE:
 9516       evmovdquq(dst, kmask, src, merge, vector_len);
 9517       break;
 9518     default:
 9519       fatal("Unexpected type argument %s", type2name(type));
 9520       break;
 9521   }
 9522 }
 9523 
 9524 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
 9525   switch(masklen) {
 9526     case 2:
 9527        knotbl(dst, src);
 9528        movl(rtmp, 3);
 9529        kmovbl(ktmp, rtmp);
 9530        kandbl(dst, ktmp, dst);
 9531        break;
 9532     case 4:
 9533        knotbl(dst, src);
 9534        movl(rtmp, 15);
 9535        kmovbl(ktmp, rtmp);
 9536        kandbl(dst, ktmp, dst);
 9537        break;
 9538     case 8:
 9539        knotbl(dst, src);
 9540        break;
 9541     case 16:
 9542        knotwl(dst, src);
 9543        break;
 9544     case 32:
 9545        knotdl(dst, src);
 9546        break;
 9547     case 64:
 9548        knotql(dst, src);
 9549        break;
 9550     default:
 9551       fatal("Unexpected vector length %d", masklen);
 9552       break;
 9553   }
 9554 }
 9555 
 9556 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9557   switch(type) {
 9558     case T_BOOLEAN:
 9559     case T_BYTE:
 9560        kandbl(dst, src1, src2);
 9561        break;
 9562     case T_CHAR:
 9563     case T_SHORT:
 9564        kandwl(dst, src1, src2);
 9565        break;
 9566     case T_INT:
 9567     case T_FLOAT:
 9568        kanddl(dst, src1, src2);
 9569        break;
 9570     case T_LONG:
 9571     case T_DOUBLE:
 9572        kandql(dst, src1, src2);
 9573        break;
 9574     default:
 9575       fatal("Unexpected type argument %s", type2name(type));
 9576       break;
 9577   }
 9578 }
 9579 
 9580 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9581   switch(type) {
 9582     case T_BOOLEAN:
 9583     case T_BYTE:
 9584        korbl(dst, src1, src2);
 9585        break;
 9586     case T_CHAR:
 9587     case T_SHORT:
 9588        korwl(dst, src1, src2);
 9589        break;
 9590     case T_INT:
 9591     case T_FLOAT:
 9592        kordl(dst, src1, src2);
 9593        break;
 9594     case T_LONG:
 9595     case T_DOUBLE:
 9596        korql(dst, src1, src2);
 9597        break;
 9598     default:
 9599       fatal("Unexpected type argument %s", type2name(type));
 9600       break;
 9601   }
 9602 }
 9603 
 9604 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9605   switch(type) {
 9606     case T_BOOLEAN:
 9607     case T_BYTE:
 9608        kxorbl(dst, src1, src2);
 9609        break;
 9610     case T_CHAR:
 9611     case T_SHORT:
 9612        kxorwl(dst, src1, src2);
 9613        break;
 9614     case T_INT:
 9615     case T_FLOAT:
 9616        kxordl(dst, src1, src2);
 9617        break;
 9618     case T_LONG:
 9619     case T_DOUBLE:
 9620        kxorql(dst, src1, src2);
 9621        break;
 9622     default:
 9623       fatal("Unexpected type argument %s", type2name(type));
 9624       break;
 9625   }
 9626 }
 9627 
 9628 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9629   switch(type) {
 9630     case T_BOOLEAN:
 9631     case T_BYTE:
 9632       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9633     case T_CHAR:
 9634     case T_SHORT:
 9635       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9636     case T_INT:
 9637     case T_FLOAT:
 9638       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9639     case T_LONG:
 9640     case T_DOUBLE:
 9641       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9642     default:
 9643       fatal("Unexpected type argument %s", type2name(type)); break;
 9644   }
 9645 }
 9646 
 9647 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9648   switch(type) {
 9649     case T_BOOLEAN:
 9650     case T_BYTE:
 9651       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9652     case T_CHAR:
 9653     case T_SHORT:
 9654       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9655     case T_INT:
 9656     case T_FLOAT:
 9657       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9658     case T_LONG:
 9659     case T_DOUBLE:
 9660       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9661     default:
 9662       fatal("Unexpected type argument %s", type2name(type)); break;
 9663   }
 9664 }
 9665 
 9666 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9667   switch(type) {
 9668     case T_BYTE:
 9669       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9670     case T_SHORT:
 9671       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9672     case T_INT:
 9673       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9674     case T_LONG:
 9675       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9676     default:
 9677       fatal("Unexpected type argument %s", type2name(type)); break;
 9678   }
 9679 }
 9680 
 9681 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9682   switch(type) {
 9683     case T_BYTE:
 9684       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9685     case T_SHORT:
 9686       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9687     case T_INT:
 9688       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9689     case T_LONG:
 9690       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9691     default:
 9692       fatal("Unexpected type argument %s", type2name(type)); break;
 9693   }
 9694 }
 9695 
 9696 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9697   switch(type) {
 9698     case T_BYTE:
 9699       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9700     case T_SHORT:
 9701       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9702     case T_INT:
 9703       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9704     case T_LONG:
 9705       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9706     default:
 9707       fatal("Unexpected type argument %s", type2name(type)); break;
 9708   }
 9709 }
 9710 
 9711 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9712   switch(type) {
 9713     case T_BYTE:
 9714       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9715     case T_SHORT:
 9716       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9717     case T_INT:
 9718       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9719     case T_LONG:
 9720       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9721     default:
 9722       fatal("Unexpected type argument %s", type2name(type)); break;
 9723   }
 9724 }
 9725 
 9726 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9727   switch(type) {
 9728     case T_INT:
 9729       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9730     case T_LONG:
 9731       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9732     default:
 9733       fatal("Unexpected type argument %s", type2name(type)); break;
 9734   }
 9735 }
 9736 
 9737 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9738   switch(type) {
 9739     case T_INT:
 9740       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9741     case T_LONG:
 9742       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9743     default:
 9744       fatal("Unexpected type argument %s", type2name(type)); break;
 9745   }
 9746 }
 9747 
 9748 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9749   switch(type) {
 9750     case T_INT:
 9751       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9752     case T_LONG:
 9753       evporq(dst, mask, nds, src, merge, vector_len); break;
 9754     default:
 9755       fatal("Unexpected type argument %s", type2name(type)); break;
 9756   }
 9757 }
 9758 
 9759 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9760   switch(type) {
 9761     case T_INT:
 9762       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9763     case T_LONG:
 9764       evporq(dst, mask, nds, src, merge, vector_len); break;
 9765     default:
 9766       fatal("Unexpected type argument %s", type2name(type)); break;
 9767   }
 9768 }
 9769 
 9770 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9771   switch(type) {
 9772     case T_INT:
 9773       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9774     case T_LONG:
 9775       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9776     default:
 9777       fatal("Unexpected type argument %s", type2name(type)); break;
 9778   }
 9779 }
 9780 
 9781 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9782   switch(type) {
 9783     case T_INT:
 9784       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9785     case T_LONG:
 9786       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9787     default:
 9788       fatal("Unexpected type argument %s", type2name(type)); break;
 9789   }
 9790 }
 9791 
 9792 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
 9793   switch(masklen) {
 9794     case 8:
 9795        kortestbl(src1, src2);
 9796        break;
 9797     case 16:
 9798        kortestwl(src1, src2);
 9799        break;
 9800     case 32:
 9801        kortestdl(src1, src2);
 9802        break;
 9803     case 64:
 9804        kortestql(src1, src2);
 9805        break;
 9806     default:
 9807       fatal("Unexpected mask length %d", masklen);
 9808       break;
 9809   }
 9810 }
 9811 
 9812 
 9813 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
 9814   switch(masklen)  {
 9815     case 8:
 9816        ktestbl(src1, src2);
 9817        break;
 9818     case 16:
 9819        ktestwl(src1, src2);
 9820        break;
 9821     case 32:
 9822        ktestdl(src1, src2);
 9823        break;
 9824     case 64:
 9825        ktestql(src1, src2);
 9826        break;
 9827     default:
 9828       fatal("Unexpected mask length %d", masklen);
 9829       break;
 9830   }
 9831 }
 9832 
 9833 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9834   switch(type) {
 9835     case T_INT:
 9836       evprold(dst, mask, src, shift, merge, vlen_enc); break;
 9837     case T_LONG:
 9838       evprolq(dst, mask, src, shift, merge, vlen_enc); break;
 9839     default:
 9840       fatal("Unexpected type argument %s", type2name(type)); break;
 9842   }
 9843 }
 9844 
 9845 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9846   switch(type) {
 9847     case T_INT:
 9848       evprord(dst, mask, src, shift, merge, vlen_enc); break;
 9849     case T_LONG:
 9850       evprorq(dst, mask, src, shift, merge, vlen_enc); break;
 9851     default:
 9852       fatal("Unexpected type argument %s", type2name(type)); break;
 9853   }
 9854 }
 9855 
 9856 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9857   switch(type) {
 9858     case T_INT:
 9859       evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9860     case T_LONG:
 9861       evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9862     default:
 9863       fatal("Unexpected type argument %s", type2name(type)); break;
 9864   }
 9865 }
 9866 
 9867 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9868   switch(type) {
 9869     case T_INT:
 9870       evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9871     case T_LONG:
 9872       evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9873     default:
 9874       fatal("Unexpected type argument %s", type2name(type)); break;
 9875   }
 9876 }
 9877 
 9878 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9879   assert(rscratch != noreg || always_reachable(src), "missing");
 9880 
 9881   if (reachable(src)) {
 9882     evpandq(dst, nds, as_Address(src), vector_len);
 9883   } else {
 9884     lea(rscratch, src);
 9885     evpandq(dst, nds, Address(rscratch, 0), vector_len);
 9886   }
 9887 }
 9888 
 9889 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9890   assert(rscratch != noreg || always_reachable(src), "missing");
 9891 
 9892   if (reachable(src)) {
 9893     evporq(dst, nds, as_Address(src), vector_len);
 9894   } else {
 9895     lea(rscratch, src);
 9896     evporq(dst, nds, Address(rscratch, 0), vector_len);
 9897   }
 9898 }
 9899 
 9900 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
 9901   assert(rscratch != noreg || always_reachable(src3), "missing");
 9902 
 9903   if (reachable(src3)) {
 9904     vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
 9905   } else {
 9906     lea(rscratch, src3);
 9907     vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
 9908   }
 9909 }
 9910 
 9911 #if COMPILER2_OR_JVMCI
 9912 
 9913 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
 9914                                  Register length, Register temp, int vec_enc) {
 9915   // Computing mask for predicated vector store.
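  // Setting temp to -1 and then bzhiq-ing it by `length` leaves only the low
  // `length` bits set, e.g. length == 5 -> temp = 0x1F, enabling five bt-sized
  // lanes in the masked store below.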
 9916   movptr(temp, -1);
 9917   bzhiq(temp, temp, length);
 9918   kmov(mask, temp);
 9919   evmovdqu(bt, mask, dst, xmm, true, vec_enc);
 9920 }
 9921 
 9922 // Set memory operation for length "less than" 64 bytes.
 9923 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
 9924                                        XMMRegister xmm, KRegister mask, Register length,
 9925                                        Register temp, bool use64byteVector) {
 9926   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9927   BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9928   if (!use64byteVector) {
 9929     fill32(dst, disp, xmm);
 9930     subptr(length, 32 >> shift);
 9931     fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
 9932   } else {
 9933     assert(MaxVectorSize == 64, "vector length != 64");
 9934     fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
 9935   }
 9936 }
 9937 
 9938 
 9939 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
 9940                                        XMMRegister xmm, KRegister mask, Register length,
 9941                                        Register temp) {
 9942   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9943   BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9944   fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
 9945 }
 9946 
 9947 
 9948 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
 9949   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9950   vmovdqu(dst, xmm);
 9951 }
 9952 
 9953 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
 9954   fill32(Address(dst, disp), xmm);
 9955 }
 9956 
 9957 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
 9958   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9959   if (!use64byteVector) {
 9960     fill32(dst, xmm);
 9961     fill32(dst.plus_disp(32), xmm);
 9962   } else {
 9963     evmovdquq(dst, xmm, Assembler::AVX_512bit);
 9964   }
 9965 }
 9966 
 9967 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
 9968   fill64(Address(dst, disp), xmm, use64byteVector);
 9969 }
 9970 
 9971 #ifdef _LP64
 9972 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
 9973                                         Register count, Register rtmp, XMMRegister xtmp) {
 9974   Label L_exit;
 9975   Label L_fill_start;
 9976   Label L_fill_64_bytes;
 9977   Label L_fill_96_bytes;
 9978   Label L_fill_128_bytes;
 9979   Label L_fill_128_bytes_loop;
 9980   Label L_fill_128_loop_header;
 9981   Label L_fill_128_bytes_loop_header;
 9982   Label L_fill_128_bytes_loop_pre_header;
 9983   Label L_fill_zmm_sequence;
 9984 
 9985   int shift = -1;
 9986   int avx3threshold = VM_Version::avx3_threshold();
 9987   switch(type) {
 9988     case T_BYTE:  shift = 0;
 9989       break;
 9990     case T_SHORT: shift = 1;
 9991       break;
 9992     case T_INT:   shift = 2;
 9993       break;
 9994     /* Uncomment when LONG fill stubs are supported.
 9995     case T_LONG:  shift = 3;
 9996       break;
 9997     */
 9998     default:
 9999       fatal("Unhandled type: %s\n", type2name(type));
10000   }
10001 
10002   if ((avx3threshold != 0)  || (MaxVectorSize == 32)) {
10003 
10004     if (MaxVectorSize == 64) {
10005       cmpq(count, avx3threshold >> shift);
10006       jcc(Assembler::greater, L_fill_zmm_sequence);
10007     }
10008 
10009     evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
10010 
10011     bind(L_fill_start);
10012 
10013     cmpq(count, 32 >> shift);
10014     jccb(Assembler::greater, L_fill_64_bytes);
10015     fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
10016     jmp(L_exit);
10017 
10018     bind(L_fill_64_bytes);
10019     cmpq(count, 64 >> shift);
10020     jccb(Assembler::greater, L_fill_96_bytes);
10021     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
10022     jmp(L_exit);
10023 
10024     bind(L_fill_96_bytes);
10025     cmpq(count, 96 >> shift);
10026     jccb(Assembler::greater, L_fill_128_bytes);
10027     fill64(to, 0, xtmp);
10028     subq(count, 64 >> shift);
10029     fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
10030     jmp(L_exit);
10031 
10032     bind(L_fill_128_bytes);
10033     cmpq(count, 128 >> shift);
10034     jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
10035     fill64(to, 0, xtmp);
10036     fill32(to, 64, xtmp);
10037     subq(count, 96 >> shift);
10038     fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
10039     jmp(L_exit);
10040 
10041     bind(L_fill_128_bytes_loop_pre_header);
10042     {
10043       mov(rtmp, to);
10044       andq(rtmp, 31);
10045       jccb(Assembler::zero, L_fill_128_bytes_loop_header);
10046       negq(rtmp);
10047       addq(rtmp, 32);
10048       mov64(r8, -1L);
10049       bzhiq(r8, r8, rtmp);
10050       kmovql(k2, r8);
10051       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
10052       addq(to, rtmp);
10053       shrq(rtmp, shift);
10054       subq(count, rtmp);
10055     }
10056 
10057     cmpq(count, 128 >> shift);
10058     jcc(Assembler::less, L_fill_start);
10059 
10060     bind(L_fill_128_bytes_loop_header);
10061     subq(count, 128 >> shift);
10062 
10063     align32();
10064     bind(L_fill_128_bytes_loop);
10065       fill64(to, 0, xtmp);
10066       fill64(to, 64, xtmp);
10067       addq(to, 128);
10068       subq(count, 128 >> shift);
10069       jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
10070 
10071     addq(count, 128 >> shift);
10072     jcc(Assembler::zero, L_exit);
10073     jmp(L_fill_start);
10074   }
10075 
10076   if (MaxVectorSize == 64) {
10077     // Sequence using 64 byte ZMM register.
10078     Label L_fill_128_bytes_zmm;
10079     Label L_fill_192_bytes_zmm;
10080     Label L_fill_192_bytes_loop_zmm;
10081     Label L_fill_192_bytes_loop_header_zmm;
10082     Label L_fill_192_bytes_loop_pre_header_zmm;
10083     Label L_fill_start_zmm_sequence;
10084 
10085     bind(L_fill_zmm_sequence);
10086     evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
10087 
10088     bind(L_fill_start_zmm_sequence);
10089     cmpq(count, 64 >> shift);
10090     jccb(Assembler::greater, L_fill_128_bytes_zmm);
10091     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
10092     jmp(L_exit);
10093 
10094     bind(L_fill_128_bytes_zmm);
10095     cmpq(count, 128 >> shift);
10096     jccb(Assembler::greater, L_fill_192_bytes_zmm);
10097     fill64(to, 0, xtmp, true);
10098     subq(count, 64 >> shift);
10099     fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
10100     jmp(L_exit);
10101 
10102     bind(L_fill_192_bytes_zmm);
10103     cmpq(count, 192 >> shift);
10104     jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
10105     fill64(to, 0, xtmp, true);
10106     fill64(to, 64, xtmp, true);
10107     subq(count, 128 >> shift);
10108     fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
10109     jmp(L_exit);
10110 
10111     bind(L_fill_192_bytes_loop_pre_header_zmm);
10112     {
10113       movq(rtmp, to);
10114       andq(rtmp, 63);
10115       jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
10116       negq(rtmp);
10117       addq(rtmp, 64);
10118       mov64(r8, -1L);
10119       bzhiq(r8, r8, rtmp);
10120       kmovql(k2, r8);
10121       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
10122       addq(to, rtmp);
10123       shrq(rtmp, shift);
10124       subq(count, rtmp);
10125     }
10126 
10127     cmpq(count, 192 >> shift);
10128     jcc(Assembler::less, L_fill_start_zmm_sequence);
10129 
10130     bind(L_fill_192_bytes_loop_header_zmm);
10131     subq(count, 192 >> shift);
10132 
10133     align32();
10134     bind(L_fill_192_bytes_loop_zmm);
10135       fill64(to, 0, xtmp, true);
10136       fill64(to, 64, xtmp, true);
10137       fill64(to, 128, xtmp, true);
10138       addq(to, 192);
10139       subq(count, 192 >> shift);
10140       jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
10141 
10142     addq(count, 192 >> shift);
10143     jcc(Assembler::zero, L_exit);
10144     jmp(L_fill_start_zmm_sequence);
10145   }
10146   bind(L_exit);
10147 }
10148 #endif
10149 #endif //COMPILER2_OR_JVMCI
10150 
10151 
10152 #ifdef _LP64
10153 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
10154   Label done;
10155   cvttss2sil(dst, src);
10156   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
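  // cvttss2si returns the "integer indefinite" value 0x80000000 for NaN and
  // out-of-range inputs, so that result value flags the cases the stub
  // re-examines (a genuine -2^31 result simply takes the slow path as well).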
10157   cmpl(dst, 0x80000000); // float_sign_flip
10158   jccb(Assembler::notEqual, done);
10159   subptr(rsp, 8);
10160   movflt(Address(rsp, 0), src);
10161   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
10162   pop(dst);
10163   bind(done);
10164 }
10165 
10166 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
10167   Label done;
10168   cvttsd2sil(dst, src);
10169   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10170   cmpl(dst, 0x80000000); // float_sign_flip
10171   jccb(Assembler::notEqual, done);
10172   subptr(rsp, 8);
10173   movdbl(Address(rsp, 0), src);
10174   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
10175   pop(dst);
10176   bind(done);
10177 }
10178 
10179 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
10180   Label done;
10181   cvttss2siq(dst, src);
10182   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10183   jccb(Assembler::notEqual, done);
10184   subptr(rsp, 8);
10185   movflt(Address(rsp, 0), src);
10186   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
10187   pop(dst);
10188   bind(done);
10189 }
10190 
10191 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding
  // algorithm; see java.lang.Math.round(float) for details.
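  // Roughly, the algorithm being translated is (Java-like sketch paraphrasing
  // java.lang.Math.round(float); the constant names match the locals below):
  //
  //   int bits  = Float.floatToRawIntBits(a);
  //   int shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS)
  //               - ((bits & EXP_BIT_MASK) >> (SIGNIFICAND_WIDTH - 1));
  //   if ((shift & -32) == 0) {                    // 0 <= shift < 32
  //     int r = (bits & SIGNIF_BIT_MASK) | (SIGNIF_BIT_MASK + 1);
  //     if (bits < 0) r = -r;
  //     return ((r >> shift) + 1) >> 1;            // shift, then round half up
  //   } else {
  //     return (int) a;                            // the convert_f2i special case
  //   }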
10194   const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
10195   const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
10196   const int32_t FloatConsts_EXP_BIAS = 127;
10197   const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
10198   const int32_t MINUS_32 = 0xFFFFFFE0;
10199   Label L_special_case, L_block1, L_exit;
10200   movl(rtmp, FloatConsts_EXP_BIT_MASK);
10201   movdl(dst, src);
10202   andl(dst, rtmp);
10203   sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
10204   movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
10205   subl(rtmp, dst);
10206   movl(rcx, rtmp);
10207   movl(dst, MINUS_32);
10208   testl(rtmp, dst);
10209   jccb(Assembler::notEqual, L_special_case);
10210   movdl(dst, src);
10211   andl(dst, FloatConsts_SIGNIF_BIT_MASK);
10212   orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
10213   movdl(rtmp, src);
10214   testl(rtmp, rtmp);
10215   jccb(Assembler::greaterEqual, L_block1);
10216   negl(dst);
10217   bind(L_block1);
10218   sarl(dst);
10219   addl(dst, 0x1);
10220   sarl(dst, 0x1);
10221   jmp(L_exit);
10222   bind(L_special_case);
10223   convert_f2i(dst, src);
10224   bind(L_exit);
10225 }
10226 
10227 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding
  // algorithm; see java.lang.Math.round(double) for details.
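  // Same scheme as round_float above, widened to 64 bits: extract the biased
  // exponent, form shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS) - exp, and when
  // 0 <= shift < 64 compute r = (mantissa | implicit bit), negate it for a
  // negative input, and return ((r >> shift) + 1) >> 1; otherwise fall back to
  // convert_d2l.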
10230   const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
10231   const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
10232   const int64_t DoubleConsts_EXP_BIAS = 1023;
10233   const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
10234   const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
10235   Label L_special_case, L_block1, L_exit;
10236   mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
10237   movq(dst, src);
10238   andq(dst, rtmp);
10239   sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
10240   mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
10241   subq(rtmp, dst);
10242   movq(rcx, rtmp);
10243   mov64(dst, MINUS_64);
10244   testq(rtmp, dst);
10245   jccb(Assembler::notEqual, L_special_case);
10246   movq(dst, src);
10247   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
10248   andq(dst, rtmp);
10249   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
10250   orq(dst, rtmp);
10251   movq(rtmp, src);
10252   testq(rtmp, rtmp);
10253   jccb(Assembler::greaterEqual, L_block1);
10254   negq(dst);
10255   bind(L_block1);
10256   sarq(dst);
10257   addq(dst, 0x1);
10258   sarq(dst, 0x1);
10259   jmp(L_exit);
10260   bind(L_special_case);
10261   convert_d2l(dst, src);
10262   bind(L_exit);
10263 }
10264 
10265 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
10266   Label done;
10267   cvttsd2siq(dst, src);
10268   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10269   jccb(Assembler::notEqual, done);
10270   subptr(rsp, 8);
10271   movdbl(Address(rsp, 0), src);
10272   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
10273   pop(dst);
10274   bind(done);
10275 }
10276 
10277 void MacroAssembler::cache_wb(Address line)
10278 {
  // 64-bit CPUs always support clflush
10280   assert(VM_Version::supports_clflush(), "clflush should be available");
10281   bool optimized = VM_Version::supports_clflushopt();
10282   bool no_evict = VM_Version::supports_clwb();
10283 
  // prefer clwb (writeback without evict); otherwise
  // prefer clflushopt (potentially parallel writeback with evict);
  // otherwise fall back on clflush (serial writeback with evict)
10287 
10288   if (optimized) {
10289     if (no_evict) {
10290       clwb(line);
10291     } else {
10292       clflushopt(line);
10293     }
10294   } else {
10295     // no need for fence when using CLFLUSH
10296     clflush(line);
10297   }
10298 }
10299 
10300 void MacroAssembler::cache_wbsync(bool is_pre)
10301 {
10302   assert(VM_Version::supports_clflush(), "clflush should be available");
10303   bool optimized = VM_Version::supports_clflushopt();
10304   bool no_evict = VM_Version::supports_clwb();
10305 
10306   // pick the correct implementation
10307 
10308   if (!is_pre && (optimized || no_evict)) {
10309     // need an sfence for post flush when using clflushopt or clwb
    // otherwise there is no need for any synchronization
10311 
10312     sfence();
10313   }
10314 }
10315 
10316 #endif // _LP64
10317 
10318 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
10319   switch (cond) {
10320     // Note some conditions are synonyms for others
10321     case Assembler::zero:         return Assembler::notZero;
10322     case Assembler::notZero:      return Assembler::zero;
10323     case Assembler::less:         return Assembler::greaterEqual;
10324     case Assembler::lessEqual:    return Assembler::greater;
10325     case Assembler::greater:      return Assembler::lessEqual;
10326     case Assembler::greaterEqual: return Assembler::less;
10327     case Assembler::below:        return Assembler::aboveEqual;
10328     case Assembler::belowEqual:   return Assembler::above;
10329     case Assembler::above:        return Assembler::belowEqual;
10330     case Assembler::aboveEqual:   return Assembler::below;
10331     case Assembler::overflow:     return Assembler::noOverflow;
10332     case Assembler::noOverflow:   return Assembler::overflow;
10333     case Assembler::negative:     return Assembler::positive;
10334     case Assembler::positive:     return Assembler::negative;
10335     case Assembler::parity:       return Assembler::noParity;
10336     case Assembler::noParity:     return Assembler::parity;
10337   }
10338   ShouldNotReachHere(); return Assembler::overflow;
10339 }
10340 
10341 SkipIfEqual::SkipIfEqual(
10342     MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) {
10343   _masm = masm;
10344   _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch);
10345   _masm->jcc(Assembler::equal, _label);
10346 }
10347 
10348 SkipIfEqual::~SkipIfEqual() {
10349   _masm->bind(_label);
10350 }
10351 
10352 // 32-bit Windows has its own fast-path implementation
10353 // of get_thread
10354 #if !defined(WIN32) || defined(_LP64)
10355 
10356 // This is simply a call to Thread::current()
10357 void MacroAssembler::get_thread(Register thread) {
10358   if (thread != rax) {
10359     push(rax);
10360   }
10361   LP64_ONLY(push(rdi);)
10362   LP64_ONLY(push(rsi);)
10363   push(rdx);
10364   push(rcx);
10365 #ifdef _LP64
10366   push(r8);
10367   push(r9);
10368   push(r10);
10369   push(r11);
10370 #endif
10371 
10372   MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
10373 
10374 #ifdef _LP64
10375   pop(r11);
10376   pop(r10);
10377   pop(r9);
10378   pop(r8);
10379 #endif
10380   pop(rcx);
10381   pop(rdx);
10382   LP64_ONLY(pop(rsi);)
10383   LP64_ONLY(pop(rdi);)
10384   if (thread != rax) {
10385     mov(thread, rax);
10386     pop(rax);
10387   }
10388 }
10389 
10390 
10391 #endif // !WIN32 || _LP64
10392 
10393 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
10394   Label L_stack_ok;
10395   if (bias == 0) {
10396     testptr(sp, 2 * wordSize - 1);
10397   } else {
    // lea(tmp, Address(rsp, bias));
10399     mov(tmp, sp);
10400     addptr(tmp, bias);
10401     testptr(tmp, 2 * wordSize - 1);
10402   }
10403   jcc(Assembler::equal, L_stack_ok);
10404   block_comment(msg);
10405   stop(msg);
10406   bind(L_stack_ok);
10407 }