1 /*
    2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
    3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4  *
    5  * This code is free software; you can redistribute it and/or modify it
    6  * under the terms of the GNU General Public License version 2 only, as
    7  * published by the Free Software Foundation.
    8  *
    9  * This code is distributed in the hope that it will be useful, but WITHOUT
   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "ci/ciInlineKlass.hpp"
   32 #include "crc32c.h"
   33 #include "gc/shared/barrierSet.hpp"
   34 #include "gc/shared/barrierSetAssembler.hpp"
   35 #include "gc/shared/collectedHeap.inline.hpp"
   36 #include "gc/shared/tlab_globals.hpp"
   37 #include "interpreter/bytecodeHistogram.hpp"
   38 #include "interpreter/interpreter.hpp"
   39 #include "interpreter/interpreterRuntime.hpp"
   40 #include "jvm.h"
   41 #include "memory/resourceArea.hpp"
   42 #include "memory/universe.hpp"
   43 #include "oops/accessDecorators.hpp"
   44 #include "oops/compressedKlass.inline.hpp"
   45 #include "oops/compressedOops.inline.hpp"
   46 #include "oops/klass.inline.hpp"
   47 #include "oops/resolvedFieldEntry.hpp"
   48 #include "prims/methodHandles.hpp"
   49 #include "runtime/continuation.hpp"
   50 #include "runtime/interfaceSupport.inline.hpp"
   51 #include "runtime/javaThread.hpp"
   52 #include "runtime/jniHandles.hpp"
   53 #include "runtime/objectMonitor.hpp"
   54 #include "runtime/os.hpp"
   55 #include "runtime/safepoint.hpp"
   56 #include "runtime/safepointMechanism.hpp"
   57 #include "runtime/sharedRuntime.hpp"
   58 #include "runtime/signature_cc.hpp"
   59 #include "runtime/stubRoutines.hpp"
   60 #include "utilities/checkedCast.hpp"
   61 #include "utilities/macros.hpp"
   62 #include "vmreg_x86.inline.hpp"
   63 #ifdef COMPILER2
   64 #include "opto/output.hpp"
   65 #endif
   66 
   67 #ifdef PRODUCT
   68 #define BLOCK_COMMENT(str) /* nothing */
   69 #define STOP(error) stop(error)
   70 #else
   71 #define BLOCK_COMMENT(str) block_comment(str)
   72 #define STOP(error) block_comment(error); stop(error)
   73 #endif
   74 
   75 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   76 
   77 #ifdef ASSERT
   78 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   79 #endif
   80 
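      // Maps each condition code to its logical negation (same flags, opposite
      // outcome), indexed by the Assembler::Condition encodings noted below.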
   81 static const Assembler::Condition reverse[] = {
   82     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   83     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   84     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   85     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
   86     Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
   87     Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
   88     Assembler::above          /* belowEqual    = 0x6 */ ,
   89     Assembler::belowEqual     /* above         = 0x7 */ ,
   90     Assembler::positive       /* negative      = 0x8 */ ,
   91     Assembler::negative       /* positive      = 0x9 */ ,
   92     Assembler::noParity       /* parity        = 0xa */ ,
   93     Assembler::parity         /* noParity      = 0xb */ ,
   94     Assembler::greaterEqual   /* less          = 0xc */ ,
   95     Assembler::less           /* greaterEqual  = 0xd */ ,
   96     Assembler::greater        /* lessEqual     = 0xe */ ,
   97     Assembler::lessEqual      /* greater       = 0xf, */
   98 
   99 };
  100 
  101 
  102 // Implementation of MacroAssembler
  103 
  104 Address MacroAssembler::as_Address(AddressLiteral adr) {
  105   // amd64 always does this as a pc-relative address.
  106   // Whether we end up absolute or displacement-based depends on the
  107   // instruction type: jmp/call use displacements, the others are absolute.
  108   assert(!adr.is_lval(), "must be rval");
  109   assert(reachable(adr), "must be");
  110   return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
  111 
  112 }
  113 
  114 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  115   AddressLiteral base = adr.base();
  116   lea(rscratch, base);
  117   Address index = adr.index();
  118   assert(index._disp == 0, "must not have disp"); // maybe it can?
  119   Address array(rscratch, index._index, index._scale, index._disp);
  120   return array;
  121 }
  122 
  123 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  124   Label L, E;
  125 
  126 #ifdef _WIN64
  127   // Windows always allocates space for its register args
  128   assert(num_args <= 4, "only register arguments supported");
  129   subq(rsp,  frame::arg_reg_save_area_bytes);
  130 #endif
  131 
  132   // Align stack if necessary
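        // (the C ABI requires rsp to be 16-byte aligned at the call; since rsp is
        //  always 8-byte aligned here, a non-zero low nibble means it is off by 8)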
  133   testl(rsp, 15);
  134   jcc(Assembler::zero, L);
  135 
  136   subq(rsp, 8);
  137   call(RuntimeAddress(entry_point));
  138   addq(rsp, 8);
  139   jmp(E);
  140 
  141   bind(L);
  142   call(RuntimeAddress(entry_point));
  143 
  144   bind(E);
  145 
  146 #ifdef _WIN64
  147   // restore stack pointer
  148   addq(rsp, frame::arg_reg_save_area_bytes);
  149 #endif
  150 }
  151 
  152 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  153   assert(!src2.is_lval(), "should use cmpptr");
  154   assert(rscratch != noreg || always_reachable(src2), "missing");
  155 
  156   if (reachable(src2)) {
  157     cmpq(src1, as_Address(src2));
  158   } else {
  159     lea(rscratch, src2);
  160     Assembler::cmpq(src1, Address(rscratch, 0));
  161   }
  162 }
  163 
  164 int MacroAssembler::corrected_idivq(Register reg) {
  165   // Full implementation of Java ldiv and lrem; checks for special
  166   // case as described in JVM spec., p.243 & p.271.  The function
  167   // returns the (pc) offset of the idivl instruction - may be needed
  168   // for implicit exceptions.
  169   //
  170   //         normal case                           special case
  171   //
  172   // input : rax: dividend                         min_long
  173   //         reg: divisor   (may not be rax/rdx)   -1
  174   //
  175   // output: rax: quotient  (= rax idiv reg)       min_long
  176   //         rdx: remainder (= rax irem reg)       0
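        //
        // The special case exists because min_long / -1 overflows (+2^63 is not
        // representable) and idivq would raise #DE; the JVM spec instead requires
        // the result min_long with remainder 0.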
  177   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  178   static const int64_t min_long = 0x8000000000000000;
  179   Label normal_case, special_case;
  180 
  181   // check for special case
  182   cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  183   jcc(Assembler::notEqual, normal_case);
  184   xorl(rdx, rdx); // prepare rdx for possible special case (where
  185                   // remainder = 0)
  186   cmpq(reg, -1);
  187   jcc(Assembler::equal, special_case);
  188 
  189   // handle normal case
  190   bind(normal_case);
  191   cdqq();
  192   int idivq_offset = offset();
  193   idivq(reg);
  194 
  195   // normal and special case exit
  196   bind(special_case);
  197 
  198   return idivq_offset;
  199 }
  200 
  201 void MacroAssembler::decrementq(Register reg, int value) {
  202   if (value == min_jint) { subq(reg, value); return; }
  203   if (value <  0) { incrementq(reg, -value); return; }
  204   if (value == 0) {                        ; return; }
  205   if (value == 1 && UseIncDec) { decq(reg) ; return; }
  206   /* else */      { subq(reg, value)       ; return; }
  207 }
  208 
  209 void MacroAssembler::decrementq(Address dst, int value) {
  210   if (value == min_jint) { subq(dst, value); return; }
  211   if (value <  0) { incrementq(dst, -value); return; }
  212   if (value == 0) {                        ; return; }
  213   if (value == 1 && UseIncDec) { decq(dst) ; return; }
  214   /* else */      { subq(dst, value)       ; return; }
  215 }
  216 
  217 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  218   assert(rscratch != noreg || always_reachable(dst), "missing");
  219 
  220   if (reachable(dst)) {
  221     incrementq(as_Address(dst));
  222   } else {
  223     lea(rscratch, dst);
  224     incrementq(Address(rscratch, 0));
  225   }
  226 }
  227 
  228 void MacroAssembler::incrementq(Register reg, int value) {
  229   if (value == min_jint) { addq(reg, value); return; }
  230   if (value <  0) { decrementq(reg, -value); return; }
  231   if (value == 0) {                        ; return; }
  232   if (value == 1 && UseIncDec) { incq(reg) ; return; }
  233   /* else */      { addq(reg, value)       ; return; }
  234 }
  235 
  236 void MacroAssembler::incrementq(Address dst, int value) {
  237   if (value == min_jint) { addq(dst, value); return; }
  238   if (value <  0) { decrementq(dst, -value); return; }
  239   if (value == 0) {                        ; return; }
  240   if (value == 1 && UseIncDec) { incq(dst) ; return; }
  241   /* else */      { addq(dst, value)       ; return; }
  242 }
  243 
  244 // 32-bit could do a case-table jump in one instruction, but we no longer allow the base
  245 // to be installed in the Address class.
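      // Instead we materialize the table base into rscratch and dispatch through
      // base + index*scale.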
  246 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  247   lea(rscratch, entry.base());
  248   Address dispatch = entry.index();
  249   assert(dispatch._base == noreg, "must be");
  250   dispatch._base = rscratch;
  251   jmp(dispatch);
  252 }
  253 
  254 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  255   ShouldNotReachHere(); // 64bit doesn't use two regs
  256   cmpq(x_lo, y_lo);
  257 }
  258 
  259 void MacroAssembler::lea(Register dst, AddressLiteral src) {
  260   mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  261 }
  262 
  263 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  264   lea(rscratch, adr);
  265   movptr(dst, rscratch);
  266 }
  267 
  268 void MacroAssembler::leave() {
  269   // %%% is this really better? Why not on 32bit too?
  270   emit_int8((unsigned char)0xC9); // LEAVE
  271 }
  272 
  273 void MacroAssembler::lneg(Register hi, Register lo) {
  274   ShouldNotReachHere(); // 64bit doesn't use two regs
  275   negq(lo);
  276 }
  277 
  278 void MacroAssembler::movoop(Register dst, jobject obj) {
  279   mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  280 }
  281 
  282 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  283   mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  284   movq(dst, rscratch);
  285 }
  286 
  287 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  288   mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  289 }
  290 
  291 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  292   mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  293   movq(dst, rscratch);
  294 }
  295 
  296 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  297   if (src.is_lval()) {
  298     mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  299   } else {
  300     if (reachable(src)) {
  301       movq(dst, as_Address(src));
  302     } else {
  303       lea(dst, src);
  304       movq(dst, Address(dst, 0));
  305     }
  306   }
  307 }
  308 
  309 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  310   movq(as_Address(dst, rscratch), src);
  311 }
  312 
  313 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  314   movq(dst, as_Address(src, dst /*rscratch*/));
  315 }
  316 
  317 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
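      // (this path records no relocation for the value, so anything the GC or code
      //  patching needs to find must be passed as an AddressLiteral)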
  318 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  319   if (is_simm32(src)) {
  320     movptr(dst, checked_cast<int32_t>(src));
  321   } else {
  322     mov64(rscratch, src);
  323     movq(dst, rscratch);
  324   }
  325 }
  326 
  327 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  328   movoop(rscratch, obj);
  329   push(rscratch);
  330 }
  331 
  332 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  333   mov_metadata(rscratch, obj);
  334   push(rscratch);
  335 }
  336 
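      // If src is an lval the literal address itself is pushed, otherwise the
      // 64-bit value stored at that address is pushed.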
  337 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  338   lea(rscratch, src);
  339   if (src.is_lval()) {
  340     push(rscratch);
  341   } else {
  342     pushq(Address(rscratch, 0));
  343   }
  344 }
  345 
  346 static void pass_arg0(MacroAssembler* masm, Register arg) {
  347   if (c_rarg0 != arg ) {
  348     masm->mov(c_rarg0, arg);
  349   }
  350 }
  351 
  352 static void pass_arg1(MacroAssembler* masm, Register arg) {
  353   if (c_rarg1 != arg ) {
  354     masm->mov(c_rarg1, arg);
  355   }
  356 }
  357 
  358 static void pass_arg2(MacroAssembler* masm, Register arg) {
  359   if (c_rarg2 != arg ) {
  360     masm->mov(c_rarg2, arg);
  361   }
  362 }
  363 
  364 static void pass_arg3(MacroAssembler* masm, Register arg) {
  365   if (c_rarg3 != arg ) {
  366     masm->mov(c_rarg3, arg);
  367   }
  368 }
  369 
  370 void MacroAssembler::stop(const char* msg) {
  371   if (ShowMessageBoxOnError) {
  372     address rip = pc();
  373     pusha(); // get regs on stack
  374     lea(c_rarg1, InternalAddress(rip));
  375     movq(c_rarg2, rsp); // pass pointer to regs array
  376   }
  377   // Skip AOT-caching of C strings when emitting into the scratch buffer.
  378   const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
  379   lea(c_rarg0, ExternalAddress((address) str));
  380   andq(rsp, -16); // align stack as required by ABI
  381   call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  382   hlt();
  383 }
  384 
  385 void MacroAssembler::warn(const char* msg) {
  386   push(rbp);
  387   movq(rbp, rsp);
  388   andq(rsp, -16);     // align stack as required by push_CPU_state and call
  389   push_CPU_state();   // keeps alignment at 16 bytes
  390 
  391 #ifdef _WIN64
  392   // Windows always allocates space for its register args
  393   subq(rsp,  frame::arg_reg_save_area_bytes);
  394 #endif
  395   lea(c_rarg0, ExternalAddress((address) msg));
  396   call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  397 
  398 #ifdef _WIN64
  399   // restore stack pointer
  400   addq(rsp, frame::arg_reg_save_area_bytes);
  401 #endif
  402   pop_CPU_state();
  403   mov(rsp, rbp);
  404   pop(rbp);
  405 }
  406 
  407 void MacroAssembler::print_state() {
  408   address rip = pc();
  409   pusha();            // get regs on stack
  410   push(rbp);
  411   movq(rbp, rsp);
  412   andq(rsp, -16);     // align stack as required by push_CPU_state and call
  413   push_CPU_state();   // keeps alignment at 16 bytes
  414 
  415   lea(c_rarg0, InternalAddress(rip));
  416   lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  417   call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
  418 
  419   pop_CPU_state();
  420   mov(rsp, rbp);
  421   pop(rbp);
  422   popa();
  423 }
  424 
  425 #ifndef PRODUCT
  426 extern "C" void findpc(intptr_t x);
  427 #endif
  428 
  429 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  430   // In order to get locks to work, we need to fake an in_VM state
  431   if (ShowMessageBoxOnError) {
  432     JavaThread* thread = JavaThread::current();
  433     JavaThreadState saved_state = thread->thread_state();
  434     thread->set_thread_state(_thread_in_vm);
  435 #ifndef PRODUCT
  436     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
  437       ttyLocker ttyl;
  438       BytecodeCounter::print();
  439     }
  440 #endif
  441     // To see where a verify_oop failed, get $ebx+40/X for this frame.
  442     // XXX correct this offset for amd64
  443     // This is the value of eip which points to where verify_oop will return.
  444     if (os::message_box(msg, "Execution stopped, print registers?")) {
  445       print_state64(pc, regs);
  446       BREAKPOINT;
  447     }
  448   }
  449   fatal("DEBUG MESSAGE: %s", msg);
  450 }
  451 
  452 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  453   ttyLocker ttyl;
  454   DebuggingContext debugging{};
  455   tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
  456 #ifndef PRODUCT
  457   tty->cr();
  458   findpc(pc);
  459   tty->cr();
  460 #endif
  461 #define PRINT_REG(rax, value) \
  462   { tty->print("%s = ", #rax); os::print_location(tty, value); }
  463   PRINT_REG(rax, regs[15]);
  464   PRINT_REG(rbx, regs[12]);
  465   PRINT_REG(rcx, regs[14]);
  466   PRINT_REG(rdx, regs[13]);
  467   PRINT_REG(rdi, regs[8]);
  468   PRINT_REG(rsi, regs[9]);
  469   PRINT_REG(rbp, regs[10]);
  470   // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  471   PRINT_REG(rsp, (intptr_t)(&regs[16]));
  472   PRINT_REG(r8 , regs[7]);
  473   PRINT_REG(r9 , regs[6]);
  474   PRINT_REG(r10, regs[5]);
  475   PRINT_REG(r11, regs[4]);
  476   PRINT_REG(r12, regs[3]);
  477   PRINT_REG(r13, regs[2]);
  478   PRINT_REG(r14, regs[1]);
  479   PRINT_REG(r15, regs[0]);
  480 #undef PRINT_REG
  481   // Print some words near the top of the stack.
  482   int64_t* rsp = &regs[16];
  483   int64_t* dump_sp = rsp;
  484   for (int col1 = 0; col1 < 8; col1++) {
  485     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  486     os::print_location(tty, *dump_sp++);
  487   }
  488   for (int row = 0; row < 25; row++) {
  489     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  490     for (int col = 0; col < 4; col++) {
  491       tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
  492     }
  493     tty->cr();
  494   }
  495   // Print some instructions around pc:
  496   Disassembler::decode((address)pc-64, (address)pc);
  497   tty->print_cr("--------");
  498   Disassembler::decode((address)pc, (address)pc+32);
  499 }
  500 
  501 // The java_calling_convention describes stack locations as ideal slots on
  502 // a frame with no ABI restrictions. Since we must observe ABI restrictions
  503 // (like the placement of the saved rbp and the return address) the slots
  504 // must be biased by the following value.
  505 static int reg2offset_in(VMReg r) {
  506   // Account for saved rbp and return address
  507   // This should really be in_preserve_stack_slots
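        // (saved rbp + return address = 2 words = 4 stack slots of 4 bytes, hence "+ 4")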
  508   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
  509 }
  510 
  511 static int reg2offset_out(VMReg r) {
  512   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
  513 }
  514 
  515 // A long move
  516 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  517 
  518   // The calling convention assures us that each VMRegPair is either
  519   // entirely one physical register or a pair of adjacent stack slots.
  520 
  521   if (src.is_single_phys_reg() ) {
  522     if (dst.is_single_phys_reg()) {
  523       if (dst.first() != src.first()) {
  524         mov(dst.first()->as_Register(), src.first()->as_Register());
  525       }
  526     } else {
  527       assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
  528              src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
  529       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  530     }
  531   } else if (dst.is_single_phys_reg()) {
  532     assert(src.is_single_reg(),  "not a stack pair");
  533     movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  534   } else {
  535     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  536     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  537     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  538   }
  539 }
  540 
  541 // A double move
  542 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  543 
  544   // The calling convention assures us that each VMRegPair is either
  545   // entirely one physical register or a pair of adjacent stack slots.
  546 
  547   if (src.is_single_phys_reg() ) {
  548     if (dst.is_single_phys_reg()) {
  549       // In theory these overlap but the ordering is such that this is likely a nop
  550       if ( src.first() != dst.first()) {
  551         movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
  552       }
  553     } else {
  554       assert(dst.is_single_reg(), "not a stack pair");
  555       movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  556     }
  557   } else if (dst.is_single_phys_reg()) {
  558     assert(src.is_single_reg(),  "not a stack pair");
  559     movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  560   } else {
  561     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  562     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  563     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  564   }
  565 }
  566 
  567 
  568 // A float arg may have to go through an integer register (for a stack-to-stack move)
  569 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  570   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
  571 
  572   // The calling convention assures us that each VMRegPair is either
  573   // entirely one physical register or a pair of adjacent stack slots.
  574 
  575   if (src.first()->is_stack()) {
  576     if (dst.first()->is_stack()) {
  577       movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  578       movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  579     } else {
  580       // stack to reg
  581       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  582       movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  583     }
  584   } else if (dst.first()->is_stack()) {
  585     // reg to stack
  586     assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  587     movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  588   } else {
  589     // reg to reg
  590     // In theory these overlap but the ordering is such that this is likely a nop
  591     if ( src.first() != dst.first()) {
  592       movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
  593     }
  594   }
  595 }
  596 
  597 // On 64-bit we store integer-like items to the stack as
  598 // 64-bit items (x86_32/64 ABI) even though Java would only store
  599 // 32 bits for a parameter. On 32-bit it would simply be 32 bits,
  600 // so this routine does 32->32 on 32-bit and 32->64 on 64-bit.
  601 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  602   if (src.first()->is_stack()) {
  603     if (dst.first()->is_stack()) {
  604       // stack to stack
  605       movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  606       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  607     } else {
  608       // stack to reg
  609       movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  610     }
  611   } else if (dst.first()->is_stack()) {
  612     // reg to stack
  613     // Do we really have to sign extend???
  614     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
  615     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  616   } else {
  617     // Do we really have to sign extend???
  618     // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
  619     if (dst.first() != src.first()) {
  620       movq(dst.first()->as_Register(), src.first()->as_Register());
  621     }
  622   }
  623 }
  624 
  625 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  626   if (src.first()->is_stack()) {
  627     if (dst.first()->is_stack()) {
  628       // stack to stack
  629       movq(rax, Address(rbp, reg2offset_in(src.first())));
  630       movq(Address(rsp, reg2offset_out(dst.first())), rax);
  631     } else {
  632       // stack to reg
  633       movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
  634     }
  635   } else if (dst.first()->is_stack()) {
  636     // reg to stack
  637     movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  638   } else {
  639     if (dst.first() != src.first()) {
  640       movq(dst.first()->as_Register(), src.first()->as_Register());
  641     }
  642   }
  643 }
  644 
  645 // An oop arg. Must pass a handle, not the oop itself.
  646 void MacroAssembler::object_move(OopMap* map,
  647                         int oop_handle_offset,
  648                         int framesize_in_slots,
  649                         VMRegPair src,
  650                         VMRegPair dst,
  651                         bool is_receiver,
  652                         int* receiver_offset) {
  653 
  654   // We must pass a handle. First figure out the location we use as a handle.
  655 
  656   Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
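        // rHandle ends up holding either the address of the stack slot containing the
        // oop (i.e. the handle) or null when the oop itself is null.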
  657 
  658   // See if the oop is null; if it is, we need no handle.
  659 
  660   if (src.first()->is_stack()) {
  661 
  662     // Oop is already on the stack as an argument
  663     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
  664     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  665     if (is_receiver) {
  666       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
  667     }
  668 
  669     cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
  670     lea(rHandle, Address(rbp, reg2offset_in(src.first())));
  671     // conditionally move a null
  672     cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  673   } else {
  674 
  675     // Oop is in a register; we must store it to the space we reserve
  676     // on the stack for oop handles, and pass a handle if the oop is non-null.
  677 
  678     const Register rOop = src.first()->as_Register();
  679     int oop_slot;
  680     if (rOop == j_rarg0)
  681       oop_slot = 0;
  682     else if (rOop == j_rarg1)
  683       oop_slot = 1;
  684     else if (rOop == j_rarg2)
  685       oop_slot = 2;
  686     else if (rOop == j_rarg3)
  687       oop_slot = 3;
  688     else if (rOop == j_rarg4)
  689       oop_slot = 4;
  690     else {
  691       assert(rOop == j_rarg5, "wrong register");
  692       oop_slot = 5;
  693     }
  694 
  695     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
  696     int offset = oop_slot*VMRegImpl::stack_slot_size;
  697 
  698     map->set_oop(VMRegImpl::stack2reg(oop_slot));
  699     // Store oop in handle area, may be null
  700     movptr(Address(rsp, offset), rOop);
  701     if (is_receiver) {
  702       *receiver_offset = offset;
  703     }
  704 
  705     cmpptr(rOop, NULL_WORD);
  706     lea(rHandle, Address(rsp, offset));
  707     // conditionally move a null from the handle area where it was just stored
  708     cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  709   }
  710 
  711   // If the arg's destination is on the stack, store the handle there; otherwise it is already in the correct register.
  712   if (dst.first()->is_stack()) {
  713     movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  714   }
  715 }
  716 
  717 void MacroAssembler::addptr(Register dst, int32_t imm32) {
  718   addq(dst, imm32);
  719 }
  720 
  721 void MacroAssembler::addptr(Register dst, Register src) {
  722   addq(dst, src);
  723 }
  724 
  725 void MacroAssembler::addptr(Address dst, Register src) {
  726   addq(dst, src);
  727 }
  728 
  729 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  730   assert(rscratch != noreg || always_reachable(src), "missing");
  731 
  732   if (reachable(src)) {
  733     Assembler::addsd(dst, as_Address(src));
  734   } else {
  735     lea(rscratch, src);
  736     Assembler::addsd(dst, Address(rscratch, 0));
  737   }
  738 }
  739 
  740 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  741   assert(rscratch != noreg || always_reachable(src), "missing");
  742 
  743   if (reachable(src)) {
  744     addss(dst, as_Address(src));
  745   } else {
  746     lea(rscratch, src);
  747     addss(dst, Address(rscratch, 0));
  748   }
  749 }
  750 
  751 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  752   assert(rscratch != noreg || always_reachable(src), "missing");
  753 
  754   if (reachable(src)) {
  755     Assembler::addpd(dst, as_Address(src));
  756   } else {
  757     lea(rscratch, src);
  758     Assembler::addpd(dst, Address(rscratch, 0));
  759   }
  760 }
  761 
  762 // See 8273459.  Function for ensuring 64-byte alignment, intended for stubs only.
  763 // Stub code is generated once and never copied.
  764 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
  765 void MacroAssembler::align64() {
  766   align(64, (uint)(uintptr_t)pc());
  767 }
  768 
  769 void MacroAssembler::align32() {
  770   align(32, (uint)(uintptr_t)pc());
  771 }
  772 
  773 void MacroAssembler::align(uint modulus) {
  774   // 8273459: Ensure alignment is possible with current segment alignment
  775   assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  776   align(modulus, offset());
  777 }
  778 
  779 void MacroAssembler::align(uint modulus, uint target) {
  780   if (target % modulus != 0) {
  781     nop(modulus - (target % modulus));
  782   }
  783 }
  784 
  785 void MacroAssembler::push_f(XMMRegister r) {
  786   subptr(rsp, wordSize);
  787   movflt(Address(rsp, 0), r);
  788 }
  789 
  790 void MacroAssembler::pop_f(XMMRegister r) {
  791   movflt(r, Address(rsp, 0));
  792   addptr(rsp, wordSize);
  793 }
  794 
  795 void MacroAssembler::push_d(XMMRegister r) {
  796   subptr(rsp, 2 * wordSize);
  797   movdbl(Address(rsp, 0), r);
  798 }
  799 
  800 void MacroAssembler::pop_d(XMMRegister r) {
  801   movdbl(r, Address(rsp, 0));
  802   addptr(rsp, 2 * Interpreter::stackElementSize);
  803 }
  804 
  805 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  806   // Used in sign-masking with aligned address.
  807   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  808   assert(rscratch != noreg || always_reachable(src), "missing");
  809 
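        // XMM registers 16-31 require an EVEX encoding; without AVX512DQ+VL the legacy
        // andpd cannot reach them, so fall back to a 512-bit vpand (AVX512F) instead.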
  810   if (UseAVX > 2 &&
  811       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
  812       (dst->encoding() >= 16)) {
  813     vpand(dst, dst, src, AVX_512bit, rscratch);
  814   } else if (reachable(src)) {
  815     Assembler::andpd(dst, as_Address(src));
  816   } else {
  817     lea(rscratch, src);
  818     Assembler::andpd(dst, Address(rscratch, 0));
  819   }
  820 }
  821 
  822 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  823   // Used in sign-masking with aligned address.
  824   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  825   assert(rscratch != noreg || always_reachable(src), "missing");
  826 
  827   if (reachable(src)) {
  828     Assembler::andps(dst, as_Address(src));
  829   } else {
  830     lea(rscratch, src);
  831     Assembler::andps(dst, Address(rscratch, 0));
  832   }
  833 }
  834 
  835 void MacroAssembler::andptr(Register dst, int32_t imm32) {
  836   andq(dst, imm32);
  837 }
  838 
  839 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  840   assert(rscratch != noreg || always_reachable(src), "missing");
  841 
  842   if (reachable(src)) {
  843     andq(dst, as_Address(src));
  844   } else {
  845     lea(rscratch, src);
  846     andq(dst, Address(rscratch, 0));
  847   }
  848 }
  849 
  850 void MacroAssembler::atomic_incl(Address counter_addr) {
  851   lock();
  852   incrementl(counter_addr);
  853 }
  854 
  855 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  856   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
  857 
  858   if (reachable(counter_addr)) {
  859     atomic_incl(as_Address(counter_addr));
  860   } else {
  861     lea(rscratch, counter_addr);
  862     atomic_incl(Address(rscratch, 0));
  863   }
  864 }
  865 
  866 void MacroAssembler::atomic_incq(Address counter_addr) {
  867   lock();
  868   incrementq(counter_addr);
  869 }
  870 
  871 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  872   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
  873 
  874   if (reachable(counter_addr)) {
  875     atomic_incq(as_Address(counter_addr));
  876   } else {
  877     lea(rscratch, counter_addr);
  878     atomic_incq(Address(rscratch, 0));
  879   }
  880 }
  881 
  882 // Writes to successive stack pages until the given offset is reached, to check for
  883 // stack overflow + shadow pages.  This clobbers tmp.
  884 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  885   movptr(tmp, rsp);
  886   // Bang stack for total size given plus shadow page size.
  887   // Bang one page at a time because large size can bang beyond yellow and
  888   // red zones.
  889   Label loop;
  890   bind(loop);
  891   movl(Address(tmp, (-(int)os::vm_page_size())), size );
  892   subptr(tmp, (int)os::vm_page_size());
  893   subl(size, (int)os::vm_page_size());
  894   jcc(Assembler::greater, loop);
  895 
  896   // Bang down shadow pages too.
  897   // At this point, (tmp-0) is the last address touched, so don't
  898   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  899   // was post-decremented.)  Skip this address by starting at i=1, and
  900   // touch a few more pages below.  N.B.  It is important to touch all
  901   // the way down including all pages in the shadow zone.
  902   for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
  903     // this could be any sized move, but since it can serve as a debugging crumb
  904     // the bigger the better.
  905     movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  906   }
  907 }
  908 
  909 void MacroAssembler::reserved_stack_check() {
  910   // testing if reserved zone needs to be enabled
  911   Label no_reserved_zone_enabling;
  912 
  913   cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
  914   jcc(Assembler::below, no_reserved_zone_enabling);
  915 
  916   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
  917   jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  918   should_not_reach_here();
  919 
  920   bind(no_reserved_zone_enabling);
  921 }
  922 
  923 void MacroAssembler::c2bool(Register x) {
  924   // implements x == 0 ? 0 : 1
  925   // note: must only look at the least-significant byte of x
  926   //       since C-style booleans are stored in one byte
  927   //       only! (was bug)
  928   andl(x, 0xFF);
  929   setb(Assembler::notZero, x);
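        // e.g. x = 0x0100 -> low byte 0x00 -> 0;  x = 0x0001 -> 1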
  930 }
  931 
  932 // Wouldn't be needed if the AddressLiteral version had a different name
  933 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  934   Assembler::call(L, rtype);
  935 }
  936 
  937 void MacroAssembler::call(Register entry) {
  938   Assembler::call(entry);
  939 }
  940 
  941 void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  942   assert(rscratch != noreg || always_reachable(entry), "missing");
  943 
  944   if (reachable(entry)) {
  945     Assembler::call_literal(entry.target(), entry.rspec());
  946   } else {
  947     lea(rscratch, entry);
  948     Assembler::call(rscratch);
  949   }
  950 }
  951 
  952 void MacroAssembler::ic_call(address entry, jint method_index) {
  953   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  954   // Needs full 64-bit immediate for later patching.
  955   mov64(rax, (int64_t)Universe::non_oop_word());
  956   call(AddressLiteral(entry, rh));
  957 }
  958 
  959 int MacroAssembler::ic_check_size() {
  960   return UseCompactObjectHeaders ? 17 : 14;
  961 }
  962 
  963 int MacroAssembler::ic_check(int end_alignment) {
  964   Register receiver = j_rarg0;
  965   Register data = rax;
  966   Register temp = rscratch1;
  967 
  968   // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  969   // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  970   // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  971   // before the inline cache check here, and not after
  972   align(end_alignment, offset() + ic_check_size());
  973 
  974   int uep_offset = offset();
  975 
  976   if (UseCompactObjectHeaders) {
  977     load_narrow_klass_compact(temp, receiver);
  978     cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  979   } else if (UseCompressedClassPointers) {
  980     movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
  981     cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  982   } else {
  983     movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
  984     cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  985   }
  986 
  987   // if inline cache check fails, then jump to runtime routine
  988   jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  989   assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
  990 
  991   return uep_offset;
  992 }
  993 
  994 void MacroAssembler::emit_static_call_stub() {
  995   // Static stub relocation also tags the Method* in the code-stream.
  996   mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
  997   // This is recognized as unresolved by relocs/nativeinst/ic code.
  998   jump(RuntimeAddress(pc()));
  999 }
 1000 
 1001 // Implementation of call_VM versions
 1002 
 1003 void MacroAssembler::call_VM(Register oop_result,
 1004                              address entry_point,
 1005                              bool check_exceptions) {
 1006   Label C, E;
 1007   call(C, relocInfo::none);
 1008   jmp(E);
 1009 
 1010   bind(C);
 1011   call_VM_helper(oop_result, entry_point, 0, check_exceptions);
 1012   ret(0);
 1013 
 1014   bind(E);
 1015 }
 1016 
 1017 void MacroAssembler::call_VM(Register oop_result,
 1018                              address entry_point,
 1019                              Register arg_1,
 1020                              bool check_exceptions) {
 1021   Label C, E;
 1022   call(C, relocInfo::none);
 1023   jmp(E);
 1024 
 1025   bind(C);
 1026   pass_arg1(this, arg_1);
 1027   call_VM_helper(oop_result, entry_point, 1, check_exceptions);
 1028   ret(0);
 1029 
 1030   bind(E);
 1031 }
 1032 
 1033 void MacroAssembler::call_VM(Register oop_result,
 1034                              address entry_point,
 1035                              Register arg_1,
 1036                              Register arg_2,
 1037                              bool check_exceptions) {
 1038   Label C, E;
 1039   call(C, relocInfo::none);
 1040   jmp(E);
 1041 
 1042   bind(C);
 1043 
 1044   assert_different_registers(arg_1, c_rarg2);
 1045 
 1046   pass_arg2(this, arg_2);
 1047   pass_arg1(this, arg_1);
 1048   call_VM_helper(oop_result, entry_point, 2, check_exceptions);
 1049   ret(0);
 1050 
 1051   bind(E);
 1052 }
 1053 
 1054 void MacroAssembler::call_VM(Register oop_result,
 1055                              address entry_point,
 1056                              Register arg_1,
 1057                              Register arg_2,
 1058                              Register arg_3,
 1059                              bool check_exceptions) {
 1060   Label C, E;
 1061   call(C, relocInfo::none);
 1062   jmp(E);
 1063 
 1064   bind(C);
 1065 
 1066   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1067   assert_different_registers(arg_2, c_rarg3);
 1068   pass_arg3(this, arg_3);
 1069   pass_arg2(this, arg_2);
 1070   pass_arg1(this, arg_1);
 1071   call_VM_helper(oop_result, entry_point, 3, check_exceptions);
 1072   ret(0);
 1073 
 1074   bind(E);
 1075 }
 1076 
 1077 void MacroAssembler::call_VM(Register oop_result,
 1078                              Register last_java_sp,
 1079                              address entry_point,
 1080                              int number_of_arguments,
 1081                              bool check_exceptions) {
 1082   call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
 1083 }
 1084 
 1085 void MacroAssembler::call_VM(Register oop_result,
 1086                              Register last_java_sp,
 1087                              address entry_point,
 1088                              Register arg_1,
 1089                              bool check_exceptions) {
 1090   pass_arg1(this, arg_1);
 1091   call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
 1092 }
 1093 
 1094 void MacroAssembler::call_VM(Register oop_result,
 1095                              Register last_java_sp,
 1096                              address entry_point,
 1097                              Register arg_1,
 1098                              Register arg_2,
 1099                              bool check_exceptions) {
 1100 
 1101   assert_different_registers(arg_1, c_rarg2);
 1102   pass_arg2(this, arg_2);
 1103   pass_arg1(this, arg_1);
 1104   call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
 1105 }
 1106 
 1107 void MacroAssembler::call_VM(Register oop_result,
 1108                              Register last_java_sp,
 1109                              address entry_point,
 1110                              Register arg_1,
 1111                              Register arg_2,
 1112                              Register arg_3,
 1113                              bool check_exceptions) {
 1114   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1115   assert_different_registers(arg_2, c_rarg3);
 1116   pass_arg3(this, arg_3);
 1117   pass_arg2(this, arg_2);
 1118   pass_arg1(this, arg_1);
 1119   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 1120 }
 1121 
 1122 void MacroAssembler::super_call_VM(Register oop_result,
 1123                                    Register last_java_sp,
 1124                                    address entry_point,
 1125                                    int number_of_arguments,
 1126                                    bool check_exceptions) {
 1127   MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
 1128 }
 1129 
 1130 void MacroAssembler::super_call_VM(Register oop_result,
 1131                                    Register last_java_sp,
 1132                                    address entry_point,
 1133                                    Register arg_1,
 1134                                    bool check_exceptions) {
 1135   pass_arg1(this, arg_1);
 1136   super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
 1137 }
 1138 
 1139 void MacroAssembler::super_call_VM(Register oop_result,
 1140                                    Register last_java_sp,
 1141                                    address entry_point,
 1142                                    Register arg_1,
 1143                                    Register arg_2,
 1144                                    bool check_exceptions) {
 1145 
 1146   assert_different_registers(arg_1, c_rarg2);
 1147   pass_arg2(this, arg_2);
 1148   pass_arg1(this, arg_1);
 1149   super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
 1150 }
 1151 
 1152 void MacroAssembler::super_call_VM(Register oop_result,
 1153                                    Register last_java_sp,
 1154                                    address entry_point,
 1155                                    Register arg_1,
 1156                                    Register arg_2,
 1157                                    Register arg_3,
 1158                                    bool check_exceptions) {
 1159   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1160   assert_different_registers(arg_2, c_rarg3);
 1161   pass_arg3(this, arg_3);
 1162   pass_arg2(this, arg_2);
 1163   pass_arg1(this, arg_1);
 1164   super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 1165 }
 1166 
 1167 void MacroAssembler::call_VM_base(Register oop_result,
 1168                                   Register last_java_sp,
 1169                                   address  entry_point,
 1170                                   int      number_of_arguments,
 1171                                   bool     check_exceptions) {
 1172   Register java_thread = r15_thread;
 1173 
 1174   // determine last_java_sp register
 1175   if (!last_java_sp->is_valid()) {
 1176     last_java_sp = rsp;
 1177   }
 1178   // debugging support
 1179   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
 1180 #ifdef ASSERT
 1181   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
 1182   // that r12 is the heapbase.
 1183   if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
 1184 #endif // ASSERT
 1185 
 1186   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
 1187   assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
 1188 
 1189   // push java thread (becomes first argument of C function)
 1190 
 1191   mov(c_rarg0, r15_thread);
 1192 
 1193   // set last Java frame before call
 1194   assert(last_java_sp != rbp, "can't use ebp/rbp");
 1195 
 1196   // Only interpreter should have to set fp
 1197   set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);
 1198 
 1199   // do the call, remove parameters
 1200   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
 1201 
 1202 #ifdef ASSERT
 1203   // Check that thread register is not clobbered.
 1204   guarantee(java_thread != rax, "change this code");
 1205   push(rax);
 1206   { Label L;
 1207     get_thread_slow(rax);
 1208     cmpptr(java_thread, rax);
 1209     jcc(Assembler::equal, L);
 1210     STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
 1211     bind(L);
 1212   }
 1213   pop(rax);
 1214 #endif
 1215 
 1216   // reset last Java frame
 1217   // Only interpreter should have to clear fp
 1218   reset_last_Java_frame(true);
 1219 
 1220    // C++ interp handles this in the interpreter
 1221   check_and_handle_popframe();
 1222   check_and_handle_earlyret();
 1223 
 1224   if (check_exceptions) {
 1225     // check for pending exceptions (java_thread is set upon return)
 1226     cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 1227     // This used to conditionally jump to forward_exception; however, it is
 1228     // possible that after relocation the branch will not reach. So we must jump
 1229     // around it so we can always reach the target.
 1230 
 1231     Label ok;
 1232     jcc(Assembler::equal, ok);
 1233     jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 1234     bind(ok);
 1235   }
 1236 
 1237   // get oop result if there is one and reset the value in the thread
 1238   if (oop_result->is_valid()) {
 1239     get_vm_result_oop(oop_result);
 1240   }
 1241 }
 1242 
 1243 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
 1244   // The calculation of last_Java_sp is somewhat subtle.
 1245   // call_VM does an intermediate call which places a return address on
 1246   // the stack just under the stack pointer as the user left it.
 1247   // This allows us to retrieve last_Java_pc from last_Java_sp[-1].
 1248 
 1249   // We've pushed one address, correct last_Java_sp
 1250   lea(rax, Address(rsp, wordSize));
 1251 
 1252   call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
 1253 }
 1254 
 1255 // Use this method when the MacroAssembler version of call_VM_leaf_base() should be called from the Interpreter.
 1256 void MacroAssembler::call_VM_leaf0(address entry_point) {
 1257   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 1258 }
 1259 
 1260 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
 1261   call_VM_leaf_base(entry_point, number_of_arguments);
 1262 }
 1263 
 1264 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
 1265   pass_arg0(this, arg_0);
 1266   call_VM_leaf(entry_point, 1);
 1267 }
 1268 
 1269 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1270 
 1271   assert_different_registers(arg_0, c_rarg1);
 1272   pass_arg1(this, arg_1);
 1273   pass_arg0(this, arg_0);
 1274   call_VM_leaf(entry_point, 2);
 1275 }
 1276 
 1277 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1278   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1279   assert_different_registers(arg_1, c_rarg2);
 1280   pass_arg2(this, arg_2);
 1281   pass_arg1(this, arg_1);
 1282   pass_arg0(this, arg_0);
 1283   call_VM_leaf(entry_point, 3);
 1284 }
 1285 
 1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1287   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1288   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1289   assert_different_registers(arg_2, c_rarg3);
 1290   pass_arg3(this, arg_3);
 1291   pass_arg2(this, arg_2);
 1292   pass_arg1(this, arg_1);
 1293   pass_arg0(this, arg_0);
 1294   call_VM_leaf(entry_point, 4);
 1295 }
 1296 
 1297 void MacroAssembler::super_call_VM_leaf(address entry_point) {
 1298   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 1299 }
 1300 
 1301 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1302   pass_arg0(this, arg_0);
 1303   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1304 }
 1305 
 1306 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1307   assert_different_registers(arg_0, c_rarg1);
 1308   pass_arg1(this, arg_1);
 1309   pass_arg0(this, arg_0);
 1310   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1311 }
 1312 
 1313 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1314   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1315   assert_different_registers(arg_1, c_rarg2);
 1316   pass_arg2(this, arg_2);
 1317   pass_arg1(this, arg_1);
 1318   pass_arg0(this, arg_0);
 1319   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1320 }
 1321 
 1322 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1323   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1324   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1325   assert_different_registers(arg_2, c_rarg3);
 1326   pass_arg3(this, arg_3);
 1327   pass_arg2(this, arg_2);
 1328   pass_arg1(this, arg_1);
 1329   pass_arg0(this, arg_0);
 1330   MacroAssembler::call_VM_leaf_base(entry_point, 4);
 1331 }
 1332 
 1333 void MacroAssembler::get_vm_result_oop(Register oop_result) {
 1334   movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
 1335   movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
 1336   verify_oop_msg(oop_result, "broken oop in call_VM_base");
 1337 }
 1338 
 1339 void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
 1340   movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
 1341   movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
 1342 }
 1343 
 1344 void MacroAssembler::check_and_handle_earlyret() {
 1345 }
 1346 
 1347 void MacroAssembler::check_and_handle_popframe() {
 1348 }
 1349 
 1350 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
 1351   assert(rscratch != noreg || always_reachable(src1), "missing");
 1352 
 1353   if (reachable(src1)) {
 1354     cmpl(as_Address(src1), imm);
 1355   } else {
 1356     lea(rscratch, src1);
 1357     cmpl(Address(rscratch, 0), imm);
 1358   }
 1359 }
 1360 
 1361 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
 1362   assert(!src2.is_lval(), "use cmpptr");
 1363   assert(rscratch != noreg || always_reachable(src2), "missing");
 1364 
 1365   if (reachable(src2)) {
 1366     cmpl(src1, as_Address(src2));
 1367   } else {
 1368     lea(rscratch, src2);
 1369     cmpl(src1, Address(rscratch, 0));
 1370   }
 1371 }
 1372 
 1373 void MacroAssembler::cmp32(Register src1, int32_t imm) {
 1374   Assembler::cmpl(src1, imm);
 1375 }
 1376 
 1377 void MacroAssembler::cmp32(Register src1, Address src2) {
 1378   Assembler::cmpl(src1, src2);
 1379 }
 1380 
 1381 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
 1382   ucomisd(opr1, opr2);
 1383 
 1384   Label L;
 1385   if (unordered_is_less) {
 1386     movl(dst, -1);
 1387     jcc(Assembler::parity, L);
 1388     jcc(Assembler::below , L);
 1389     movl(dst, 0);
 1390     jcc(Assembler::equal , L);
 1391     increment(dst);
 1392   } else { // unordered is greater
 1393     movl(dst, 1);
 1394     jcc(Assembler::parity, L);
 1395     jcc(Assembler::above , L);
 1396     movl(dst, 0);
 1397     jcc(Assembler::equal , L);
 1398     decrementl(dst);
 1399   }
 1400   bind(L);
 1401 }
 1402 
 1403 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
 1404   ucomiss(opr1, opr2);
 1405 
 1406   Label L;
 1407   if (unordered_is_less) {
 1408     movl(dst, -1);
 1409     jcc(Assembler::parity, L);
 1410     jcc(Assembler::below , L);
 1411     movl(dst, 0);
 1412     jcc(Assembler::equal , L);
 1413     increment(dst);
 1414   } else { // unordered is greater
 1415     movl(dst, 1);
 1416     jcc(Assembler::parity, L);
 1417     jcc(Assembler::above , L);
 1418     movl(dst, 0);
 1419     jcc(Assembler::equal , L);
 1420     decrementl(dst);
 1421   }
 1422   bind(L);
 1423 }
 1424 
 1425 
 1426 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
 1427   assert(rscratch != noreg || always_reachable(src1), "missing");
 1428 
 1429   if (reachable(src1)) {
 1430     cmpb(as_Address(src1), imm);
 1431   } else {
 1432     lea(rscratch, src1);
 1433     cmpb(Address(rscratch, 0), imm);
 1434   }
 1435 }
 1436 
 1437 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
 1438   assert(rscratch != noreg || always_reachable(src2), "missing");
 1439 
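  // is_lval means the literal's address itself is the value to compare against,
  // so load it as an immediate rather than dereferencing it.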
 1440   if (src2.is_lval()) {
 1441     movptr(rscratch, src2);
 1442     Assembler::cmpq(src1, rscratch);
 1443   } else if (reachable(src2)) {
 1444     cmpq(src1, as_Address(src2));
 1445   } else {
 1446     lea(rscratch, src2);
 1447     Assembler::cmpq(src1, Address(rscratch, 0));
 1448   }
 1449 }
 1450 
 1451 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
 1452   assert(src2.is_lval(), "not a mem-mem compare");
 1453   // moves src2's literal address
 1454   movptr(rscratch, src2);
 1455   Assembler::cmpq(src1, rscratch);
 1456 }
 1457 
 1458 void MacroAssembler::cmpoop(Register src1, Register src2) {
 1459   cmpptr(src1, src2);
 1460 }
 1461 
 1462 void MacroAssembler::cmpoop(Register src1, Address src2) {
 1463   cmpptr(src1, src2);
 1464 }
 1465 
 1466 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
 1467   movoop(rscratch, src2);
 1468   cmpptr(src1, rscratch);
 1469 }
 1470 
 1471 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
 1472   assert(rscratch != noreg || always_reachable(adr), "missing");
 1473 
 1474   if (reachable(adr)) {
 1475     lock();
 1476     cmpxchgptr(reg, as_Address(adr));
 1477   } else {
 1478     lea(rscratch, adr);
 1479     lock();
 1480     cmpxchgptr(reg, Address(rscratch, 0));
 1481   }
 1482 }
 1483 
 1484 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
 1485   cmpxchgq(reg, adr);
 1486 }
 1487 
 1488 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1489   assert(rscratch != noreg || always_reachable(src), "missing");
 1490 
 1491   if (reachable(src)) {
 1492     Assembler::comisd(dst, as_Address(src));
 1493   } else {
 1494     lea(rscratch, src);
 1495     Assembler::comisd(dst, Address(rscratch, 0));
 1496   }
 1497 }
 1498 
 1499 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1500   assert(rscratch != noreg || always_reachable(src), "missing");
 1501 
 1502   if (reachable(src)) {
 1503     Assembler::comiss(dst, as_Address(src));
 1504   } else {
 1505     lea(rscratch, src);
 1506     Assembler::comiss(dst, Address(rscratch, 0));
 1507   }
 1508 }
 1509 
 1510 
 1511 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
 1512   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
 1513 
 1514   Condition negated_cond = negate_condition(cond);
 1515   Label L;
 1516   jcc(negated_cond, L);
 1517   pushf(); // Preserve flags
 1518   atomic_incl(counter_addr, rscratch);
 1519   popf();
 1520   bind(L);
 1521 }
 1522 
 1523 int MacroAssembler::corrected_idivl(Register reg) {
 1524   // Full implementation of Java idiv and irem; checks for
 1525   // special case as described in JVM spec., p.243 & p.271.
 1526   // The function returns the (pc) offset of the idivl
 1527   // instruction - may be needed for implicit exceptions.
 1528   //
 1529   //         normal case                           special case
 1530   //
  // input : rax: dividend                        min_int
  //         reg: divisor (may not be rax/rdx)    -1
  //
  // output: rax: quotient  (= rax idiv reg)      min_int
  //         rdx: remainder (= rax irem reg)      0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
 1537   const int min_int = 0x80000000;
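  // idivl raises #DE when min_int is divided by -1 (the quotient 2^31 is not
  // representable in 32 bits), so that combination bypasses the instruction and
  // returns rax = min_int, rdx = 0 directly.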
 1538   Label normal_case, special_case;
 1539 
 1540   // check for special case
 1541   cmpl(rax, min_int);
 1542   jcc(Assembler::notEqual, normal_case);
 1543   xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
 1544   cmpl(reg, -1);
 1545   jcc(Assembler::equal, special_case);
 1546 
 1547   // handle normal case
 1548   bind(normal_case);
 1549   cdql();
 1550   int idivl_offset = offset();
 1551   idivl(reg);
 1552 
 1553   // normal and special case exit
 1554   bind(special_case);
 1555 
 1556   return idivl_offset;
 1557 }
 1558 
 1559 
 1560 
 1561 void MacroAssembler::decrementl(Register reg, int value) {
 1562   if (value == min_jint) {subl(reg, value) ; return; }
 1563   if (value <  0) { incrementl(reg, -value); return; }
 1564   if (value == 0) {                        ; return; }
 1565   if (value == 1 && UseIncDec) { decl(reg) ; return; }
 1566   /* else */      { subl(reg, value)       ; return; }
 1567 }
 1568 
 1569 void MacroAssembler::decrementl(Address dst, int value) {
 1570   if (value == min_jint) {subl(dst, value) ; return; }
 1571   if (value <  0) { incrementl(dst, -value); return; }
 1572   if (value == 0) {                        ; return; }
 1573   if (value == 1 && UseIncDec) { decl(dst) ; return; }
 1574   /* else */      { subl(dst, value)       ; return; }
 1575 }
 1576 
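// Signed division by 2^shift_value using an arithmetic shift. sar alone rounds
// toward negative infinity, so negative dividends are first biased by
// (2^shift_value - 1) to get the round-toward-zero result Java division requires.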
void MacroAssembler::division_with_shift(Register reg, int shift_value) {
 1578   assert(shift_value > 0, "illegal shift value");
 1579   Label _is_positive;
 1580   testl (reg, reg);
 1581   jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1;
 1583 
 1584   if (offset == 1) {
 1585     incrementl(reg);
 1586   } else {
 1587     addl(reg, offset);
 1588   }
 1589 
 1590   bind (_is_positive);
 1591   sarl(reg, shift_value);
 1592 }
 1593 
 1594 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1595   assert(rscratch != noreg || always_reachable(src), "missing");
 1596 
 1597   if (reachable(src)) {
 1598     Assembler::divsd(dst, as_Address(src));
 1599   } else {
 1600     lea(rscratch, src);
 1601     Assembler::divsd(dst, Address(rscratch, 0));
 1602   }
 1603 }
 1604 
 1605 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1606   assert(rscratch != noreg || always_reachable(src), "missing");
 1607 
 1608   if (reachable(src)) {
 1609     Assembler::divss(dst, as_Address(src));
 1610   } else {
 1611     lea(rscratch, src);
 1612     Assembler::divss(dst, Address(rscratch, 0));
 1613   }
 1614 }
 1615 
 1616 void MacroAssembler::enter() {
 1617   push(rbp);
 1618   mov(rbp, rsp);
 1619 }
 1620 
 1621 void MacroAssembler::post_call_nop() {
 1622   if (!Continuations::enabled()) {
 1623     return;
 1624   }
 1625   InstructionMark im(this);
 1626   relocate(post_call_nop_Relocation::spec());
 1627   InlineSkippedInstructionsCounter skipCounter(this);
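  // 8-byte multi-byte NOP: 0F 1F 84 00 <disp32>. The relocation above lets the
  // runtime locate this nop after the call site, and the 32-bit displacement
  // field gives it room to record per-call data for continuation support.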
 1628   emit_int8((uint8_t)0x0f);
 1629   emit_int8((uint8_t)0x1f);
 1630   emit_int8((uint8_t)0x84);
 1631   emit_int8((uint8_t)0x00);
 1632   emit_int32(0x00);
 1633 }
 1634 
 1635 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1636   assert(rscratch != noreg || always_reachable(src), "missing");
 1637   if (reachable(src)) {
 1638     Assembler::mulpd(dst, as_Address(src));
 1639   } else {
 1640     lea(rscratch, src);
 1641     Assembler::mulpd(dst, Address(rscratch, 0));
 1642   }
 1643 }
 1644 
 1645 // dst = c = a * b + c
 1646 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 1647   Assembler::vfmadd231sd(c, a, b);
 1648   if (dst != c) {
 1649     movdbl(dst, c);
 1650   }
 1651 }
 1652 
 1653 // dst = c = a * b + c
 1654 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 1655   Assembler::vfmadd231ss(c, a, b);
 1656   if (dst != c) {
 1657     movflt(dst, c);
 1658   }
 1659 }
 1660 
 1661 // dst = c = a * b + c
 1662 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 1663   Assembler::vfmadd231pd(c, a, b, vector_len);
 1664   if (dst != c) {
 1665     vmovdqu(dst, c);
 1666   }
 1667 }
 1668 
 1669 // dst = c = a * b + c
 1670 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 1671   Assembler::vfmadd231ps(c, a, b, vector_len);
 1672   if (dst != c) {
 1673     vmovdqu(dst, c);
 1674   }
 1675 }
 1676 
 1677 // dst = c = a * b + c
 1678 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 1679   Assembler::vfmadd231pd(c, a, b, vector_len);
 1680   if (dst != c) {
 1681     vmovdqu(dst, c);
 1682   }
 1683 }
 1684 
 1685 // dst = c = a * b + c
 1686 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 1687   Assembler::vfmadd231ps(c, a, b, vector_len);
 1688   if (dst != c) {
 1689     vmovdqu(dst, c);
 1690   }
 1691 }
 1692 
 1693 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
 1694   assert(rscratch != noreg || always_reachable(dst), "missing");
 1695 
 1696   if (reachable(dst)) {
 1697     incrementl(as_Address(dst));
 1698   } else {
 1699     lea(rscratch, dst);
 1700     incrementl(Address(rscratch, 0));
 1701   }
 1702 }
 1703 
 1704 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
 1705   incrementl(as_Address(dst, rscratch));
 1706 }
 1707 
 1708 void MacroAssembler::incrementl(Register reg, int value) {
 1709   if (value == min_jint) {addl(reg, value) ; return; }
 1710   if (value <  0) { decrementl(reg, -value); return; }
 1711   if (value == 0) {                        ; return; }
 1712   if (value == 1 && UseIncDec) { incl(reg) ; return; }
 1713   /* else */      { addl(reg, value)       ; return; }
 1714 }
 1715 
 1716 void MacroAssembler::incrementl(Address dst, int value) {
 1717   if (value == min_jint) {addl(dst, value) ; return; }
 1718   if (value <  0) { decrementl(dst, -value); return; }
 1719   if (value == 0) {                        ; return; }
 1720   if (value == 1 && UseIncDec) { incl(dst) ; return; }
 1721   /* else */      { addl(dst, value)       ; return; }
 1722 }
 1723 
 1724 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
 1725   assert(rscratch != noreg || always_reachable(dst), "missing");
 1726   assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
 1727   if (reachable(dst)) {
 1728     jmp_literal(dst.target(), dst.rspec());
 1729   } else {
 1730     lea(rscratch, dst);
 1731     jmp(rscratch);
 1732   }
 1733 }
 1734 
 1735 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
 1736   assert(rscratch != noreg || always_reachable(dst), "missing");
 1737   assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
 1738   if (reachable(dst)) {
 1739     InstructionMark im(this);
 1740     relocate(dst.reloc());
 1741     const int short_size = 2;
 1742     const int long_size = 6;
 1743     int offs = (intptr_t)dst.target() - ((intptr_t)pc());
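    // a 2-byte short jcc is possible only when there is no relocation to record
    // and the displacement fits in a signed byte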
 1744     if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
 1745       // 0111 tttn #8-bit disp
 1746       emit_int8(0x70 | cc);
 1747       emit_int8((offs - short_size) & 0xFF);
 1748     } else {
 1749       // 0000 1111 1000 tttn #32-bit disp
 1750       emit_int8(0x0F);
 1751       emit_int8((unsigned char)(0x80 | cc));
 1752       emit_int32(offs - long_size);
 1753     }
 1754   } else {
 1755 #ifdef ASSERT
 1756     warning("reversing conditional branch");
 1757 #endif /* ASSERT */
 1758     Label skip;
 1759     jccb(reverse[cc], skip);
 1760     lea(rscratch, dst);
 1761     Assembler::jmp(rscratch);
 1762     bind(skip);
 1763   }
 1764 }
 1765 
 1766 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
 1767   ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
 1768   assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
 1769 
 1770   stmxcsr(mxcsr_save);
 1771   movl(tmp, mxcsr_save);
 1772   if (EnableX86ECoreOpts) {
 1773     // The mxcsr_std has status bits set for performance on ECore
 1774     orl(tmp, 0x003f);
 1775   } else {
 1776     // Mask out status bits (only check control and mask bits)
 1777     andl(tmp, 0xFFC0);
 1778   }
 1779   cmp32(tmp, mxcsr_std, rscratch);
 1780 }
 1781 
 1782 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
 1783   assert(rscratch != noreg || always_reachable(src), "missing");
 1784 
 1785   if (reachable(src)) {
 1786     Assembler::ldmxcsr(as_Address(src));
 1787   } else {
 1788     lea(rscratch, src);
 1789     Assembler::ldmxcsr(Address(rscratch, 0));
 1790   }
 1791 }
 1792 
 1793 int MacroAssembler::load_signed_byte(Register dst, Address src) {
 1794   int off = offset();
 1795   movsbl(dst, src); // movsxb
 1796   return off;
 1797 }
 1798 
 1799 // Note: load_signed_short used to be called load_signed_word.
 1800 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
 1801 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
 1802 // The term "word" in HotSpot means a 32- or 64-bit machine word.
 1803 int MacroAssembler::load_signed_short(Register dst, Address src) {
  // A sign-extending 16 => 64 bit load (movswq) would also be safe here, but the
  // 64-bit port has always used the 32-bit form, which implies that callers rely
  // only on the low 32 bits of the result.
 1807   int off = offset();
 1808   movswl(dst, src); // movsxw
 1809   return off;
 1810 }
 1811 
 1812 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
 1815   int off = offset();
 1816   movzbl(dst, src); // movzxb
 1817   return off;
 1818 }
 1819 
 1820 // Note: load_unsigned_short used to be called load_unsigned_word.
 1821 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
 1824   int off = offset();
 1825   movzwl(dst, src); // movzxw
 1826   return off;
 1827 }
 1828 
 1829 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
 1830   switch (size_in_bytes) {
 1831   case  8:  movq(dst, src); break;
 1832   case  4:  movl(dst, src); break;
 1833   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
 1834   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
 1835   default:  ShouldNotReachHere();
 1836   }
 1837 }
 1838 
 1839 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
 1840   switch (size_in_bytes) {
 1841   case  8:  movq(dst, src); break;
 1842   case  4:  movl(dst, src); break;
 1843   case  2:  movw(dst, src); break;
 1844   case  1:  movb(dst, src); break;
 1845   default:  ShouldNotReachHere();
 1846   }
 1847 }
 1848 
 1849 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
 1850   assert(rscratch != noreg || always_reachable(dst), "missing");
 1851 
 1852   if (reachable(dst)) {
 1853     movl(as_Address(dst), src);
 1854   } else {
 1855     lea(rscratch, dst);
 1856     movl(Address(rscratch, 0), src);
 1857   }
 1858 }
 1859 
 1860 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
 1861   if (reachable(src)) {
 1862     movl(dst, as_Address(src));
 1863   } else {
 1864     lea(dst, src);
 1865     movl(dst, Address(dst, 0));
 1866   }
 1867 }
 1868 
 1869 // C++ bool manipulation
 1870 
 1871 void MacroAssembler::movbool(Register dst, Address src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
 1881 }
 1882 
 1883 void MacroAssembler::movbool(Address dst, bool boolconst) {
  if (sizeof(bool) == 1)
    movb(dst, (int) boolconst);
  else if (sizeof(bool) == 2)
    movw(dst, (int) boolconst);
  else if (sizeof(bool) == 4)
    movl(dst, (int) boolconst);
  else
    // unsupported
    ShouldNotReachHere();
 1893 }
 1894 
 1895 void MacroAssembler::movbool(Address dst, Register src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
 1905 }
 1906 
 1907 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1908   assert(rscratch != noreg || always_reachable(src), "missing");
 1909 
 1910   if (reachable(src)) {
 1911     movdl(dst, as_Address(src));
 1912   } else {
 1913     lea(rscratch, src);
 1914     movdl(dst, Address(rscratch, 0));
 1915   }
 1916 }
 1917 
 1918 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1919   assert(rscratch != noreg || always_reachable(src), "missing");
 1920 
 1921   if (reachable(src)) {
 1922     movq(dst, as_Address(src));
 1923   } else {
 1924     lea(rscratch, src);
 1925     movq(dst, Address(rscratch, 0));
 1926   }
 1927 }
 1928 
 1929 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1930   assert(rscratch != noreg || always_reachable(src), "missing");
 1931 
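  // movsd zeroes the upper 64 bits of dst, avoiding a false dependency on the
  // register's previous contents; movlpd merges into the existing upper half
  // and is used when UseXmmLoadAndClearUpper is off.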
 1932   if (reachable(src)) {
 1933     if (UseXmmLoadAndClearUpper) {
 1934       movsd (dst, as_Address(src));
 1935     } else {
 1936       movlpd(dst, as_Address(src));
 1937     }
 1938   } else {
 1939     lea(rscratch, src);
 1940     if (UseXmmLoadAndClearUpper) {
 1941       movsd (dst, Address(rscratch, 0));
 1942     } else {
 1943       movlpd(dst, Address(rscratch, 0));
 1944     }
 1945   }
 1946 }
 1947 
 1948 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1949   assert(rscratch != noreg || always_reachable(src), "missing");
 1950 
 1951   if (reachable(src)) {
 1952     movss(dst, as_Address(src));
 1953   } else {
 1954     lea(rscratch, src);
 1955     movss(dst, Address(rscratch, 0));
 1956   }
 1957 }
 1958 
 1959 void MacroAssembler::movptr(Register dst, Register src) {
 1960   movq(dst, src);
 1961 }
 1962 
 1963 void MacroAssembler::movptr(Register dst, Address src) {
 1964   movq(dst, src);
 1965 }
 1966 
 1967 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
 1968 void MacroAssembler::movptr(Register dst, intptr_t src) {
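  // Pick the shortest encoding: movl zero-extends a 32-bit unsigned immediate,
  // movq sign-extends a 32-bit signed immediate, and mov64 carries all 64 bits.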
 1969   if (is_uimm32(src)) {
 1970     movl(dst, checked_cast<uint32_t>(src));
 1971   } else if (is_simm32(src)) {
 1972     movq(dst, checked_cast<int32_t>(src));
 1973   } else {
 1974     mov64(dst, src);
 1975   }
 1976 }
 1977 
 1978 void MacroAssembler::movptr(Address dst, Register src) {
 1979   movq(dst, src);
 1980 }
 1981 
 1982 void MacroAssembler::movptr(Address dst, int32_t src) {
 1983   movslq(dst, src);
 1984 }
 1985 
 1986 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
 1987   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 1988   Assembler::movdqu(dst, src);
 1989 }
 1990 
 1991 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
 1992   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 1993   Assembler::movdqu(dst, src);
 1994 }
 1995 
 1996 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
 1997   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 1998   Assembler::movdqu(dst, src);
 1999 }
 2000 
 2001 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2002   assert(rscratch != noreg || always_reachable(src), "missing");
 2003 
 2004   if (reachable(src)) {
 2005     movdqu(dst, as_Address(src));
 2006   } else {
 2007     lea(rscratch, src);
 2008     movdqu(dst, Address(rscratch, 0));
 2009   }
 2010 }
 2011 
 2012 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
 2013   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2014   Assembler::vmovdqu(dst, src);
 2015 }
 2016 
 2017 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
 2018   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2019   Assembler::vmovdqu(dst, src);
 2020 }
 2021 
 2022 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
 2023   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2024   Assembler::vmovdqu(dst, src);
 2025 }
 2026 
 2027 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2028   assert(rscratch != noreg || always_reachable(src), "missing");
 2029 
 2030   if (reachable(src)) {
 2031     vmovdqu(dst, as_Address(src));
  } else {
 2034     lea(rscratch, src);
 2035     vmovdqu(dst, Address(rscratch, 0));
 2036   }
 2037 }
 2038 
 2039 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2040   assert(rscratch != noreg || always_reachable(src), "missing");
 2041 
 2042   if (vector_len == AVX_512bit) {
 2043     evmovdquq(dst, src, AVX_512bit, rscratch);
 2044   } else if (vector_len == AVX_256bit) {
 2045     vmovdqu(dst, src, rscratch);
 2046   } else {
 2047     movdqu(dst, src, rscratch);
 2048   }
 2049 }
 2050 
 2051 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
 2052   if (vector_len == AVX_512bit) {
 2053     evmovdquq(dst, src, AVX_512bit);
 2054   } else if (vector_len == AVX_256bit) {
 2055     vmovdqu(dst, src);
 2056   } else {
 2057     movdqu(dst, src);
 2058   }
 2059 }
 2060 
 2061 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
 2062   if (vector_len == AVX_512bit) {
 2063     evmovdquq(dst, src, AVX_512bit);
 2064   } else if (vector_len == AVX_256bit) {
 2065     vmovdqu(dst, src);
 2066   } else {
 2067     movdqu(dst, src);
 2068   }
 2069 }
 2070 
 2071 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
 2072   if (vector_len == AVX_512bit) {
 2073     evmovdquq(dst, src, AVX_512bit);
 2074   } else if (vector_len == AVX_256bit) {
 2075     vmovdqu(dst, src);
 2076   } else {
 2077     movdqu(dst, src);
 2078   }
 2079 }
 2080 
 2081 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2082   assert(rscratch != noreg || always_reachable(src), "missing");
 2083 
 2084   if (reachable(src)) {
 2085     vmovdqa(dst, as_Address(src));
  } else {
 2088     lea(rscratch, src);
 2089     vmovdqa(dst, Address(rscratch, 0));
 2090   }
 2091 }
 2092 
 2093 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2094   assert(rscratch != noreg || always_reachable(src), "missing");
 2095 
 2096   if (vector_len == AVX_512bit) {
 2097     evmovdqaq(dst, src, AVX_512bit, rscratch);
 2098   } else if (vector_len == AVX_256bit) {
 2099     vmovdqa(dst, src, rscratch);
 2100   } else {
 2101     movdqa(dst, src, rscratch);
 2102   }
 2103 }
 2104 
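// Opmask register moves. The 64-bit kmovq form requires AVX512BW; without it
// only the 16-bit kmovw form is available (base AVX512F masks cover at most
// 16 lanes anyway).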
 2105 void MacroAssembler::kmov(KRegister dst, Address src) {
 2106   if (VM_Version::supports_avx512bw()) {
 2107     kmovql(dst, src);
 2108   } else {
 2109     assert(VM_Version::supports_evex(), "");
 2110     kmovwl(dst, src);
 2111   }
 2112 }
 2113 
 2114 void MacroAssembler::kmov(Address dst, KRegister src) {
 2115   if (VM_Version::supports_avx512bw()) {
 2116     kmovql(dst, src);
 2117   } else {
 2118     assert(VM_Version::supports_evex(), "");
 2119     kmovwl(dst, src);
 2120   }
 2121 }
 2122 
 2123 void MacroAssembler::kmov(KRegister dst, KRegister src) {
 2124   if (VM_Version::supports_avx512bw()) {
 2125     kmovql(dst, src);
 2126   } else {
 2127     assert(VM_Version::supports_evex(), "");
 2128     kmovwl(dst, src);
 2129   }
 2130 }
 2131 
 2132 void MacroAssembler::kmov(Register dst, KRegister src) {
 2133   if (VM_Version::supports_avx512bw()) {
 2134     kmovql(dst, src);
 2135   } else {
 2136     assert(VM_Version::supports_evex(), "");
 2137     kmovwl(dst, src);
 2138   }
 2139 }
 2140 
 2141 void MacroAssembler::kmov(KRegister dst, Register src) {
 2142   if (VM_Version::supports_avx512bw()) {
 2143     kmovql(dst, src);
 2144   } else {
 2145     assert(VM_Version::supports_evex(), "");
 2146     kmovwl(dst, src);
 2147   }
 2148 }
 2149 
 2150 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
 2151   assert(rscratch != noreg || always_reachable(src), "missing");
 2152 
 2153   if (reachable(src)) {
 2154     kmovql(dst, as_Address(src));
 2155   } else {
 2156     lea(rscratch, src);
 2157     kmovql(dst, Address(rscratch, 0));
 2158   }
 2159 }
 2160 
 2161 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
 2162   assert(rscratch != noreg || always_reachable(src), "missing");
 2163 
 2164   if (reachable(src)) {
 2165     kmovwl(dst, as_Address(src));
 2166   } else {
 2167     lea(rscratch, src);
 2168     kmovwl(dst, Address(rscratch, 0));
 2169   }
 2170 }
 2171 
 2172 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2173                                int vector_len, Register rscratch) {
 2174   assert(rscratch != noreg || always_reachable(src), "missing");
 2175 
 2176   if (reachable(src)) {
 2177     Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
 2178   } else {
 2179     lea(rscratch, src);
 2180     Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
 2181   }
 2182 }
 2183 
 2184 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2185                                int vector_len, Register rscratch) {
 2186   assert(rscratch != noreg || always_reachable(src), "missing");
 2187 
 2188   if (reachable(src)) {
 2189     Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
 2190   } else {
 2191     lea(rscratch, src);
 2192     Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
 2193   }
 2194 }
 2195 
 2196 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2197   assert(rscratch != noreg || always_reachable(src), "missing");
 2198 
 2199   if (reachable(src)) {
 2200     Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
 2201   } else {
 2202     lea(rscratch, src);
 2203     Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
 2204   }
 2205 }
 2206 
 2207 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2208   assert(rscratch != noreg || always_reachable(src), "missing");
 2209 
 2210   if (reachable(src)) {
 2211     Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
 2212   } else {
 2213     lea(rscratch, src);
 2214     Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2215   }
 2216 }
 2217 
 2218 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2219   assert(rscratch != noreg || always_reachable(src), "missing");
 2220 
 2221   if (reachable(src)) {
 2222     Assembler::evmovdquq(dst, as_Address(src), vector_len);
 2223   } else {
 2224     lea(rscratch, src);
 2225     Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
 2226   }
 2227 }
 2228 
 2229 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2230   assert(rscratch != noreg || always_reachable(src), "missing");
 2231 
 2232   if (reachable(src)) {
 2233     Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
 2234   } else {
 2235     lea(rscratch, src);
 2236     Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2237   }
 2238 }
 2239 
 2240 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2241   assert(rscratch != noreg || always_reachable(src), "missing");
 2242 
 2243   if (reachable(src)) {
 2244     Assembler::evmovdqaq(dst, as_Address(src), vector_len);
 2245   } else {
 2246     lea(rscratch, src);
 2247     Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
 2248   }
 2249 }
 2250 
 2251 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2252   assert(rscratch != noreg || always_reachable(src), "missing");
 2253 
 2254   if (reachable(src)) {
 2255     Assembler::movapd(dst, as_Address(src));
 2256   } else {
 2257     lea(rscratch, src);
 2258     Assembler::movapd(dst, Address(rscratch, 0));
 2259   }
 2260 }
 2261 
 2262 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2263   assert(rscratch != noreg || always_reachable(src), "missing");
 2264 
 2265   if (reachable(src)) {
 2266     Assembler::movdqa(dst, as_Address(src));
 2267   } else {
 2268     lea(rscratch, src);
 2269     Assembler::movdqa(dst, Address(rscratch, 0));
 2270   }
 2271 }
 2272 
 2273 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2274   assert(rscratch != noreg || always_reachable(src), "missing");
 2275 
 2276   if (reachable(src)) {
 2277     Assembler::movsd(dst, as_Address(src));
 2278   } else {
 2279     lea(rscratch, src);
 2280     Assembler::movsd(dst, Address(rscratch, 0));
 2281   }
 2282 }
 2283 
 2284 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2285   assert(rscratch != noreg || always_reachable(src), "missing");
 2286 
 2287   if (reachable(src)) {
 2288     Assembler::movss(dst, as_Address(src));
 2289   } else {
 2290     lea(rscratch, src);
 2291     Assembler::movss(dst, Address(rscratch, 0));
 2292   }
 2293 }
 2294 
 2295 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2296   assert(rscratch != noreg || always_reachable(src), "missing");
 2297 
 2298   if (reachable(src)) {
 2299     Assembler::movddup(dst, as_Address(src));
 2300   } else {
 2301     lea(rscratch, src);
 2302     Assembler::movddup(dst, Address(rscratch, 0));
 2303   }
 2304 }
 2305 
 2306 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2307   assert(rscratch != noreg || always_reachable(src), "missing");
 2308 
 2309   if (reachable(src)) {
 2310     Assembler::vmovddup(dst, as_Address(src), vector_len);
 2311   } else {
 2312     lea(rscratch, src);
 2313     Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
 2314   }
 2315 }
 2316 
 2317 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2318   assert(rscratch != noreg || always_reachable(src), "missing");
 2319 
 2320   if (reachable(src)) {
 2321     Assembler::mulsd(dst, as_Address(src));
 2322   } else {
 2323     lea(rscratch, src);
 2324     Assembler::mulsd(dst, Address(rscratch, 0));
 2325   }
 2326 }
 2327 
 2328 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2329   assert(rscratch != noreg || always_reachable(src), "missing");
 2330 
 2331   if (reachable(src)) {
 2332     Assembler::mulss(dst, as_Address(src));
 2333   } else {
 2334     lea(rscratch, src);
 2335     Assembler::mulss(dst, Address(rscratch, 0));
 2336   }
 2337 }
 2338 
 2339 void MacroAssembler::null_check(Register reg, int offset) {
 2340   if (needs_explicit_null_check(offset)) {
 2341     // provoke OS null exception if reg is null by
 2342     // accessing M[reg] w/o changing any (non-CC) registers
 2343     // NOTE: cmpl is plenty here to provoke a segv
 2344     cmpptr(rax, Address(reg, 0));
 2345     // Note: should probably use testl(rax, Address(reg, 0));
 2346     //       may be shorter code (however, this version of
 2347     //       testl needs to be implemented first)
 2348   } else {
 2349     // nothing to do, (later) access of M[reg + offset]
 2350     // will provoke OS null exception if reg is null
 2351   }
 2352 }
 2353 
 2354 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
 2355   andptr(markword, markWord::inline_type_mask_in_place);
 2356   cmpptr(markword, markWord::inline_type_pattern);
 2357   jcc(Assembler::equal, is_inline_type);
 2358 }
 2359 
 2360 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null) {
 2361   if (can_be_null) {
 2362     testptr(object, object);
 2363     jcc(Assembler::zero, not_inline_type);
 2364   }
 2365   const int is_inline_type_mask = markWord::inline_type_pattern;
 2366   movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
 2367   andptr(tmp, is_inline_type_mask);
 2368   cmpptr(tmp, is_inline_type_mask);
 2369   jcc(Assembler::notEqual, not_inline_type);
 2370 }
 2371 
 2372 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
 2373   movl(temp_reg, flags);
 2374   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2375   jcc(Assembler::notEqual, is_null_free_inline_type);
 2376 }
 2377 
 2378 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
 2379   movl(temp_reg, flags);
 2380   testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
 2381   jcc(Assembler::equal, not_null_free_inline_type);
 2382 }
 2383 
 2384 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
 2385   movl(temp_reg, flags);
 2386   testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
 2387   jcc(Assembler::notEqual, is_flat);
 2388 }
 2389 
 2390 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
 2391   movl(temp_reg, flags);
 2392   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
 2393   jcc(Assembler::notEqual, has_null_marker);
 2394 }
 2395 
 2396 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
 2397   Label test_mark_word;
 2398   // load mark word
 2399   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  // if the mark word is unlocked, its prototype bits are valid and can be tested directly
  testl(temp_reg, markWord::unlocked_value);
  jccb(Assembler::notZero, test_mark_word);
  // otherwise the mark word is locked/displaced; fall back to the klass prototype header
 2404   push(rscratch1);
 2405   load_prototype_header(temp_reg, oop, rscratch1);
 2406   pop(rscratch1);
 2407 
 2408   bind(test_mark_word);
 2409   testl(temp_reg, test_bit);
 2410   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
 2411 }
 2412 
 2413 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
 2414                                          Label& is_flat_array) {
 2415 #ifdef _LP64
 2416   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
 2417 #else
 2418   load_klass(temp_reg, oop, noreg);
 2419   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2420   test_flat_array_layout(temp_reg, is_flat_array);
 2421 #endif
 2422 }
 2423 
 2424 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
 2425                                              Label& is_non_flat_array) {
 2426 #ifdef _LP64
 2427   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
 2428 #else
 2429   load_klass(temp_reg, oop, noreg);
 2430   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
 2431   test_non_flat_array_layout(temp_reg, is_non_flat_array);
 2432 #endif
 2433 }
 2434 
 2435 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
 2436 #ifdef _LP64
 2437   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
 2438 #else
 2439   Unimplemented();
 2440 #endif
 2441 }
 2442 
 2443 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
 2444 #ifdef _LP64
 2445   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
 2446 #else
 2447   Unimplemented();
 2448 #endif
 2449 }
 2450 
 2451 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
 2452   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2453   jcc(Assembler::notZero, is_flat_array);
 2454 }
 2455 
 2456 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
 2457   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
 2458   jcc(Assembler::zero, is_non_flat_array);
 2459 }
 2460 
 2461 void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
 2463   // (e.g., MSVC can't call ps() otherwise)
 2464   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2465 }
 2466 
 2467 void MacroAssembler::unimplemented(const char* what) {
 2468   const char* buf = nullptr;
 2469   {
 2470     ResourceMark rm;
 2471     stringStream ss;
 2472     ss.print("unimplemented: %s", what);
 2473     buf = code_string(ss.as_string());
 2474   }
 2475   stop(buf);
 2476 }
 2477 
 2478 #define XSTATE_BV 0x200
 2479 
 2480 void MacroAssembler::pop_CPU_state() {
 2481   pop_FPU_state();
 2482   pop_IU_state();
 2483 }
 2484 
 2485 void MacroAssembler::pop_FPU_state() {
 2486   fxrstor(Address(rsp, 0));
 2487   addptr(rsp, FPUStateSizeInWords * wordSize);
 2488 }
 2489 
 2490 void MacroAssembler::pop_IU_state() {
 2491   popa();
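  // drop the 8-byte alignment pad pushed by push_IU_state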
 2492   addq(rsp, 8);
 2493   popf();
 2494 }
 2495 
 2496 // Save Integer and Float state
 2497 // Warning: Stack must be 16 byte aligned (64bit)
 2498 void MacroAssembler::push_CPU_state() {
 2499   push_IU_state();
 2500   push_FPU_state();
 2501 }
 2502 
 2503 void MacroAssembler::push_FPU_state() {
 2504   subptr(rsp, FPUStateSizeInWords * wordSize);
 2505   fxsave(Address(rsp, 0));
 2506 }
 2507 
 2508 void MacroAssembler::push_IU_state() {
 2509   // Push flags first because pusha kills them
 2510   pushf();
 2511   // Make sure rsp stays 16-byte aligned
 2512   subq(rsp, 8);
 2513   pusha();
 2514 }
 2515 
 2516 void MacroAssembler::push_cont_fastpath() {
 2517   if (!Continuations::enabled()) return;
 2518 
 2519   Label L_done;
 2520   cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
 2521   jccb(Assembler::belowEqual, L_done);
 2522   movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
 2523   bind(L_done);
 2524 }
 2525 
 2526 void MacroAssembler::pop_cont_fastpath() {
 2527   if (!Continuations::enabled()) return;
 2528 
 2529   Label L_done;
 2530   cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
 2531   jccb(Assembler::below, L_done);
 2532   movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
 2533   bind(L_done);
 2534 }
 2535 
 2536 void MacroAssembler::inc_held_monitor_count() {
 2537   incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 2538 }
 2539 
 2540 void MacroAssembler::dec_held_monitor_count() {
 2541   decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 2542 }
 2543 
 2544 #ifdef ASSERT
 2545 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
 2546   Label no_cont;
 2547   movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
 2548   testl(cont, cont);
 2549   jcc(Assembler::zero, no_cont);
 2550   stop(name);
 2551   bind(no_cont);
 2552 }
 2553 #endif
 2554 
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
 2556   // we must set sp to zero to clear frame
 2557   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
 2558   // must clear fp, so that compiled frames are not confused; it is
 2559   // possible that we need it only for debugging
 2560   if (clear_fp) {
 2561     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
 2562   }
 2563   // Always clear the pc because it could have been set by make_walkable()
 2564   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 2565   vzeroupper();
 2566 }
 2567 
 2568 void MacroAssembler::round_to(Register reg, int modulus) {
 2569   addptr(reg, modulus - 1);
 2570   andptr(reg, -modulus);
 2571 }
 2572 
 2573 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
 2574   if (at_return) {
 2575     // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
 2576     // we may safely use rsp instead to perform the stack watermark check.
 2577     cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
 2578     jcc(Assembler::above, slow_path);
 2579     return;
 2580   }
 2581   testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
 2582   jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
 2583 }
 2584 
 2585 // Calls to C land
 2586 //
 2587 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded
 2588 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
 2589 // has to be reset to 0. This is required to allow proper stack traversal.
 2590 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 2591                                          Register last_java_fp,
 2592                                          address  last_java_pc,
 2593                                          Register rscratch) {
 2594   vzeroupper();
 2595   // determine last_java_sp register
 2596   if (!last_java_sp->is_valid()) {
 2597     last_java_sp = rsp;
 2598   }
 2599   // last_java_fp is optional
 2600   if (last_java_fp->is_valid()) {
 2601     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
 2602   }
 2603   // last_java_pc is optional
 2604   if (last_java_pc != nullptr) {
 2605     Address java_pc(r15_thread,
 2606                     JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
 2607     lea(java_pc, InternalAddress(last_java_pc), rscratch);
 2608   }
 2609   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
 2610 }
 2611 
 2612 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 2613                                          Register last_java_fp,
 2614                                          Label &L,
 2615                                          Register scratch) {
 2616   lea(scratch, L);
 2617   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
 2618   set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
 2619 }
 2620 
 2621 void MacroAssembler::shlptr(Register dst, int imm8) {
 2622   shlq(dst, imm8);
 2623 }
 2624 
 2625 void MacroAssembler::shrptr(Register dst, int imm8) {
 2626   shrq(dst, imm8);
 2627 }
 2628 
 2629 void MacroAssembler::sign_extend_byte(Register reg) {
 2630   movsbl(reg, reg); // movsxb
 2631 }
 2632 
 2633 void MacroAssembler::sign_extend_short(Register reg) {
 2634   movswl(reg, reg); // movsxw
 2635 }
 2636 
 2637 void MacroAssembler::testl(Address dst, int32_t imm32) {
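  // a non-negative mask that fits in 8 bits only involves the low byte, so the
  // shorter testb encoding produces the same ZF result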
 2638   if (imm32 >= 0 && is8bit(imm32)) {
 2639     testb(dst, imm32);
 2640   } else {
 2641     Assembler::testl(dst, imm32);
 2642   }
 2643 }
 2644 
 2645 void MacroAssembler::testl(Register dst, int32_t imm32) {
 2646   if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
 2647     testb(dst, imm32);
 2648   } else {
 2649     Assembler::testl(dst, imm32);
 2650   }
 2651 }
 2652 
 2653 void MacroAssembler::testl(Register dst, AddressLiteral src) {
 2654   assert(always_reachable(src), "Address should be reachable");
 2655   testl(dst, as_Address(src));
 2656 }
 2657 
 2658 void MacroAssembler::testq(Address dst, int32_t imm32) {
 2659   if (imm32 >= 0) {
 2660     testl(dst, imm32);
 2661   } else {
 2662     Assembler::testq(dst, imm32);
 2663   }
 2664 }
 2665 
 2666 void MacroAssembler::testq(Register dst, int32_t imm32) {
 2667   if (imm32 >= 0) {
 2668     testl(dst, imm32);
 2669   } else {
 2670     Assembler::testq(dst, imm32);
 2671   }
 2672 }
 2673 
 2674 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
 2675   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2676   Assembler::pcmpeqb(dst, src);
 2677 }
 2678 
 2679 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
 2680   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2681   Assembler::pcmpeqw(dst, src);
 2682 }
 2683 
 2684 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
 2685   assert((dst->encoding() < 16),"XMM register should be 0-15");
 2686   Assembler::pcmpestri(dst, src, imm8);
 2687 }
 2688 
 2689 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
 2690   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 2691   Assembler::pcmpestri(dst, src, imm8);
 2692 }
 2693 
 2694 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
 2695   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2696   Assembler::pmovzxbw(dst, src);
 2697 }
 2698 
 2699 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
 2700   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2701   Assembler::pmovzxbw(dst, src);
 2702 }
 2703 
 2704 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
 2705   assert((src->encoding() < 16),"XMM register should be 0-15");
 2706   Assembler::pmovmskb(dst, src);
 2707 }
 2708 
 2709 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
 2710   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 2711   Assembler::ptest(dst, src);
 2712 }
 2713 
 2714 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2715   assert(rscratch != noreg || always_reachable(src), "missing");
 2716 
 2717   if (reachable(src)) {
 2718     Assembler::sqrtss(dst, as_Address(src));
 2719   } else {
 2720     lea(rscratch, src);
 2721     Assembler::sqrtss(dst, Address(rscratch, 0));
 2722   }
 2723 }
 2724 
 2725 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2726   assert(rscratch != noreg || always_reachable(src), "missing");
 2727 
 2728   if (reachable(src)) {
 2729     Assembler::subsd(dst, as_Address(src));
 2730   } else {
 2731     lea(rscratch, src);
 2732     Assembler::subsd(dst, Address(rscratch, 0));
 2733   }
 2734 }
 2735 
 2736 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
 2737   assert(rscratch != noreg || always_reachable(src), "missing");
 2738 
 2739   if (reachable(src)) {
 2740     Assembler::roundsd(dst, as_Address(src), rmode);
 2741   } else {
 2742     lea(rscratch, src);
 2743     Assembler::roundsd(dst, Address(rscratch, 0), rmode);
 2744   }
 2745 }
 2746 
 2747 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2748   assert(rscratch != noreg || always_reachable(src), "missing");
 2749 
 2750   if (reachable(src)) {
 2751     Assembler::subss(dst, as_Address(src));
 2752   } else {
 2753     lea(rscratch, src);
 2754     Assembler::subss(dst, Address(rscratch, 0));
 2755   }
 2756 }
 2757 
 2758 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2759   assert(rscratch != noreg || always_reachable(src), "missing");
 2760 
 2761   if (reachable(src)) {
 2762     Assembler::ucomisd(dst, as_Address(src));
 2763   } else {
 2764     lea(rscratch, src);
 2765     Assembler::ucomisd(dst, Address(rscratch, 0));
 2766   }
 2767 }
 2768 
 2769 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2770   assert(rscratch != noreg || always_reachable(src), "missing");
 2771 
 2772   if (reachable(src)) {
 2773     Assembler::ucomiss(dst, as_Address(src));
 2774   } else {
 2775     lea(rscratch, src);
 2776     Assembler::ucomiss(dst, Address(rscratch, 0));
 2777   }
 2778 }
 2779 
 2780 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2781   assert(rscratch != noreg || always_reachable(src), "missing");
 2782 
 2783   // Used in sign-bit flipping with aligned address.
 2784   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 2785 
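  // Legacy SSE xorpd cannot encode XMM16-31, and its EVEX form needs AVX512DQ
  // (plus AVX512VL at 128 bits); when those are unavailable, fall back to a
  // 512-bit vpxor, which only requires AVX512F.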
 2786   if (UseAVX > 2 &&
 2787       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2788       (dst->encoding() >= 16)) {
 2789     vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
 2790   } else if (reachable(src)) {
 2791     Assembler::xorpd(dst, as_Address(src));
 2792   } else {
 2793     lea(rscratch, src);
 2794     Assembler::xorpd(dst, Address(rscratch, 0));
 2795   }
 2796 }
 2797 
 2798 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
 2799   if (UseAVX > 2 &&
 2800       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2801       ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
 2802     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 2803   } else {
 2804     Assembler::xorpd(dst, src);
 2805   }
 2806 }
 2807 
 2808 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
 2809   if (UseAVX > 2 &&
 2810       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2811       ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
 2812     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 2813   } else {
 2814     Assembler::xorps(dst, src);
 2815   }
 2816 }
 2817 
 2818 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2819   assert(rscratch != noreg || always_reachable(src), "missing");
 2820 
 2821   // Used in sign-bit flipping with aligned address.
 2822   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 2823 
 2824   if (UseAVX > 2 &&
 2825       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2826       (dst->encoding() >= 16)) {
 2827     vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
 2828   } else if (reachable(src)) {
 2829     Assembler::xorps(dst, as_Address(src));
 2830   } else {
 2831     lea(rscratch, src);
 2832     Assembler::xorps(dst, Address(rscratch, 0));
 2833   }
 2834 }
 2835 
 2836 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2837   assert(rscratch != noreg || always_reachable(src), "missing");
 2838 
 2839   // Used in sign-bit flipping with aligned address.
 2840   bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
 2841   assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
 2842   if (reachable(src)) {
 2843     Assembler::pshufb(dst, as_Address(src));
 2844   } else {
 2845     lea(rscratch, src);
 2846     Assembler::pshufb(dst, Address(rscratch, 0));
 2847   }
 2848 }
 2849 
 2850 // AVX 3-operands instructions
 2851 
 2852 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 2853   assert(rscratch != noreg || always_reachable(src), "missing");
 2854 
 2855   if (reachable(src)) {
 2856     vaddsd(dst, nds, as_Address(src));
 2857   } else {
 2858     lea(rscratch, src);
 2859     vaddsd(dst, nds, Address(rscratch, 0));
 2860   }
 2861 }
 2862 
 2863 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 2864   assert(rscratch != noreg || always_reachable(src), "missing");
 2865 
 2866   if (reachable(src)) {
 2867     vaddss(dst, nds, as_Address(src));
 2868   } else {
 2869     lea(rscratch, src);
 2870     vaddss(dst, nds, Address(rscratch, 0));
 2871   }
 2872 }
 2873 
 2874 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2875   assert(UseAVX > 0, "requires some form of AVX");
 2876   assert(rscratch != noreg || always_reachable(src), "missing");
 2877 
 2878   if (reachable(src)) {
 2879     Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
 2880   } else {
 2881     lea(rscratch, src);
 2882     Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
 2883   }
 2884 }
 2885 
 2886 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2887   assert(UseAVX > 0, "requires some form of AVX");
 2888   assert(rscratch != noreg || always_reachable(src), "missing");
 2889 
 2890   if (reachable(src)) {
 2891     Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
 2892   } else {
 2893     lea(rscratch, src);
 2894     Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
 2895   }
 2896 }
 2897 
 2898 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 2899   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 2900   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 2901 
 2902   vandps(dst, nds, negate_field, vector_len, rscratch);
 2903 }
 2904 
 2905 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 2906   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 2907   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 2908 
 2909   vandpd(dst, nds, negate_field, vector_len, rscratch);
 2910 }
 2911 
 2912 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2913   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2914   Assembler::vpaddb(dst, nds, src, vector_len);
 2915 }
 2916 
 2917 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2918   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2919   Assembler::vpaddb(dst, nds, src, vector_len);
 2920 }
 2921 
 2922 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2923   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2924   Assembler::vpaddw(dst, nds, src, vector_len);
 2925 }
 2926 
 2927 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2928   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2929   Assembler::vpaddw(dst, nds, src, vector_len);
 2930 }
 2931 
 2932 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2933   assert(rscratch != noreg || always_reachable(src), "missing");
 2934 
 2935   if (reachable(src)) {
 2936     Assembler::vpand(dst, nds, as_Address(src), vector_len);
 2937   } else {
 2938     lea(rscratch, src);
 2939     Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
 2940   }
 2941 }
 2942 
 2943 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2944   assert(rscratch != noreg || always_reachable(src), "missing");
 2945 
 2946   if (reachable(src)) {
 2947     Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
 2948   } else {
 2949     lea(rscratch, src);
 2950     Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
 2951   }
 2952 }
 2953 
 2954 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2955   assert(rscratch != noreg || always_reachable(src), "missing");
 2956 
 2957   if (reachable(src)) {
 2958     Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
 2959   } else {
 2960     lea(rscratch, src);
 2961     Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
 2962   }
 2963 }
 2964 
 2965 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2966   assert(rscratch != noreg || always_reachable(src), "missing");
 2967 
 2968   if (reachable(src)) {
 2969     Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
 2970   } else {
 2971     lea(rscratch, src);
 2972     Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
 2973   }
 2974 }
 2975 
 2976 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2977   assert(rscratch != noreg || always_reachable(src), "missing");
 2978 
 2979   if (reachable(src)) {
 2980     Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
 2981   } else {
 2982     lea(rscratch, src);
 2983     Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
 2984   }
 2985 }
 2986 
 2987 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2988   assert(rscratch != noreg || always_reachable(src), "missing");
 2989 
 2990   if (reachable(src)) {
 2991     Assembler::vbroadcastss(dst, as_Address(src), vector_len);
 2992   } else {
 2993     lea(rscratch, src);
 2994     Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
 2995   }
 2996 }
 2997 
 2998 // Vector float blend
 2999 // vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
 3000 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
 3001   // WARN: Allow dst == (src1|src2), mask == scratch
 3002   bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
 3003   bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
 3004   bool dst_available = dst != mask && (dst != src1 || dst != src2);
 3005   if (blend_emulation && scratch_available && dst_available) {
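          // Emulated blend: dst = (mask & src2) | (~mask & src1). When compute_mask is
          // set, vpsrad broadcasts the sign bit across each lane first, so only
          // all-ones/all-zeros lane masks reach the AND/ANDN pair below.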
 3006     if (compute_mask) {
 3007       vpsrad(scratch, mask, 32, vector_len);
 3008       mask = scratch;
 3009     }
 3010     if (dst == src1) {
 3011       vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
 3012       vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
 3013     } else {
 3014       vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
 3015       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
 3016     }
 3017     vpor(dst, dst, scratch, vector_len);
 3018   } else {
 3019     Assembler::vblendvps(dst, src1, src2, mask, vector_len);
 3020   }
 3021 }
 3022 
 3023 // vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
 3024 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
 3025   // WARN: Allow dst == (src1|src2), mask == scratch
 3026   bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
 3027   bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
 3028   bool dst_available = dst != mask && (dst != src1 || dst != src2);
 3029   if (blend_emulation && scratch_available && dst_available) {
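          // Same AND/ANDN/OR emulation as vblendvps above; for 64-bit lanes the
          // all-ones mask is materialized by comparing zero against mask with vpcmpgtq.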
 3030     if (compute_mask) {
 3031       vpxor(scratch, scratch, scratch, vector_len);
 3032       vpcmpgtq(scratch, scratch, mask, vector_len);
 3033       mask = scratch;
 3034     }
 3035     if (dst == src1) {
 3036       vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
 3037       vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
 3038     } else {
 3039       vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
 3040       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
 3041     }
 3042     vpor(dst, dst, scratch, vector_len);
 3043   } else {
 3044     Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
 3045   }
 3046 }
 3047 
 3048 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3049   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3050   Assembler::vpcmpeqb(dst, nds, src, vector_len);
 3051 }
 3052 
 3053 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
 3054   assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3055   Assembler::vpcmpeqb(dst, src1, src2, vector_len);
 3056 }
 3057 
 3058 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3059   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3060   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 3061 }
 3062 
 3063 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3064   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3065   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 3066 }
 3067 
 3068 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3069   assert(rscratch != noreg || always_reachable(src), "missing");
 3070 
 3071   if (reachable(src)) {
 3072     Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
 3073   } else {
 3074     lea(rscratch, src);
 3075     Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
 3076   }
 3077 }
 3078 
 3079 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3080                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3081   assert(rscratch != noreg || always_reachable(src), "missing");
 3082 
 3083   if (reachable(src)) {
 3084     Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3085   } else {
 3086     lea(rscratch, src);
 3087     Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3088   }
 3089 }
 3090 
 3091 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3092                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3093   assert(rscratch != noreg || always_reachable(src), "missing");
 3094 
 3095   if (reachable(src)) {
 3096     Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3097   } else {
 3098     lea(rscratch, src);
 3099     Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3100   }
 3101 }
 3102 
 3103 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3104                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3105   assert(rscratch != noreg || always_reachable(src), "missing");
 3106 
 3107   if (reachable(src)) {
 3108     Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3109   } else {
 3110     lea(rscratch, src);
 3111     Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3112   }
 3113 }
 3114 
 3115 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3116                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3117   assert(rscratch != noreg || always_reachable(src), "missing");
 3118 
 3119   if (reachable(src)) {
 3120     Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3121   } else {
 3122     lea(rscratch, src);
 3123     Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3124   }
 3125 }
 3126 
 3127 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
 3128   if (width == Assembler::Q) {
 3129     Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
 3130   } else {
 3131     Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
 3132   }
 3133 }
 3134 
 3135 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
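        // Condition-code selection: 0x29/0x37 are the pcmpeqq/pcmpgtq opcodes used for
        // 64-bit elements; for B/W/D the base opcodes 0x74 (pcmpeq) and 0x64 (pcmpgt)
        // are offset by the element width.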
 3136   int eq_cond_enc = 0x29;
 3137   int gt_cond_enc = 0x37;
 3138   if (width != Assembler::Q) {
 3139     eq_cond_enc = 0x74 + width;
 3140     gt_cond_enc = 0x64 + width;
 3141   }
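        // Predicates without a direct encoding (neq, le, nlt) are formed by emitting
        // the complementary compare and inverting the result with an all-ones vector.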
 3142   switch (cond) {
 3143   case eq:
 3144     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3145     break;
 3146   case neq:
 3147     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3148     vallones(xtmp, vector_len);
 3149     vpxor(dst, xtmp, dst, vector_len);
 3150     break;
 3151   case le:
 3152     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3153     vallones(xtmp, vector_len);
 3154     vpxor(dst, xtmp, dst, vector_len);
 3155     break;
 3156   case nlt:
 3157     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3158     vallones(xtmp, vector_len);
 3159     vpxor(dst, xtmp, dst, vector_len);
 3160     break;
 3161   case lt:
 3162     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3163     break;
 3164   case nle:
 3165     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3166     break;
 3167   default:
 3168     assert(false, "Should not reach here");
 3169   }
 3170 }
 3171 
 3172 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
 3173   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3174   Assembler::vpmovzxbw(dst, src, vector_len);
 3175 }
 3176 
 3177 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
 3178   assert((src->encoding() < 16),"XMM register should be 0-15");
 3179   Assembler::vpmovmskb(dst, src, vector_len);
 3180 }
 3181 
 3182 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3183   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3184   Assembler::vpmullw(dst, nds, src, vector_len);
 3185 }
 3186 
 3187 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3188   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3189   Assembler::vpmullw(dst, nds, src, vector_len);
 3190 }
 3191 
 3192 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3193   assert((UseAVX > 0), "AVX support is needed");
 3194   assert(rscratch != noreg || always_reachable(src), "missing");
 3195 
 3196   if (reachable(src)) {
 3197     Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
 3198   } else {
 3199     lea(rscratch, src);
 3200     Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
 3201   }
 3202 }
 3203 
 3204 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3205   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3206   Assembler::vpsubb(dst, nds, src, vector_len);
 3207 }
 3208 
 3209 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3210   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3211   Assembler::vpsubb(dst, nds, src, vector_len);
 3212 }
 3213 
 3214 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3215   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3216   Assembler::vpsubw(dst, nds, src, vector_len);
 3217 }
 3218 
 3219 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3220   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3221   Assembler::vpsubw(dst, nds, src, vector_len);
 3222 }
 3223 
 3224 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3225   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3226   Assembler::vpsraw(dst, nds, shift, vector_len);
 3227 }
 3228 
 3229 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3230   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3231   Assembler::vpsraw(dst, nds, shift, vector_len);
 3232 }
 3233 
 3234 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3235   assert(UseAVX > 2,"");
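        // Without AVX512VL the 128/256-bit EVEX forms are unavailable, so widen the
        // operation to the 512-bit form (vector_len == 2), which needs only AVX-512F.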
 3236   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3237      vector_len = 2;
 3238   }
 3239   Assembler::evpsraq(dst, nds, shift, vector_len);
 3240 }
 3241 
 3242 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3243   assert(UseAVX > 2,"");
 3244   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3245      vector_len = 2;
 3246   }
 3247   Assembler::evpsraq(dst, nds, shift, vector_len);
 3248 }
 3249 
 3250 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3251   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3252   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3253 }
 3254 
 3255 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3256   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3257   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3258 }
 3259 
 3260 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3261   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3262   Assembler::vpsllw(dst, nds, shift, vector_len);
 3263 }
 3264 
 3265 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3266   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3267   Assembler::vpsllw(dst, nds, shift, vector_len);
 3268 }
 3269 
 3270 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
 3271   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 3272   Assembler::vptest(dst, src);
 3273 }
 3274 
 3275 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
 3276   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3277   Assembler::punpcklbw(dst, src);
 3278 }
 3279 
 3280 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
 3281   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 3282   Assembler::pshufd(dst, src, mode);
 3283 }
 3284 
 3285 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
 3286   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3287   Assembler::pshuflw(dst, src, mode);
 3288 }
 3289 
 3290 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3291   assert(rscratch != noreg || always_reachable(src), "missing");
 3292 
 3293   if (reachable(src)) {
 3294     vandpd(dst, nds, as_Address(src), vector_len);
 3295   } else {
 3296     lea(rscratch, src);
 3297     vandpd(dst, nds, Address(rscratch, 0), vector_len);
 3298   }
 3299 }
 3300 
 3301 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3302   assert(rscratch != noreg || always_reachable(src), "missing");
 3303 
 3304   if (reachable(src)) {
 3305     vandps(dst, nds, as_Address(src), vector_len);
 3306   } else {
 3307     lea(rscratch, src);
 3308     vandps(dst, nds, Address(rscratch, 0), vector_len);
 3309   }
 3310 }
 3311 
 3312 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3313                             bool merge, int vector_len, Register rscratch) {
 3314   assert(rscratch != noreg || always_reachable(src), "missing");
 3315 
 3316   if (reachable(src)) {
 3317     Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
 3318   } else {
 3319     lea(rscratch, src);
 3320     Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 3321   }
 3322 }
 3323 
 3324 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3325   assert(rscratch != noreg || always_reachable(src), "missing");
 3326 
 3327   if (reachable(src)) {
 3328     vdivsd(dst, nds, as_Address(src));
 3329   } else {
 3330     lea(rscratch, src);
 3331     vdivsd(dst, nds, Address(rscratch, 0));
 3332   }
 3333 }
 3334 
 3335 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3336   assert(rscratch != noreg || always_reachable(src), "missing");
 3337 
 3338   if (reachable(src)) {
 3339     vdivss(dst, nds, as_Address(src));
 3340   } else {
 3341     lea(rscratch, src);
 3342     vdivss(dst, nds, Address(rscratch, 0));
 3343   }
 3344 }
 3345 
 3346 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3347   assert(rscratch != noreg || always_reachable(src), "missing");
 3348 
 3349   if (reachable(src)) {
 3350     vmulsd(dst, nds, as_Address(src));
 3351   } else {
 3352     lea(rscratch, src);
 3353     vmulsd(dst, nds, Address(rscratch, 0));
 3354   }
 3355 }
 3356 
 3357 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3358   assert(rscratch != noreg || always_reachable(src), "missing");
 3359 
 3360   if (reachable(src)) {
 3361     vmulss(dst, nds, as_Address(src));
 3362   } else {
 3363     lea(rscratch, src);
 3364     vmulss(dst, nds, Address(rscratch, 0));
 3365   }
 3366 }
 3367 
 3368 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3369   assert(rscratch != noreg || always_reachable(src), "missing");
 3370 
 3371   if (reachable(src)) {
 3372     vsubsd(dst, nds, as_Address(src));
 3373   } else {
 3374     lea(rscratch, src);
 3375     vsubsd(dst, nds, Address(rscratch, 0));
 3376   }
 3377 }
 3378 
 3379 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3380   assert(rscratch != noreg || always_reachable(src), "missing");
 3381 
 3382   if (reachable(src)) {
 3383     vsubss(dst, nds, as_Address(src));
 3384   } else {
 3385     lea(rscratch, src);
 3386     vsubss(dst, nds, Address(rscratch, 0));
 3387   }
 3388 }
 3389 
 3390 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3391   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3392   assert(rscratch != noreg || always_reachable(src), "missing");
 3393 
 3394   vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3395 }
 3396 
 3397 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3398   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3399   assert(rscratch != noreg || always_reachable(src), "missing");
 3400 
 3401   vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3402 }
 3403 
 3404 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3405   assert(rscratch != noreg || always_reachable(src), "missing");
 3406 
 3407   if (reachable(src)) {
 3408     vxorpd(dst, nds, as_Address(src), vector_len);
 3409   } else {
 3410     lea(rscratch, src);
 3411     vxorpd(dst, nds, Address(rscratch, 0), vector_len);
 3412   }
 3413 }
 3414 
 3415 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3416   assert(rscratch != noreg || always_reachable(src), "missing");
 3417 
 3418   if (reachable(src)) {
 3419     vxorps(dst, nds, as_Address(src), vector_len);
 3420   } else {
 3421     lea(rscratch, src);
 3422     vxorps(dst, nds, Address(rscratch, 0), vector_len);
 3423   }
 3424 }
 3425 
 3426 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3427   assert(rscratch != noreg || always_reachable(src), "missing");
 3428 
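        // 256-bit integer vpxor requires AVX2; with only AVX1 fall back to vxorpd,
        // which does have a 256-bit form. 128-bit operands (vector_len < 1) are fine.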
 3429   if (UseAVX > 1 || (vector_len < 1)) {
 3430     if (reachable(src)) {
 3431       Assembler::vpxor(dst, nds, as_Address(src), vector_len);
 3432     } else {
 3433       lea(rscratch, src);
 3434       Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
 3435     }
 3436   } else {
 3437     MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
 3438   }
 3439 }
 3440 
 3441 void MacroAssembler::vpermd(XMMRegister dst,  XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3442   assert(rscratch != noreg || always_reachable(src), "missing");
 3443 
 3444   if (reachable(src)) {
 3445     Assembler::vpermd(dst, nds, as_Address(src), vector_len);
 3446   } else {
 3447     lea(rscratch, src);
 3448     Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
 3449   }
 3450 }
 3451 
 3452 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
 3453   const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
 3454   STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
 3455   // The inverted mask is sign-extended
 3456   andptr(possibly_non_local, inverted_mask);
 3457 }
 3458 
 3459 void MacroAssembler::resolve_jobject(Register value,
 3460                                      Register tmp) {
 3461   Register thread = r15_thread;
 3462   assert_different_registers(value, thread, tmp);
 3463   Label done, tagged, weak_tagged;
 3464   testptr(value, value);
 3465   jcc(Assembler::zero, done);           // Use null as-is.
 3466   testptr(value, JNIHandles::tag_mask); // Test for tag.
 3467   jcc(Assembler::notZero, tagged);
 3468 
 3469   // Resolve local handle
 3470   access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
 3471   verify_oop(value);
 3472   jmp(done);
 3473 
 3474   bind(tagged);
 3475   testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
 3476   jcc(Assembler::notZero, weak_tagged);
 3477 
 3478   // Resolve global handle
 3479   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
 3480   verify_oop(value);
 3481   jmp(done);
 3482 
 3483   bind(weak_tagged);
 3484   // Resolve jweak.
 3485   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 3486                  value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
 3487   verify_oop(value);
 3488 
 3489   bind(done);
 3490 }
 3491 
 3492 void MacroAssembler::resolve_global_jobject(Register value,
 3493                                             Register tmp) {
 3494   Register thread = r15_thread;
 3495   assert_different_registers(value, thread, tmp);
 3496   Label done;
 3497 
 3498   testptr(value, value);
 3499   jcc(Assembler::zero, done);           // Use null as-is.
 3500 
 3501 #ifdef ASSERT
 3502   {
 3503     Label valid_global_tag;
 3504     testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
 3505     jcc(Assembler::notZero, valid_global_tag);
 3506     stop("non global jobject using resolve_global_jobject");
 3507     bind(valid_global_tag);
 3508   }
 3509 #endif
 3510 
 3511   // Resolve global handle
 3512   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
 3513   verify_oop(value);
 3514 
 3515   bind(done);
 3516 }
 3517 
 3518 void MacroAssembler::subptr(Register dst, int32_t imm32) {
 3519   subq(dst, imm32);
 3520 }
 3521 
 3522 // Force generation of a 4-byte immediate value even if it fits into 8 bits
 3523 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
 3524   subq_imm32(dst, imm32);
 3525 }
 3526 
 3527 void MacroAssembler::subptr(Register dst, Register src) {
 3528   subq(dst, src);
 3529 }
 3530 
 3531 // C++ bool manipulation
 3532 void MacroAssembler::testbool(Register dst) {
 3533   if(sizeof(bool) == 1)
 3534     testb(dst, 0xff);
 3535   else if(sizeof(bool) == 2) {
 3536     // testw implementation needed for two byte bools
 3537     ShouldNotReachHere();
 3538   } else if(sizeof(bool) == 4)
 3539     testl(dst, dst);
 3540   else
 3541     // unsupported
 3542     ShouldNotReachHere();
 3543 }
 3544 
 3545 void MacroAssembler::testptr(Register dst, Register src) {
 3546   testq(dst, src);
 3547 }
 3548 
 3549 // Object / value buffer allocation...
 3550 //
 3551 // Kills klass and rsi on LP64
 3552 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
 3553                                        Register t1, Register t2,
 3554                                        bool clear_fields, Label& alloc_failed)
 3555 {
 3556   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
 3557   Register layout_size = t1;
 3558   assert(new_obj == rax, "needs to be rax");
 3559   assert_different_registers(klass, new_obj, t1, t2);
 3560 
 3561   // get instance_size in InstanceKlass (scaled to a count of bytes)
 3562   movl(layout_size, Address(klass, Klass::layout_helper_offset()));
 3563   // test to see if it is malformed in some way
 3564   testl(layout_size, Klass::_lh_instance_slow_path_bit);
 3565   jcc(Assembler::notZero, slow_case_no_pop);
 3566 
 3567   // Allocate the instance:
 3568   //  If TLAB is enabled:
 3569   //    Try to allocate in the TLAB.
 3570   //    If fails, go to the slow path.
 3571   //  Else If inline contiguous allocations are enabled:
 3572   //    Try to allocate in eden.
 3573   //    If fails due to heap end, go to slow path.
 3574   //
 3575   //  If TLAB is enabled OR inline contiguous is enabled:
 3576   //    Initialize the allocation.
 3577   //    Exit.
 3578   //
 3579   //  Go to slow path.
 3580 
 3581   push(klass);
 3582   if (UseTLAB) {
 3583     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
 3584     if (ZeroTLAB || (!clear_fields)) {
 3585       // the fields have already been cleared
 3586       jmp(initialize_header);
 3587     } else {
 3588       // initialize both the header and fields
 3589       jmp(initialize_object);
 3590     }
 3591   } else {
 3592     jmp(slow_case);
 3593   }
 3594 
 3595   // If UseTLAB is true, the object was allocated above and still needs to be initialized.
 3596   // Otherwise control already jumped to the slow path and none of this code is emitted.
 3597   if (UseTLAB) {
 3598     if (clear_fields) {
 3599       // The object is initialized before the header.  If the object size is
 3600       // zero, go directly to the header initialization.
 3601       bind(initialize_object);
 3602       if (UseCompactObjectHeaders) {
 3603         assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned");
 3604         decrement(layout_size, oopDesc::base_offset_in_bytes());
 3605       } else {
 3606         decrement(layout_size, sizeof(oopDesc));
 3607       }
 3608       jcc(Assembler::zero, initialize_header);
 3609 
 3610       // Initialize topmost object field, divide size by 8, check if odd and
 3611       // test if zero.
 3612       Register zero = klass;
 3613       xorl(zero, zero);    // use zero reg to clear memory (shorter code)
 3614       shrl(layout_size, LogBytesPerLong); // divide by BytesPerLong and set carry flag if odd
 3615 
 3616   #ifdef ASSERT
 3617       // make sure instance_size was multiple of 8
 3618       Label L;
 3619       // Ignore partial flag stall after shrl() since it is debug VM
 3620       jcc(Assembler::carryClear, L);
 3621       stop("object size is not multiple of 2 - adjust this code");
 3622       bind(L);
 3623       // must be > 0, no extra check needed here
 3624   #endif
 3625 
 3626       // initialize remaining object fields: instance_size was a multiple of 8
 3627       {
 3628         Label loop;
 3629         bind(loop);
 3630         int header_size_bytes = oopDesc::header_size() * HeapWordSize;
 3631         assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned");
 3632         movptr(Address(new_obj, layout_size, Address::times_8, header_size_bytes - 1*oopSize), zero);
 3633         decrement(layout_size);
 3634         jcc(Assembler::notZero, loop);
 3635       }
 3636     } // clear_fields
 3637 
 3638     // initialize object header only.
 3639     bind(initialize_header);
 3640     if (UseCompactObjectHeaders || EnableValhalla) {
 3641       pop(klass);
 3642       Register mark_word = t2;
 3643       movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
 3644       movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word);
 3645     } else {
 3646      movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()),
 3647             (intptr_t)markWord::prototype().value()); // header
 3648      pop(klass);   // get saved klass back in the register.
 3649     }
 3650     if (!UseCompactObjectHeaders) {
 3651       xorl(rsi, rsi);                 // use zero reg to clear memory (shorter code)
 3652       store_klass_gap(new_obj, rsi);  // zero klass gap for compressed oops
 3653       movptr(t2, klass);         // preserve klass
 3654       store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed
 3655     }
 3656     jmp(done);
 3657   }
 3658 
 3659   bind(slow_case);
 3660   pop(klass);
 3661   bind(slow_case_no_pop);
 3662   jmp(alloc_failed);
 3663 
 3664   bind(done);
 3665 }
 3666 
 3667 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3668 void MacroAssembler::tlab_allocate(Register obj,
 3669                                    Register var_size_in_bytes,
 3670                                    int con_size_in_bytes,
 3671                                    Register t1,
 3672                                    Register t2,
 3673                                    Label& slow_case) {
 3674   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3675   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3676 }
 3677 
 3678 RegSet MacroAssembler::call_clobbered_gp_registers() {
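        // Caller-saved (volatile) general-purpose registers for the current ABI:
        // rsi/rdi are callee-saved on Windows, and r16-r31 exist only when APX is enabled.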
 3679   RegSet regs;
 3680   regs += RegSet::of(rax, rcx, rdx);
 3681 #ifndef _WINDOWS
 3682   regs += RegSet::of(rsi, rdi);
 3683 #endif
 3684   regs += RegSet::range(r8, r11);
 3685   if (UseAPX) {
 3686     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
 3687   }
 3688   return regs;
 3689 }
 3690 
 3691 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
 3692   int num_xmm_registers = XMMRegister::available_xmm_registers();
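        // The Windows x64 ABI preserves xmm6-xmm15 across calls, so only xmm0-xmm5 plus
        // the EVEX registers (xmm16 and up, which are never preserved) count as clobbered.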
 3693 #if defined(_WINDOWS)
 3694   XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
 3695   if (num_xmm_registers > 16) {
 3696      result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
 3697   }
 3698   return result;
 3699 #else
 3700   return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
 3701 #endif
 3702 }
 3703 
 3704 // C1 only ever uses the first double/float of the XMM register.
 3705 static int xmm_save_size() { return sizeof(double); }
 3706 
 3707 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 3708   masm->movdbl(Address(rsp, offset), reg);
 3709 }
 3710 
 3711 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 3712   masm->movdbl(reg, Address(rsp, offset));
 3713 }
 3714 
 3715 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
 3716                                   bool save_fpu, int& gp_area_size, int& xmm_area_size) {
 3717 
 3718   gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
 3719                          StackAlignmentInBytes);
 3720   xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
 3721 
 3722   return gp_area_size + xmm_area_size;
 3723 }
 3724 
 3725 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
 3726   block_comment("push_call_clobbered_registers start");
 3727   // Regular registers
 3728   RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
 3729 
 3730   int gp_area_size;
 3731   int xmm_area_size;
 3732   int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
 3733                                                gp_area_size, xmm_area_size);
 3734   subptr(rsp, total_save_size);
 3735 
 3736   push_set(gp_registers_to_push, 0);
 3737 
 3738   if (save_fpu) {
 3739     push_set(call_clobbered_xmm_registers(), gp_area_size);
 3740   }
 3741 
 3742   block_comment("push_call_clobbered_registers end");
 3743 }
 3744 
 3745 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
 3746   block_comment("pop_call_clobbered_registers start");
 3747 
 3748   RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
 3749 
 3750   int gp_area_size;
 3751   int xmm_area_size;
 3752   int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
 3753                                                gp_area_size, xmm_area_size);
 3754 
 3755   if (restore_fpu) {
 3756     pop_set(call_clobbered_xmm_registers(), gp_area_size);
 3757   }
 3758 
 3759   pop_set(gp_registers_to_pop, 0);
 3760 
 3761   addptr(rsp, total_save_size);
 3762 
 3763   vzeroupper();
 3764 
 3765   block_comment("pop_call_clobbered_registers end");
 3766 }
 3767 
 3768 void MacroAssembler::push_set(XMMRegSet set, int offset) {
 3769   assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
 3770   int spill_offset = offset;
 3771 
 3772   for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
 3773     save_xmm_register(this, spill_offset, *it);
 3774     spill_offset += xmm_save_size();
 3775   }
 3776 }
 3777 
 3778 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
 3779   int restore_size = set.size() * xmm_save_size();
 3780   assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
 3781 
 3782   int restore_offset = offset + restore_size - xmm_save_size();
 3783 
 3784   for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
 3785     restore_xmm_register(this, restore_offset, *it);
 3786     restore_offset -= xmm_save_size();
 3787   }
 3788 }
 3789 
 3790 void MacroAssembler::push_set(RegSet set, int offset) {
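        // offset == -1 means the caller has not reserved stack space; carve out an
        // aligned area here and spill the registers starting at the bottom of it.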
 3791   int spill_offset;
 3792   if (offset == -1) {
 3793     int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3794     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 3795     subptr(rsp, aligned_size);
 3796     spill_offset = 0;
 3797   } else {
 3798     spill_offset = offset;
 3799   }
 3800 
 3801   for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
 3802     movptr(Address(rsp, spill_offset), *it);
 3803     spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3804   }
 3805 }
 3806 
 3807 void MacroAssembler::pop_set(RegSet set, int offset) {
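        // Mirror of push_set: restore in reverse order and, when offset == -1,
        // release the aligned area that push_set reserved.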
 3808 
 3809   int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3810   int restore_size = set.size() * gp_reg_size;
 3811   int aligned_size = align_up(restore_size, StackAlignmentInBytes);
 3812 
 3813   int restore_offset;
 3814   if (offset == -1) {
 3815     restore_offset = restore_size - gp_reg_size;
 3816   } else {
 3817     restore_offset = offset + restore_size - gp_reg_size;
 3818   }
 3819   for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
 3820     movptr(*it, Address(rsp, restore_offset));
 3821     restore_offset -= gp_reg_size;
 3822   }
 3823 
 3824   if (offset == -1) {
 3825     addptr(rsp, aligned_size);
 3826   }
 3827 }
 3828 
 3829 // Preserves the contents of address, destroys the contents of length_in_bytes and temp.
 3830 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
 3831   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
 3832   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
 3833   Label done;
 3834 
 3835   testptr(length_in_bytes, length_in_bytes);
 3836   jcc(Assembler::zero, done);
 3837 
 3838   // initialize topmost word, divide index by 2, check if odd and test if zero
 3839   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 3840 #ifdef ASSERT
 3841   {
 3842     Label L;
 3843     testptr(length_in_bytes, BytesPerWord - 1);
 3844     jcc(Assembler::zero, L);
 3845     stop("length must be a multiple of BytesPerWord");
 3846     bind(L);
 3847   }
 3848 #endif
 3849   Register index = length_in_bytes;
 3850   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3851   if (UseIncDec) {
 3852     shrptr(index, 3);  // divide by 8 and set carry flag if bit 2 was set
 3853   } else {
 3854     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3855     shrptr(index, 1);
 3856   }
 3857 
 3858   // initialize remaining object fields: index is a multiple of 2 now
 3859   {
 3860     Label loop;
 3861     bind(loop);
 3862     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3863     decrement(index);
 3864     jcc(Assembler::notZero, loop);
 3865   }
 3866 
 3867   bind(done);
 3868 }
 3869 
 3870 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
 3871   inline_layout_info(holder_klass, index, inline_klass);
 3872   movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
 3873 }
 3874 
 3875 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
 3876   movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
 3877 #ifdef ASSERT
 3878   {
 3879     Label done;
 3880     cmpptr(layout_info, 0);
 3881     jcc(Assembler::notEqual, done);
 3882     stop("inline_layout_info_array is null");
 3883     bind(done);
 3884   }
 3885 #endif
 3886 
 3887   InlineLayoutInfo array[2];
 3888   int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements
 3889   if (is_power_of_2(size)) {
 3890     shll(index, log2i_exact(size)); // Scale index by power of 2
 3891   } else {
 3892     imull(index, index, size); // Scale the index to be the entry index * array_element_size
 3893   }
 3894   lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes()));
 3895 }
 3896 
 3897 // Look up the method for a megamorphic invokeinterface call.
 3898 // The target method is determined by <intf_klass, itable_index>.
 3899 // The receiver klass is in recv_klass.
 3900 // On success, the result will be in method_result, and execution falls through.
 3901 // On failure, execution transfers to the given label.
 3902 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3903                                              Register intf_klass,
 3904                                              RegisterOrConstant itable_index,
 3905                                              Register method_result,
 3906                                              Register scan_temp,
 3907                                              Label& L_no_such_interface,
 3908                                              bool return_method) {
 3909   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3910   assert_different_registers(method_result, intf_klass, scan_temp);
 3911   assert(recv_klass != method_result || !return_method,
 3912          "recv_klass can be destroyed when method isn't needed");
 3913 
 3914   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3915          "caller must use same register for non-constant itable index as for method");
 3916 
 3917   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
 3918   int vtable_base = in_bytes(Klass::vtable_start_offset());
 3919   int itentry_off = in_bytes(itableMethodEntry::method_offset());
 3920   int scan_step   = itableOffsetEntry::size() * wordSize;
 3921   int vte_size    = vtableEntry::size_in_bytes();
 3922   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 3923   assert(vte_size == wordSize, "else adjust times_vte_scale");
 3924 
 3925   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 3926 
 3927   // Could store the aligned, prescaled offset in the klass.
 3928   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
 3929 
 3930   if (return_method) {
 3931     // Adjust recv_klass by scaled itable_index, so we can free itable_index.
 3932     assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 3933     lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
 3934   }
 3935 
 3936   // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
 3937   //   if (scan->interface() == intf) {
 3938   //     result = (klass + scan->offset() + itable_index);
 3939   //   }
 3940   // }
 3941   Label search, found_method;
 3942 
 3943   for (int peel = 1; peel >= 0; peel--) {
 3944     movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
 3945     cmpptr(intf_klass, method_result);
 3946 
 3947     if (peel) {
 3948       jccb(Assembler::equal, found_method);
 3949     } else {
 3950       jccb(Assembler::notEqual, search);
 3951       // (invert the test to fall through to found_method...)
 3952     }
 3953 
 3954     if (!peel)  break;
 3955 
 3956     bind(search);
 3957 
 3958     // Check that the previous entry is non-null.  A null entry means that
 3959     // the receiver class doesn't implement the interface, and wasn't the
 3960     // same as when the caller was compiled.
 3961     testptr(method_result, method_result);
 3962     jcc(Assembler::zero, L_no_such_interface);
 3963     addptr(scan_temp, scan_step);
 3964   }
 3965 
 3966   bind(found_method);
 3967 
 3968   if (return_method) {
 3969     // Got a hit.
 3970     movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
 3971     movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
 3972   }
 3973 }
 3974 
 3975 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
 3976 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
 3977 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
 3978 // The target method is determined by <holder_klass, itable_index>.
 3979 // The receiver klass is in recv_klass.
 3980 // On success, the result will be in method_result, and execution falls through.
 3981 // On failure, execution transfers to the given label.
 3982 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
 3983                                                   Register holder_klass,
 3984                                                   Register resolved_klass,
 3985                                                   Register method_result,
 3986                                                   Register scan_temp,
 3987                                                   Register temp_reg2,
 3988                                                   Register receiver,
 3989                                                   int itable_index,
 3990                                                   Label& L_no_such_interface) {
 3991   assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
 3992   Register temp_itbl_klass = method_result;
 3993   Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl
 3994 
 3995   int vtable_base = in_bytes(Klass::vtable_start_offset());
 3996   int itentry_off = in_bytes(itableMethodEntry::method_offset());
 3997   int scan_step = itableOffsetEntry::size() * wordSize;
 3998   int vte_size = vtableEntry::size_in_bytes();
 3999   int ioffset = in_bytes(itableOffsetEntry::interface_offset());
 4000   int ooffset = in_bytes(itableOffsetEntry::offset_offset());
 4001   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 4002   assert(vte_size == wordSize, "adjust times_vte_scale");
 4003 
 4004   Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
 4005 
 4006   // temp_itbl_klass = recv_klass.itable[0]
 4007   // scan_temp = &recv_klass.itable[0] + step
 4008   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 4009   movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
 4010   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
 4011   xorptr(temp_reg, temp_reg);
 4012 
 4013   // Initial checks:
 4014   //   - if (holder_klass != resolved_klass), go to "scan for resolved"
 4015   //   - if (itable[0] == 0), no such interface
 4016   //   - if (itable[0] == holder_klass), shortcut to "holder found"
 4017   cmpptr(holder_klass, resolved_klass);
 4018   jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
 4019   testptr(temp_itbl_klass, temp_itbl_klass);
 4020   jccb(Assembler::zero, L_no_such_interface);
 4021   cmpptr(holder_klass, temp_itbl_klass);
 4022   jccb(Assembler::equal, L_holder_found);
 4023 
 4024   // Loop: Look for holder_klass record in itable
 4025   //   do {
 4026   //     tmp = itable[index];
 4027   //     index += step;
 4028   //     if (tmp == holder_klass) {
 4029   //       goto L_holder_found; // Found!
 4030   //     }
 4031   //   } while (tmp != 0);
 4032   //   goto L_no_such_interface // Not found.
 4033   Label L_scan_holder;
 4034   bind(L_scan_holder);
 4035     movptr(temp_itbl_klass, Address(scan_temp, 0));
 4036     addptr(scan_temp, scan_step);
 4037     cmpptr(holder_klass, temp_itbl_klass);
 4038     jccb(Assembler::equal, L_holder_found);
 4039     testptr(temp_itbl_klass, temp_itbl_klass);
 4040     jccb(Assembler::notZero, L_scan_holder);
 4041 
 4042   jmpb(L_no_such_interface);
 4043 
 4044   // Loop: Look for resolved_class record in itable
 4045   //   do {
 4046   //     tmp = itable[index];
 4047   //     index += step;
 4048   //     if (tmp == holder_klass) {
 4049   //        // Also check if we have met a holder klass
 4050   //        holder_tmp = itable[index-step-ioffset];
 4051   //     }
 4052   //     if (tmp == resolved_klass) {
 4053   //        goto L_resolved_found;  // Found!
 4054   //     }
 4055   //   } while (tmp != 0);
 4056   //   goto L_no_such_interface // Not found.
 4057   //
 4058   Label L_loop_scan_resolved;
 4059   bind(L_loop_scan_resolved);
 4060     movptr(temp_itbl_klass, Address(scan_temp, 0));
 4061     addptr(scan_temp, scan_step);
 4062     bind(L_loop_scan_resolved_entry);
 4063     cmpptr(holder_klass, temp_itbl_klass);
 4064     cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
 4065     cmpptr(resolved_klass, temp_itbl_klass);
 4066     jccb(Assembler::equal, L_resolved_found);
 4067     testptr(temp_itbl_klass, temp_itbl_klass);
 4068     jccb(Assembler::notZero, L_loop_scan_resolved);
 4069 
 4070   jmpb(L_no_such_interface);
 4071 
 4072   Label L_ready;
 4073 
 4074   // See if we already have a holder klass. If not, go and scan for it.
 4075   bind(L_resolved_found);
 4076   testptr(temp_reg, temp_reg);
 4077   jccb(Assembler::zero, L_scan_holder);
 4078   jmpb(L_ready);
 4079 
 4080   bind(L_holder_found);
 4081   movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
 4082 
 4083   // Finally, temp_reg contains holder_klass vtable offset
 4084   bind(L_ready);
 4085   assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 4086   if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl
 4087     load_klass(scan_temp, receiver, noreg);
 4088     movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
 4089   } else {
 4090     movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
 4091   }
 4092 }
 4093 
 4094 
 4095 // virtual method calling
 4096 void MacroAssembler::lookup_virtual_method(Register recv_klass,
 4097                                            RegisterOrConstant vtable_index,
 4098                                            Register method_result) {
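        // method_result = recv_klass->vtable()[vtable_index].method()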
 4099   const ByteSize base = Klass::vtable_start_offset();
 4100   assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
 4101   Address vtable_entry_addr(recv_klass,
 4102                             vtable_index, Address::times_ptr,
 4103                             base + vtableEntry::method_offset());
 4104   movptr(method_result, vtable_entry_addr);
 4105 }
 4106 
 4107 
 4108 void MacroAssembler::check_klass_subtype(Register sub_klass,
 4109                            Register super_klass,
 4110                            Register temp_reg,
 4111                            Label& L_success) {
 4112   Label L_failure;
 4113   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, nullptr);
 4114   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
 4115   bind(L_failure);
 4116 }
 4117 
 4118 
 4119 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
 4120                                                    Register super_klass,
 4121                                                    Register temp_reg,
 4122                                                    Label* L_success,
 4123                                                    Label* L_failure,
 4124                                                    Label* L_slow_path,
 4125                                         RegisterOrConstant super_check_offset) {
 4126   assert_different_registers(sub_klass, super_klass, temp_reg);
 4127   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
 4128   if (super_check_offset.is_register()) {
 4129     assert_different_registers(sub_klass, super_klass,
 4130                                super_check_offset.as_register());
 4131   } else if (must_load_sco) {
 4132     assert(temp_reg != noreg, "supply either a temp or a register offset");
 4133   }
 4134 
 4135   Label L_fallthrough;
 4136   int label_nulls = 0;
 4137   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4138   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4139   if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
 4140   assert(label_nulls <= 1, "at most one null in the batch");
 4141 
 4142   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4143   int sco_offset = in_bytes(Klass::super_check_offset_offset());
 4144   Address super_check_offset_addr(super_klass, sco_offset);
 4145 
 4146   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
 4147   // range of a jccb.  If this routine grows larger, reconsider at
 4148   // least some of these.
 4149 #define local_jcc(assembler_cond, label)                                \
 4150   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
 4151   else                             jcc( assembler_cond, label) /*omit semi*/
 4152 
 4153   // Hacked jmp, which may only be used just before L_fallthrough.
 4154 #define final_jmp(label)                                                \
 4155   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
 4156   else                            jmp(label)                /*omit semi*/
 4157 
 4158   // If the pointers are equal, we are done (e.g., String[] elements).
 4159   // This self-check enables sharing of secondary supertype arrays among
 4160   // non-primary types such as array-of-interface.  Otherwise, each such
 4161   // type would need its own customized SSA.
 4162   // We move this check to the front of the fast path because many
 4163   // type checks are in fact trivially successful in this manner,
 4164   // so we get a nicely predicted branch right at the start of the check.
 4165   cmpptr(sub_klass, super_klass);
 4166   local_jcc(Assembler::equal, *L_success);
 4167 
 4168   // Check the supertype display:
 4169   if (must_load_sco) {
 4170     // Positive movl does right thing on LP64.
 4171     movl(temp_reg, super_check_offset_addr);
 4172     super_check_offset = RegisterOrConstant(temp_reg);
 4173   }
 4174   Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
 4175   cmpptr(super_klass, super_check_addr); // load displayed supertype
 4176 
 4177   // This check has worked decisively for primary supers.
 4178   // Secondary supers are sought in the super_cache ('super_cache_addr').
 4179   // (Secondary supers are interfaces and very deeply nested subtypes.)
 4180   // This works in the same check above because of a tricky aliasing
 4181   // between the super_cache and the primary super display elements.
 4182   // (The 'super_check_addr' can address either, as the case requires.)
 4183   // Note that the cache is updated below if it does not help us find
 4184   // what we need immediately.
 4185   // So if it was a primary super, we can just fail immediately.
 4186   // Otherwise, it's the slow path for us (no success at this point).
 4187 
 4188   if (super_check_offset.is_register()) {
 4189     local_jcc(Assembler::equal, *L_success);
 4190     cmpl(super_check_offset.as_register(), sc_offset);
 4191     if (L_failure == &L_fallthrough) {
 4192       local_jcc(Assembler::equal, *L_slow_path);
 4193     } else {
 4194       local_jcc(Assembler::notEqual, *L_failure);
 4195       final_jmp(*L_slow_path);
 4196     }
 4197   } else if (super_check_offset.as_constant() == sc_offset) {
 4198     // Need a slow path; fast failure is impossible.
 4199     if (L_slow_path == &L_fallthrough) {
 4200       local_jcc(Assembler::equal, *L_success);
 4201     } else {
 4202       local_jcc(Assembler::notEqual, *L_slow_path);
 4203       final_jmp(*L_success);
 4204     }
 4205   } else {
 4206     // No slow path; it's a fast decision.
 4207     if (L_failure == &L_fallthrough) {
 4208       local_jcc(Assembler::equal, *L_success);
 4209     } else {
 4210       local_jcc(Assembler::notEqual, *L_failure);
 4211       final_jmp(*L_success);
 4212     }
 4213   }
 4214 
 4215   bind(L_fallthrough);
 4216 
 4217 #undef local_jcc
 4218 #undef final_jmp
 4219 }
 4220 
 4221 
 4222 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
 4223                                                           Register super_klass,
 4224                                                           Register temp_reg,
 4225                                                           Register temp2_reg,
 4226                                                           Label* L_success,
 4227                                                           Label* L_failure,
 4228                                                           bool set_cond_codes) {
 4229   assert_different_registers(sub_klass, super_klass, temp_reg);
 4230   if (temp2_reg != noreg)
 4231     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
 4232 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
 4233 
 4234   Label L_fallthrough;
 4235   int label_nulls = 0;
 4236   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4237   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4238   assert(label_nulls <= 1, "at most one null in the batch");
 4239 
 4240   // a couple of useful fields in sub_klass:
 4241   int ss_offset = in_bytes(Klass::secondary_supers_offset());
 4242   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4243   Address secondary_supers_addr(sub_klass, ss_offset);
 4244   Address super_cache_addr(     sub_klass, sc_offset);
 4245 
 4246   // Do a linear scan of the secondary super-klass chain.
 4247   // This code is rarely used, so simplicity is a virtue here.
 4248   // The repne_scan instruction uses fixed registers, which we must spill.
 4249   // Don't worry too much about pre-existing connections with the input regs.
 4250 
 4251   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
 4252   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
 4253 
 4254   // Get super_klass value into rax (even if it was in rdi or rcx).
 4255   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
 4256   if (super_klass != rax) {
 4257     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
 4258     mov(rax, super_klass);
 4259   }
 4260   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
 4261   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
 4262 
 4263 #ifndef PRODUCT
 4264   uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
 4265   ExternalAddress pst_counter_addr((address) pst_counter);
 4266   lea(rcx, pst_counter_addr);
 4267   incrementl(Address(rcx, 0));
 4268 #endif //PRODUCT
 4269 
 4270   // We will consult the secondary-super array.
 4271   movptr(rdi, secondary_supers_addr);
  // Load the array length.  (The length is positive, so a zero-extending movl is correct on LP64.)
 4273   movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
 4274   // Skip to start of data.
 4275   addptr(rdi, Array<Klass*>::base_offset_in_bytes());
 4276 
  // Scan RCX words at [RDI] for an occurrence of RAX.
  // The Z flag is set by the last compare.
  // If RCX == 0, 'repne' executes no scas iteration and leaves the flags
  // unchanged, so pre-set Z = 0 (not equal) to report that the class was
  // not found.
 4282 
  testptr(rax, rax); // Set Z = 0
  repne_scan();
 4285 
 4286   // Unspill the temp. registers:
 4287   if (pushed_rdi)  pop(rdi);
 4288   if (pushed_rcx)  pop(rcx);
 4289   if (pushed_rax)  pop(rax);
 4290 
 4291   if (set_cond_codes) {
 4292     // Special hack for the AD files:  rdi is guaranteed non-zero.
 4293     assert(!pushed_rdi, "rdi must be left non-null");
 4294     // Also, the condition codes are properly set Z/NZ on succeed/failure.
 4295   }
 4296 
  if (L_failure == &L_fallthrough) {
    jccb(Assembler::notEqual, *L_failure);
  } else {
    jcc(Assembler::notEqual, *L_failure);
  }
 4300 
 4301   // Success.  Cache the super we found and proceed in triumph.
 4302   movptr(super_cache_addr, super_klass);
 4303 
 4304   if (L_success != &L_fallthrough) {
 4305     jmp(*L_success);
 4306   }
 4307 
 4308 #undef IS_A_TEMP
 4309 
 4310   bind(L_fallthrough);
 4311 }
 4312 
 4313 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4314                                                    Register super_klass,
 4315                                                    Register temp_reg,
 4316                                                    Register temp2_reg,
 4317                                                    Label* L_success,
 4318                                                    Label* L_failure,
 4319                                                    bool set_cond_codes) {
 4320   assert(set_cond_codes == false, "must be false on 64-bit x86");
 4321   check_klass_subtype_slow_path
 4322     (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
 4323      L_success, L_failure);
 4324 }
 4325 
 4326 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4327                                                    Register super_klass,
 4328                                                    Register temp_reg,
 4329                                                    Register temp2_reg,
 4330                                                    Register temp3_reg,
 4331                                                    Register temp4_reg,
 4332                                                    Label* L_success,
 4333                                                    Label* L_failure) {
 4334   if (UseSecondarySupersTable) {
 4335     check_klass_subtype_slow_path_table
 4336       (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
 4337        L_success, L_failure);
 4338   } else {
 4339     check_klass_subtype_slow_path_linear
 4340       (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
 4341   }
 4342 }
 4343 
Register MacroAssembler::allocate_if_noreg(Register r,
                                           RegSetIterator<Register> &available_regs,
                                           RegSet &regs_to_push) {
 4347   if (!r->is_valid()) {
 4348     r = *available_regs++;
 4349     regs_to_push += r;
 4350   }
 4351   return r;
 4352 }
 4353 
 4354 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
 4355                                                          Register super_klass,
 4356                                                          Register temp_reg,
 4357                                                          Register temp2_reg,
 4358                                                          Register temp3_reg,
 4359                                                          Register result_reg,
 4360                                                          Label* L_success,
 4361                                                          Label* L_failure) {
 4362   // NB! Callers may assume that, when temp2_reg is a valid register,
 4363   // this code sets it to a nonzero value.
 4364   bool temp2_reg_was_valid = temp2_reg->is_valid();
 4365 
 4366   RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
 4367 
 4368   Label L_fallthrough;
 4369   int label_nulls = 0;
 4370   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4371   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4372   assert(label_nulls <= 1, "at most one null in the batch");
 4373 
 4374   BLOCK_COMMENT("check_klass_subtype_slow_path_table");
 4375 
 4376   RegSetIterator<Register> available_regs
 4377     = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
 4378 
 4379   RegSet pushed_regs;
 4380 
 4381   temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
 4382   temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
 4383   temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
 4384   result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
 4385   Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
 4386 
 4387   assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
 4388 
 4389   {
 4390 
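    // Reserve an aligned stack area and spill the registers claimed from the
    // available set above.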
 4391     int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 4392     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 4393     subptr(rsp, aligned_size);
 4394     push_set(pushed_regs, 0);
 4395 
 4396     lookup_secondary_supers_table_var(sub_klass,
 4397                                       super_klass,
 4398                                       temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
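    // result_reg is zero on success. Set the flags now, before the spills are
    // undone: pop_set, the flag-preserving lea below, and the movq of temp2_reg
    // do not clobber them.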
 4399     cmpq(result_reg, 0);
 4400 
 4401     // Unspill the temp. registers:
 4402     pop_set(pushed_regs, 0);
 4403     // Increment SP but do not clobber flags.
 4404     lea(rsp, Address(rsp, aligned_size));
 4405   }
 4406 
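  // Fulfil the contract stated above: callers that passed a valid temp2_reg
  // may assume it now holds a nonzero value.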
 4407   if (temp2_reg_was_valid) {
 4408     movq(temp2_reg, 1);
 4409   }
 4410 
 4411   jcc(Assembler::notEqual, *L_failure);
 4412 
 4413   if (L_success != &L_fallthrough) {
 4414     jmp(*L_success);
 4415   }
 4416 
 4417   bind(L_fallthrough);
 4418 }
 4419 
 4420 // population_count variant for running without the POPCNT
 4421 // instruction, which was introduced with SSE4.2 in 2008.
 4422 void MacroAssembler::population_count(Register dst, Register src,
 4423                                       Register scratch1, Register scratch2) {
 4424   assert_different_registers(src, scratch1, scratch2);
 4425   if (UsePopCountInstruction) {
 4426     Assembler::popcntq(dst, src);
 4427   } else {
 4429     assert_different_registers(dst, scratch1, scratch2);
 4430     Label loop, done;
 4431 
 4432     mov(scratch1, src);
 4433     // dst = 0;
 4434     // while(scratch1 != 0) {
 4435     //   dst++;
 4436     //   scratch1 &= (scratch1 - 1);
 4437     // }
 4438     xorl(dst, dst);
 4439     testq(scratch1, scratch1);
 4440     jccb(Assembler::equal, done);
 4441     {
 4442       bind(loop);
 4443       incq(dst);
 4444       movq(scratch2, scratch1);
 4445       decq(scratch2);
 4446       andq(scratch1, scratch2);
 4447       jccb(Assembler::notEqual, loop);
 4448     }
 4449     bind(done);
 4450   }
 4451 #ifdef ASSERT
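  // Clobber the scratch registers (debug builds only) so that callers do not
  // accidentally rely on their contents.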
 4452   mov64(scratch1, 0xCafeBabeDeadBeef);
 4453   movq(scratch2, scratch1);
 4454 #endif
 4455 }
 4456 
 4457 // Ensure that the inline code and the stub are using the same registers.
 4458 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
 4459 do {                                                                 \
 4460   assert(r_super_klass  == rax, "mismatch");                         \
 4461   assert(r_array_base   == rbx, "mismatch");                         \
 4462   assert(r_array_length == rcx, "mismatch");                         \
 4463   assert(r_array_index  == rdx, "mismatch");                         \
 4464   assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
 4465   assert(r_bitmap       == r11 || r_bitmap    == noreg, "mismatch"); \
 4466   assert(result         == rdi || result      == noreg, "mismatch"); \
 4467 } while(0)
 4468 
 4469 // Versions of salq and rorq that don't need count to be in rcx
 4470 
 4471 void MacroAssembler::salq(Register dest, Register count) {
 4472   if (count == rcx) {
 4473     Assembler::salq(dest);
 4474   } else {
 4475     assert_different_registers(rcx, dest);
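    // Swap the count into rcx, do the shift, then swap back so that rcx and
    // count end up with their original values.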
 4476     xchgq(rcx, count);
 4477     Assembler::salq(dest);
 4478     xchgq(rcx, count);
 4479   }
 4480 }
 4481 
 4482 void MacroAssembler::rorq(Register dest, Register count) {
 4483   if (count == rcx) {
 4484     Assembler::rorq(dest);
 4485   } else {
 4486     assert_different_registers(rcx, dest);
 4487     xchgq(rcx, count);
 4488     Assembler::rorq(dest);
 4489     xchgq(rcx, count);
 4490   }
 4491 }
 4492 
 4495 // At runtime, return 0 in result if r_super_klass is a superclass of
 4496 // r_sub_klass, otherwise return nonzero. Use this if you know the
 4497 // super_klass_slot of the class you're looking for. This is always
 4498 // the case for instanceof and checkcast.
 4499 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
 4500                                                          Register r_super_klass,
 4501                                                          Register temp1,
 4502                                                          Register temp2,
 4503                                                          Register temp3,
 4504                                                          Register temp4,
 4505                                                          Register result,
 4506                                                          u1 super_klass_slot) {
 4507   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
 4508 
 4509   Label L_fallthrough, L_success, L_failure;
 4510 
 4511   BLOCK_COMMENT("lookup_secondary_supers_table {");
 4512 
 4513   const Register
 4514     r_array_index  = temp1,
 4515     r_array_length = temp2,
 4516     r_array_base   = temp3,
 4517     r_bitmap       = temp4;
 4518 
 4519   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
 4520 
 4521   xorq(result, result); // = 0
 4522 
 4523   movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
 4524   movq(r_array_index, r_bitmap);
 4525 
 4526   // First check the bitmap to see if super_klass might be present. If
 4527   // the bit is zero, we are certain that super_klass is not one of
 4528   // the secondary supers.
 4529   u1 bit = super_klass_slot;
 4530   {
    // NB: If the count in an x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
 4533     int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
 4534     if (shift_count != 0) {
 4535       salq(r_array_index, shift_count);
 4536     } else {
 4537       testq(r_array_index, r_array_index);
 4538     }
 4539   }
 4540   // We test the MSB of r_array_index, i.e. its sign bit
 4541   jcc(Assembler::positive, L_failure);
 4542 
 4543   // Get the first array index that can contain super_klass into r_array_index.
 4544   if (bit != 0) {
 4545     population_count(r_array_index, r_array_index, temp2, temp3);
 4546   } else {
 4547     movl(r_array_index, 1);
 4548   }
 4549   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
 4550 
 4551   // We will consult the secondary-super array.
 4552   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4553 
 4554   // We're asserting that the first word in an Array<Klass*> is the
 4555   // length, and the second word is the first word of the data. If
 4556   // that ever changes, r_array_base will have to be adjusted here.
 4557   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
 4558   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
 4559 
 4560   cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4561   jccb(Assembler::equal, L_success);
 4562 
 4563   // Is there another entry to check? Consult the bitmap.
 4564   btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
 4565   jccb(Assembler::carryClear, L_failure);
 4566 
 4567   // Linear probe. Rotate the bitmap so that the next bit to test is
 4568   // in Bit 1.
 4569   if (bit != 0) {
 4570     rorq(r_bitmap, bit);
 4571   }
 4572 
 4573   // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
 4574   // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
 4575   // Kills: r_array_length.
 4576   // Returns: result.
 4577   call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
 4578   // Result (0/1) is in rdi
 4579   jmpb(L_fallthrough);
 4580 
 4581   bind(L_failure);
 4582   incq(result); // 0 => 1
 4583 
 4584   bind(L_success);
 4585   // result = 0;
 4586 
 4587   bind(L_fallthrough);
 4588   BLOCK_COMMENT("} lookup_secondary_supers_table");
 4589 
 4590   if (VerifySecondarySupers) {
 4591     verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
 4592                                   temp1, temp2, temp3);
 4593   }
 4594 }
 4595 
 4596 // At runtime, return 0 in result if r_super_klass is a superclass of
 4597 // r_sub_klass, otherwise return nonzero. Use this version of
 4598 // lookup_secondary_supers_table() if you don't know ahead of time
 4599 // which superclass will be searched for. Used by interpreter and
 4600 // runtime stubs. It is larger and has somewhat greater latency than
 4601 // the version above, which takes a constant super_klass_slot.
 4602 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
 4603                                                        Register r_super_klass,
 4604                                                        Register temp1,
 4605                                                        Register temp2,
 4606                                                        Register temp3,
 4607                                                        Register temp4,
 4608                                                        Register result) {
 4609   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
 4610   assert_different_registers(r_sub_klass, r_super_klass, rcx);
 4611   RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
 4612 
 4613   Label L_fallthrough, L_success, L_failure;
 4614 
 4615   BLOCK_COMMENT("lookup_secondary_supers_table {");
 4616 
 4617   RegSetIterator<Register> available_regs = (temps - rcx).begin();
 4618 
 4619   // FIXME. Once we are sure that all paths reaching this point really
 4620   // do pass rcx as one of our temps we can get rid of the following
 4621   // workaround.
 4622   assert(temps.contains(rcx), "fix this code");
 4623 
 4624   // We prefer to have our shift count in rcx. If rcx is one of our
 4625   // temps, use it for slot. If not, pick any of our temps.
 4626   Register slot;
 4627   if (!temps.contains(rcx)) {
 4628     slot = *available_regs++;
 4629   } else {
 4630     slot = rcx;
 4631   }
 4632 
 4633   const Register r_array_index = *available_regs++;
 4634   const Register r_bitmap      = *available_regs++;
 4635 
 4636   // The logic above guarantees this property, but we state it here.
 4637   assert_different_registers(r_array_index, r_bitmap, rcx);
 4638 
 4639   movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
 4640   movq(r_array_index, r_bitmap);
 4641 
 4642   // First check the bitmap to see if super_klass might be present. If
 4643   // the bit is zero, we are certain that super_klass is not one of
 4644   // the secondary supers.
 4645   movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
 4646   xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
 4647   salq(r_array_index, slot);
 4648 
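  // The shift count is a variable and may be zero; a zero-count shift leaves
  // the flags unchanged, so test explicitly before branching on the sign bit.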
 4649   testq(r_array_index, r_array_index);
 4650   // We test the MSB of r_array_index, i.e. its sign bit
 4651   jcc(Assembler::positive, L_failure);
 4652 
 4653   const Register r_array_base = *available_regs++;
 4654 
 4655   // Get the first array index that can contain super_klass into r_array_index.
 4656   // Note: Clobbers r_array_base and slot.
 4657   population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
 4658 
 4659   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
 4660 
 4661   // We will consult the secondary-super array.
 4662   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4663 
 4664   // We're asserting that the first word in an Array<Klass*> is the
 4665   // length, and the second word is the first word of the data. If
 4666   // that ever changes, r_array_base will have to be adjusted here.
 4667   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
 4668   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
 4669 
 4670   cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4671   jccb(Assembler::equal, L_success);
 4672 
 4673   // Restore slot to its true value
 4674   movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
 4675 
 4676   // Linear probe. Rotate the bitmap so that the next bit to test is
 4677   // in Bit 1.
 4678   rorq(r_bitmap, slot);
 4679 
 4680   // Is there another entry to check? Consult the bitmap.
 4681   btq(r_bitmap, 1);
 4682   jccb(Assembler::carryClear, L_failure);
 4683 
 4684   // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
 4685   // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
 4686   // Kills: r_array_length.
 4687   // Returns: result.
 4688   lookup_secondary_supers_table_slow_path(r_super_klass,
 4689                                           r_array_base,
 4690                                           r_array_index,
 4691                                           r_bitmap,
 4692                                           /*temp1*/result,
 4693                                           /*temp2*/slot,
 4694                                           &L_success,
 4695                                           nullptr);
 4696 
 4697   bind(L_failure);
 4698   movq(result, 1);
 4699   jmpb(L_fallthrough);
 4700 
 4701   bind(L_success);
 4702   xorq(result, result); // = 0
 4703 
 4704   bind(L_fallthrough);
 4705   BLOCK_COMMENT("} lookup_secondary_supers_table");
 4706 
 4707   if (VerifySecondarySupers) {
 4708     verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
 4709                                   temp1, temp2, temp3);
 4710   }
 4711 }
 4712 
 4713 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
 4714                                  Label* L_success, Label* L_failure) {
 4715   Label L_loop, L_fallthrough;
 4716   {
 4717     int label_nulls = 0;
 4718     if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
 4719     if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
 4720     assert(label_nulls <= 1, "at most one null in the batch");
 4721   }
 4722   bind(L_loop);
 4723   cmpq(value, Address(addr, count, Address::times_8));
 4724   jcc(Assembler::equal, *L_success);
 4725   addl(count, 1);
 4726   cmpl(count, limit);
 4727   jcc(Assembler::less, L_loop);
 4728 
 4729   if (&L_fallthrough != L_failure) {
 4730     jmp(*L_failure);
 4731   }
 4732   bind(L_fallthrough);
 4733 }
 4734 
 4735 // Called by code generated by check_klass_subtype_slow_path
 4736 // above. This is called when there is a collision in the hashed
 4737 // lookup in the secondary supers array.
 4738 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
 4739                                                              Register r_array_base,
 4740                                                              Register r_array_index,
 4741                                                              Register r_bitmap,
 4742                                                              Register temp1,
 4743                                                              Register temp2,
 4744                                                              Label* L_success,
 4745                                                              Label* L_failure) {
 4746   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
 4747 
 4748   const Register
 4749     r_array_length = temp1,
 4750     r_sub_klass    = noreg,
 4751     result         = noreg;
 4752 
 4753   Label L_fallthrough;
 4754   int label_nulls = 0;
 4755   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4756   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4757   assert(label_nulls <= 1, "at most one null in the batch");
 4758 
 4759   // Load the array length.
 4760   movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
 4761   // And adjust the array base to point to the data.
 4762   // NB! Effectively increments current slot index by 1.
 4763   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
 4764   addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
 4765 
 4766   // Linear probe
 4767   Label L_huge;
 4768 
 4769   // The bitmap is full to bursting.
 4770   // Implicit invariant: BITMAP_FULL implies (length > 0)
 4771   cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
 4772   jcc(Assembler::greater, L_huge);
 4773 
 4774   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
 4775   // current slot (at secondary_supers[r_array_index]) has not yet
 4776   // been inspected, and r_array_index may be out of bounds if we
 4777   // wrapped around the end of the array.
 4778 
 4779   { // This is conventional linear probing, but instead of terminating
 4780     // when a null entry is found in the table, we maintain a bitmap
 4781     // in which a 0 indicates missing entries.
 4782     // The check above guarantees there are 0s in the bitmap, so the loop
 4783     // eventually terminates.
 4784 
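    // temp2 supplies the wrap-around index (0) for the cmov below.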
 4785     xorl(temp2, temp2); // = 0;
 4786 
 4787     Label L_again;
 4788     bind(L_again);
 4789 
 4790     // Check for array wraparound.
 4791     cmpl(r_array_index, r_array_length);
 4792     cmovl(Assembler::greaterEqual, r_array_index, temp2);
 4793 
 4794     cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4795     jcc(Assembler::equal, *L_success);
 4796 
 4797     // If the next bit in bitmap is zero, we're done.
 4798     btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
 4799     jcc(Assembler::carryClear, *L_failure);
 4800 
 4801     rorq(r_bitmap, 1); // Bits 1/2 => 0/1
 4802     addl(r_array_index, 1);
 4803 
 4804     jmp(L_again);
 4805   }
 4806 
 4807   { // Degenerate case: more than 64 secondary supers.
 4808     // FIXME: We could do something smarter here, maybe a vectorized
 4809     // comparison or a binary search, but is that worth any added
 4810     // complexity?
 4811     bind(L_huge);
 4812     xorl(r_array_index, r_array_index); // = 0
 4813     repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
 4814                 L_success,
 4815                 (&L_fallthrough != L_failure ? L_failure : nullptr));
 4816 
 4817     bind(L_fallthrough);
 4818   }
 4819 }
 4820 
 4821 struct VerifyHelperArguments {
 4822   Klass* _super;
 4823   Klass* _sub;
 4824   intptr_t _linear_result;
 4825   intptr_t _table_result;
 4826 };
 4827 
 4828 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
 4829   Klass::on_secondary_supers_verification_failure(args->_super,
 4830                                                   args->_sub,
 4831                                                   args->_linear_result,
 4832                                                   args->_table_result,
 4833                                                   msg);
 4834 }
 4835 
 4836 // Make sure that the hashed lookup and a linear scan agree.
 4837 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
 4838                                                    Register r_super_klass,
 4839                                                    Register result,
 4840                                                    Register temp1,
 4841                                                    Register temp2,
 4842                                                    Register temp3) {
 4843   const Register
 4844       r_array_index  = temp1,
 4845       r_array_length = temp2,
 4846       r_array_base   = temp3,
 4847       r_bitmap       = noreg;
 4848 
 4849   BLOCK_COMMENT("verify_secondary_supers_table {");
 4850 
 4851   Label L_success, L_failure, L_check, L_done;
 4852 
 4853   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4854   movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
 4855   // And adjust the array base to point to the data.
 4856   addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
 4857 
 4858   testl(r_array_length, r_array_length); // array_length == 0?
 4859   jcc(Assembler::zero, L_failure);
 4860 
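  // Linear scan of the whole secondary supers array, starting at index 0.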
 4861   movl(r_array_index, 0);
 4862   repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
 4863   // fall through to L_failure
 4864 
 4865   const Register linear_result = r_array_index; // reuse temp1
 4866 
 4867   bind(L_failure); // not present
 4868   movl(linear_result, 1);
 4869   jmp(L_check);
 4870 
 4871   bind(L_success); // present
 4872   movl(linear_result, 0);
 4873 
 4874   bind(L_check);
 4875   cmpl(linear_result, result);
 4876   jcc(Assembler::equal, L_done);
 4877 
 4878   { // To avoid calling convention issues, build a record on the stack
 4879     // and pass the pointer to that instead.
 4880     push(result);
 4881     push(linear_result);
 4882     push(r_sub_klass);
 4883     push(r_super_klass);
 4884     movptr(c_rarg1, rsp);
 4885     movptr(c_rarg0, (uintptr_t) "mismatch");
 4886     call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
 4887     should_not_reach_here();
 4888   }
 4889   bind(L_done);
 4890 
 4891   BLOCK_COMMENT("} verify_secondary_supers_table");
 4892 }
 4893 
 4894 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
 4895 
 4896 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
 4897   assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
 4898 
 4899   Label L_fallthrough;
 4900   if (L_fast_path == nullptr) {
 4901     L_fast_path = &L_fallthrough;
 4902   } else if (L_slow_path == nullptr) {
 4903     L_slow_path = &L_fallthrough;
 4904   }
 4905 
 4906   // Fast path check: class is fully initialized.
 4907   // init_state needs acquire, but x86 is TSO, and so we are already good.
 4908   cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
 4909   jcc(Assembler::equal, *L_fast_path);
 4910 
 4911   // Fast path check: current thread is initializer thread
 4912   cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
 4913   if (L_slow_path == &L_fallthrough) {
 4914     jcc(Assembler::equal, *L_fast_path);
 4915     bind(*L_slow_path);
 4916   } else if (L_fast_path == &L_fallthrough) {
 4917     jcc(Assembler::notEqual, *L_slow_path);
 4918     bind(*L_fast_path);
 4919   } else {
 4920     Unimplemented();
 4921   }
 4922 }
 4923 
 4924 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
 4925   if (VM_Version::supports_cmov()) {
 4926     cmovl(cc, dst, src);
 4927   } else {
 4928     Label L;
 4929     jccb(negate_condition(cc), L);
 4930     movl(dst, src);
 4931     bind(L);
 4932   }
 4933 }
 4934 
 4935 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4936   if (VM_Version::supports_cmov()) {
 4937     cmovl(cc, dst, src);
 4938   } else {
 4939     Label L;
 4940     jccb(negate_condition(cc), L);
 4941     movl(dst, src);
 4942     bind(L);
 4943   }
 4944 }
 4945 
 4946 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4947   if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (pushed below) confuses VerifyAdapterSharing,
    // because it may differ between otherwise equivalent adapters.
 4950     return;
 4951   }
 4952 
 4953   BLOCK_COMMENT("verify_oop {");
 4954   push(rscratch1);
 4955   push(rax);                          // save rax
 4956   push(reg);                          // pass register argument
 4957 
  // Build the error message and pass its address to the verify_oop subroutine
 4959   const char* b = nullptr;
 4960   {
 4961     ResourceMark rm;
 4962     stringStream ss;
 4963     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4964     b = code_string(ss.as_string());
 4965   }
 4966   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4967   pushptr(buffer.addr(), rscratch1);
 4968 
 4969   // call indirectly to solve generation ordering problem
 4970   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4971   call(rax);
 4972   // Caller pops the arguments (oop, message) and restores rax, r10
 4973   BLOCK_COMMENT("} verify_oop");
 4974 }
 4975 
 4976 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
 4977   if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
    // Only pcmpeqd has dependency-breaking treatment (i.e. execution can begin
    // without waiting for the previous result in dst); vpcmpeqd does not, so
    // use vpternlogd instead.
 4980     vpternlogd(dst, 0xFF, dst, dst, vector_len);
 4981   } else if (VM_Version::supports_avx()) {
 4982     vpcmpeqd(dst, dst, dst, vector_len);
 4983   } else {
 4984     pcmpeqd(dst, dst);
 4985   }
 4986 }
 4987 
 4988 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
 4989                                          int extra_slot_offset) {
 4990   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4991   int stackElementSize = Interpreter::stackElementSize;
 4992   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4993 #ifdef ASSERT
 4994   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4995   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4996 #endif
 4997   Register             scale_reg    = noreg;
 4998   Address::ScaleFactor scale_factor = Address::no_scale;
 4999   if (arg_slot.is_constant()) {
 5000     offset += arg_slot.as_constant() * stackElementSize;
 5001   } else {
 5002     scale_reg    = arg_slot.as_register();
 5003     scale_factor = Address::times(stackElementSize);
 5004   }
 5005   offset += wordSize;           // return PC is on stack
 5006   return Address(rsp, scale_reg, scale_factor, offset);
 5007 }
 5008 
 5009 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 5010   if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (pushed below) confuses VerifyAdapterSharing,
    // because it may differ between otherwise equivalent adapters.
 5013     return;
 5014   }
 5015 
 5016   push(rscratch1);
 5017   push(rax); // save rax,
  // addr may be rsp-relative, so we must adjust it for the two pushes we just did.
  // NOTE: the old 64-bit code had a bug here: it did movq(addr, rax), which
  // stores rax into addr -- the reverse of what was intended.
 5022   if (addr.uses(rsp)) {
 5023     lea(rax, addr);
 5024     pushptr(Address(rax, 2 * BytesPerWord));
 5025   } else {
 5026     pushptr(addr);
 5027   }
 5028 
  // Build the error message and pass its address to the verify_oop subroutine
 5030   const char* b = nullptr;
 5031   {
 5032     ResourceMark rm;
 5033     stringStream ss;
 5034     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
 5035     b = code_string(ss.as_string());
 5036   }
 5037   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 5038   pushptr(buffer.addr(), rscratch1);
 5039 
 5040   // call indirectly to solve generation ordering problem
 5041   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 5042   call(rax);
 5043   // Caller pops the arguments (addr, message) and restores rax, r10.
 5044 }
 5045 
 5046 void MacroAssembler::verify_tlab() {
 5047 #ifdef ASSERT
 5048   if (UseTLAB && VerifyOops) {
 5049     Label next, ok;
 5050     Register t1 = rsi;
 5051 
 5052     push(t1);
 5053 
 5054     movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 5055     cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
 5056     jcc(Assembler::aboveEqual, next);
 5057     STOP("assert(top >= start)");
 5058     should_not_reach_here();
 5059 
 5060     bind(next);
 5061     movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
 5062     cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 5063     jcc(Assembler::aboveEqual, ok);
 5064     STOP("assert(top <= end)");
 5065     should_not_reach_here();
 5066 
 5067     bind(ok);
 5068     pop(t1);
 5069   }
 5070 #endif
 5071 }
 5072 
 5073 class ControlWord {
 5074  public:
 5075   int32_t _value;
 5076 
 5077   int  rounding_control() const        { return  (_value >> 10) & 3      ; }
 5078   int  precision_control() const       { return  (_value >>  8) & 3      ; }
 5079   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5080   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5081   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5082   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5083   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5084   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5085 
 5086   void print() const {
 5087     // rounding control
 5088     const char* rc;
 5089     switch (rounding_control()) {
 5090       case 0: rc = "round near"; break;
 5091       case 1: rc = "round down"; break;
 5092       case 2: rc = "round up  "; break;
 5093       case 3: rc = "chop      "; break;
 5094       default:
 5095         rc = nullptr; // silence compiler warnings
 5096         fatal("Unknown rounding control: %d", rounding_control());
 5097     };
 5098     // precision control
 5099     const char* pc;
 5100     switch (precision_control()) {
 5101       case 0: pc = "24 bits "; break;
 5102       case 1: pc = "reserved"; break;
 5103       case 2: pc = "53 bits "; break;
 5104       case 3: pc = "64 bits "; break;
 5105       default:
 5106         pc = nullptr; // silence compiler warnings
 5107         fatal("Unknown precision control: %d", precision_control());
 5108     };
 5109     // flags
 5110     char f[9];
 5111     f[0] = ' ';
 5112     f[1] = ' ';
 5113     f[2] = (precision   ()) ? 'P' : 'p';
 5114     f[3] = (underflow   ()) ? 'U' : 'u';
 5115     f[4] = (overflow    ()) ? 'O' : 'o';
 5116     f[5] = (zero_divide ()) ? 'Z' : 'z';
 5117     f[6] = (denormalized()) ? 'D' : 'd';
 5118     f[7] = (invalid     ()) ? 'I' : 'i';
 5119     f[8] = '\x0';
 5120     // output
 5121     printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
 5122   }
 5123 
 5124 };
 5125 
 5126 class StatusWord {
 5127  public:
 5128   int32_t _value;
 5129 
 5130   bool busy() const                    { return ((_value >> 15) & 1) != 0; }
 5131   bool C3() const                      { return ((_value >> 14) & 1) != 0; }
 5132   bool C2() const                      { return ((_value >> 10) & 1) != 0; }
 5133   bool C1() const                      { return ((_value >>  9) & 1) != 0; }
 5134   bool C0() const                      { return ((_value >>  8) & 1) != 0; }
 5135   int  top() const                     { return  (_value >> 11) & 7      ; }
 5136   bool error_status() const            { return ((_value >>  7) & 1) != 0; }
 5137   bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
 5138   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5139   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5140   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5141   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5142   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5143   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5144 
 5145   void print() const {
 5146     // condition codes
 5147     char c[5];
 5148     c[0] = (C3()) ? '3' : '-';
 5149     c[1] = (C2()) ? '2' : '-';
 5150     c[2] = (C1()) ? '1' : '-';
 5151     c[3] = (C0()) ? '0' : '-';
 5152     c[4] = '\x0';
 5153     // flags
 5154     char f[9];
 5155     f[0] = (error_status()) ? 'E' : '-';
 5156     f[1] = (stack_fault ()) ? 'S' : '-';
 5157     f[2] = (precision   ()) ? 'P' : '-';
 5158     f[3] = (underflow   ()) ? 'U' : '-';
 5159     f[4] = (overflow    ()) ? 'O' : '-';
 5160     f[5] = (zero_divide ()) ? 'Z' : '-';
 5161     f[6] = (denormalized()) ? 'D' : '-';
 5162     f[7] = (invalid     ()) ? 'I' : '-';
 5163     f[8] = '\x0';
 5164     // output
 5165     printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
 5166   }
 5167 
 5168 };
 5169 
 5170 class TagWord {
 5171  public:
 5172   int32_t _value;
 5173 
 5174   int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
 5175 
 5176   void print() const {
 5177     printf("%04x", _value & 0xFFFF);
 5178   }
 5179 
 5180 };
 5181 
 5182 class FPU_Register {
 5183  public:
 5184   int32_t _m0;
 5185   int32_t _m1;
 5186   int16_t _ex;
 5187 
 5188   bool is_indefinite() const           {
 5189     return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
 5190   }
 5191 
 5192   void print() const {
 5193     char  sign = (_ex < 0) ? '-' : '+';
 5194     const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
 5195     printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
 5196   };
 5197 
 5198 };
 5199 
 5200 class FPU_State {
 5201  public:
 5202   enum {
 5203     register_size       = 10,
 5204     number_of_registers =  8,
 5205     register_mask       =  7
 5206   };
 5207 
 5208   ControlWord  _control_word;
 5209   StatusWord   _status_word;
 5210   TagWord      _tag_word;
 5211   int32_t      _error_offset;
 5212   int32_t      _error_selector;
 5213   int32_t      _data_offset;
 5214   int32_t      _data_selector;
 5215   int8_t       _register[register_size * number_of_registers];
 5216 
 5217   int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
 5218   FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
 5219 
 5220   const char* tag_as_string(int tag) const {
 5221     switch (tag) {
 5222       case 0: return "valid";
 5223       case 1: return "zero";
 5224       case 2: return "special";
 5225       case 3: return "empty";
 5226     }
 5227     ShouldNotReachHere();
 5228     return nullptr;
 5229   }
 5230 
 5231   void print() const {
 5232     // print computation registers
 5233     { int t = _status_word.top();
 5234       for (int i = 0; i < number_of_registers; i++) {
 5235         int j = (i - t) & register_mask;
 5236         printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
 5237         st(j)->print();
 5238         printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
 5239       }
 5240     }
 5241     printf("\n");
 5242     // print control registers
 5243     printf("ctrl = "); _control_word.print(); printf("\n");
 5244     printf("stat = "); _status_word .print(); printf("\n");
 5245     printf("tags = "); _tag_word    .print(); printf("\n");
 5246   }
 5247 
 5248 };
 5249 
 5250 class Flag_Register {
 5251  public:
 5252   int32_t _value;
 5253 
 5254   bool overflow() const                { return ((_value >> 11) & 1) != 0; }
 5255   bool direction() const               { return ((_value >> 10) & 1) != 0; }
 5256   bool sign() const                    { return ((_value >>  7) & 1) != 0; }
 5257   bool zero() const                    { return ((_value >>  6) & 1) != 0; }
 5258   bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
 5259   bool parity() const                  { return ((_value >>  2) & 1) != 0; }
 5260   bool carry() const                   { return ((_value >>  0) & 1) != 0; }
 5261 
 5262   void print() const {
 5263     // flags
 5264     char f[8];
 5265     f[0] = (overflow       ()) ? 'O' : '-';
 5266     f[1] = (direction      ()) ? 'D' : '-';
 5267     f[2] = (sign           ()) ? 'S' : '-';
 5268     f[3] = (zero           ()) ? 'Z' : '-';
 5269     f[4] = (auxiliary_carry()) ? 'A' : '-';
 5270     f[5] = (parity         ()) ? 'P' : '-';
 5271     f[6] = (carry          ()) ? 'C' : '-';
 5272     f[7] = '\x0';
 5273     // output
 5274     printf("%08x  flags = %s", _value, f);
 5275   }
 5276 
 5277 };
 5278 
 5279 class IU_Register {
 5280  public:
 5281   int32_t _value;
 5282 
 5283   void print() const {
 5284     printf("%08x  %11d", _value, _value);
 5285   }
 5286 
 5287 };
 5288 
 5289 class IU_State {
 5290  public:
 5291   Flag_Register _eflags;
 5292   IU_Register   _rdi;
 5293   IU_Register   _rsi;
 5294   IU_Register   _rbp;
 5295   IU_Register   _rsp;
 5296   IU_Register   _rbx;
 5297   IU_Register   _rdx;
 5298   IU_Register   _rcx;
 5299   IU_Register   _rax;
 5300 
 5301   void print() const {
 5302     // computation registers
 5303     printf("rax,  = "); _rax.print(); printf("\n");
 5304     printf("rbx,  = "); _rbx.print(); printf("\n");
 5305     printf("rcx  = "); _rcx.print(); printf("\n");
 5306     printf("rdx  = "); _rdx.print(); printf("\n");
 5307     printf("rdi  = "); _rdi.print(); printf("\n");
 5308     printf("rsi  = "); _rsi.print(); printf("\n");
 5309     printf("rbp,  = "); _rbp.print(); printf("\n");
 5310     printf("rsp  = "); _rsp.print(); printf("\n");
 5311     printf("\n");
 5312     // control registers
 5313     printf("flgs = "); _eflags.print(); printf("\n");
 5314   }
 5315 };
 5316 
 5317 
 5318 class CPU_State {
 5319  public:
 5320   FPU_State _fpu_state;
 5321   IU_State  _iu_state;
 5322 
 5323   void print() const {
 5324     printf("--------------------------------------------------\n");
 5325     _iu_state .print();
 5326     printf("\n");
 5327     _fpu_state.print();
 5328     printf("--------------------------------------------------\n");
 5329   }
 5330 
 5331 };
 5332 
 5333 
 5334 static void _print_CPU_state(CPU_State* state) {
 5335   state->print();
 5336 };
 5337 
 5338 
 5339 void MacroAssembler::print_CPU_state() {
 5340   push_CPU_state();
 5341   push(rsp);                // pass CPU state
 5342   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
 5343   addptr(rsp, wordSize);       // discard argument
 5344   pop_CPU_state();
 5345 }
 5346 
 5347 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
 5348   // Either restore the MXCSR register after returning from the JNI Call
 5349   // or verify that it wasn't changed (with -Xcheck:jni flag).
 5350   if (VM_Version::supports_sse()) {
 5351     if (RestoreMXCSROnJNICalls) {
 5352       ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
 5353     } else if (CheckJNICalls) {
 5354       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
 5355     }
 5356   }
 5357   // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
 5358   vzeroupper();
 5359 }
 5360 
 5361 // ((OopHandle)result).resolve();
 5362 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
 5363   assert_different_registers(result, tmp);
 5364 
 5365   // Only 64 bit platforms support GCs that require a tmp register
 5366   // Only IN_HEAP loads require a thread_tmp register
 5367   // OopHandle::resolve is an indirection like jobject.
 5368   access_load_at(T_OBJECT, IN_NATIVE,
 5369                  result, Address(result, 0), tmp);
 5370 }
 5371 
 5372 // ((WeakHandle)result).resolve();
 5373 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
 5374   assert_different_registers(rresult, rtmp);
 5375   Label resolved;
 5376 
 5377   // A null weak handle resolves to null.
 5378   cmpptr(rresult, 0);
 5379   jcc(Assembler::equal, resolved);
 5380 
 5381   // Only 64 bit platforms support GCs that require a tmp register
 5382   // Only IN_HEAP loads require a thread_tmp register
 5383   // WeakHandle::resolve is an indirection like jweak.
 5384   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 5385                  rresult, Address(rresult, 0), rtmp);
 5386   bind(resolved);
 5387 }
 5388 
 5389 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5390   // get mirror
 5391   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5392   load_method_holder(mirror, method);
 5393   movptr(mirror, Address(mirror, mirror_offset));
 5394   resolve_oop_handle(mirror, tmp);
 5395 }
 5396 
 5397 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5398   load_method_holder(rresult, rmethod);
 5399   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5400 }
 5401 
 5402 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5403   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5404   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5405   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5406 }
 5407 
 5408 void MacroAssembler::load_metadata(Register dst, Register src) {
 5409   if (UseCompactObjectHeaders) {
 5410     load_narrow_klass_compact(dst, src);
 5411   } else if (UseCompressedClassPointers) {
 5412     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5413   } else {
 5414     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5415   }
 5416 }
 5417 
 5418 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5419   assert(UseCompactObjectHeaders, "expect compact object headers");
 5420   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5421   shrq(dst, markWord::klass_shift);
 5422 }
 5423 
 5424 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5425   assert_different_registers(src, tmp);
 5426   assert_different_registers(dst, tmp);
 5427 
 5428   if (UseCompactObjectHeaders) {
 5429     load_narrow_klass_compact(dst, src);
 5430     decode_klass_not_null(dst, tmp);
 5431   } else if (UseCompressedClassPointers) {
 5432     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5433     decode_klass_not_null(dst, tmp);
 5434   } else {
 5435     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5436   }
 5437 }
 5438 
 5439 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
 5440   load_klass(dst, src, tmp);
 5441   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 5442 }
 5443 
 5444 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5445   assert(!UseCompactObjectHeaders, "not with compact headers");
 5446   assert_different_registers(src, tmp);
 5447   assert_different_registers(dst, tmp);
 5448   if (UseCompressedClassPointers) {
 5449     encode_klass_not_null(src, tmp);
 5450     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5451   } else {
 5452     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5453   }
 5454 }
 5455 
 5456 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5457   if (UseCompactObjectHeaders) {
 5458     assert(tmp != noreg, "need tmp");
 5459     assert_different_registers(klass, obj, tmp);
 5460     load_narrow_klass_compact(tmp, obj);
 5461     cmpl(klass, tmp);
 5462   } else if (UseCompressedClassPointers) {
 5463     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
 5464   } else {
 5465     cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
 5466   }
 5467 }
 5468 
 5469 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
 5470   if (UseCompactObjectHeaders) {
 5471     assert(tmp2 != noreg, "need tmp2");
 5472     assert_different_registers(obj1, obj2, tmp1, tmp2);
 5473     load_narrow_klass_compact(tmp1, obj1);
 5474     load_narrow_klass_compact(tmp2, obj2);
 5475     cmpl(tmp1, tmp2);
 5476   } else if (UseCompressedClassPointers) {
 5477     movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
 5478     cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
 5479   } else {
 5480     movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
 5481     cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
 5482   }
 5483 }
 5484 
 5485 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5486                                     Register tmp1) {
 5487   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5488   decorators = AccessInternal::decorator_fixup(decorators, type);
 5489   bool as_raw = (decorators & AS_RAW) != 0;
 5490   if (as_raw) {
 5491     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5492   } else {
 5493     bs->load_at(this, decorators, type, dst, src, tmp1);
 5494   }
 5495 }
 5496 
 5497 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5498                                      Register tmp1, Register tmp2, Register tmp3) {
 5499   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5500   decorators = AccessInternal::decorator_fixup(decorators, type);
 5501   bool as_raw = (decorators & AS_RAW) != 0;
 5502   if (as_raw) {
 5503     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5504   } else {
 5505     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5506   }
 5507 }
 5508 
 5509 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
 5510                                      Register inline_layout_info) {
 5511   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5512   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
 5513 }
 5514 
 5515 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
 5516   movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 5517   movl(offset, Address(offset, InlineKlass::payload_offset_offset()));
 5518 }
 5519 
 5520 void MacroAssembler::payload_addr(Register oop, Register data, Register inline_klass) {
 5521   // ((address) (void*) o) + vk->payload_offset();
 5522   Register offset = (data == oop) ? rscratch1 : data;
 5523   payload_offset(inline_klass, offset);
 5524   if (data == oop) {
 5525     addptr(data, offset);
 5526   } else {
 5527     lea(data, Address(oop, offset));
 5528   }
 5529 }
 5530 
 5531 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
 5532                                                 Register index, Register data) {
 5533   assert(index != rcx, "index needs to shift by rcx");
 5534   assert_different_registers(array, array_klass, index);
 5535   assert_different_registers(rcx, array, index);
 5536 
 5537   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
 5538   movl(rcx, Address(array_klass, Klass::layout_helper_offset()));
 5539 
 5540   // Klass::layout_helper_log2_element_size(lh)
 5541   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
 5542   shrl(rcx, Klass::_lh_log2_element_size_shift);
 5543   andl(rcx, Klass::_lh_log2_element_size_mask);
 5544   shlptr(index); // index << rcx
 5545 
 5546   lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)));
 5547 }
 5548 
 5549 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5550   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5551 }
 5552 
 5553 // Doesn't do verification, generates fixed size code
 5554 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5555   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5556 }
 5557 
 5558 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5559                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5560   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5561 }
 5562 
 5563 // Used for storing nulls.
 5564 void MacroAssembler::store_heap_oop_null(Address dst) {
 5565   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5566 }
 5567 
 5568 void MacroAssembler::store_klass_gap(Register dst, Register src) {
 5569   assert(!UseCompactObjectHeaders, "Don't use with compact headers");
 5570   if (UseCompressedClassPointers) {
 5571     // Store to klass gap in destination
 5572     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
 5573   }
 5574 }
 5575 
 5576 #ifdef ASSERT
 5577 void MacroAssembler::verify_heapbase(const char* msg) {
 5578   assert (UseCompressedOops, "should be compressed");
 5579   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5580   if (CheckCompressedOops) {
 5581     Label ok;
 5582     ExternalAddress src2(CompressedOops::base_addr());
 5583     const bool is_src2_reachable = reachable(src2);
 5584     if (!is_src2_reachable) {
 5585       push(rscratch1);  // cmpptr trashes rscratch1
 5586     }
 5587     cmpptr(r12_heapbase, src2, rscratch1);
 5588     jcc(Assembler::equal, ok);
 5589     STOP(msg);
 5590     bind(ok);
 5591     if (!is_src2_reachable) {
 5592       pop(rscratch1);
 5593     }
 5594   }
 5595 }
 5596 #endif
 5597 
 5598 // Algorithm must match oop.inline.hpp encode_heap_oop.
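// Rough sketch of the encoding, hedged; the authoritative version is the
// CompressedOops code in compressedOops.inline.hpp:
//
//   narrowOop encode(oop o) {
//     if (o == nullptr) return narrowOop(0);
//     return narrowOop(((uintptr_t)o - (uintptr_t)base) >> shift);
//   }
//
// When a base is in use, the cmov below maps a null oop to the heap base so
// the subtraction yields 0 without a branch.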
 5599 void MacroAssembler::encode_heap_oop(Register r) {
 5600 #ifdef ASSERT
 5601   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
 5602 #endif
 5603   verify_oop_msg(r, "broken oop in encode_heap_oop");
 5604   if (CompressedOops::base() == nullptr) {
 5605     if (CompressedOops::shift() != 0) {
 5606       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5607       shrq(r, LogMinObjAlignmentInBytes);
 5608     }
 5609     return;
 5610   }
 5611   testq(r, r);
 5612   cmovq(Assembler::equal, r, r12_heapbase);
 5613   subq(r, r12_heapbase);
 5614   shrq(r, LogMinObjAlignmentInBytes);
 5615 }
 5616 
 5617 void MacroAssembler::encode_heap_oop_not_null(Register r) {
 5618 #ifdef ASSERT
 5619   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
 5620   if (CheckCompressedOops) {
 5621     Label ok;
 5622     testq(r, r);
 5623     jcc(Assembler::notEqual, ok);
 5624     STOP("null oop passed to encode_heap_oop_not_null");
 5625     bind(ok);
 5626   }
 5627 #endif
 5628   verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
 5629   if (CompressedOops::base() != nullptr) {
 5630     subq(r, r12_heapbase);
 5631   }
 5632   if (CompressedOops::shift() != 0) {
 5633     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5634     shrq(r, LogMinObjAlignmentInBytes);
 5635   }
 5636 }
 5637 
 5638 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
 5639 #ifdef ASSERT
 5640   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
 5641   if (CheckCompressedOops) {
 5642     Label ok;
 5643     testq(src, src);
 5644     jcc(Assembler::notEqual, ok);
 5645     STOP("null oop passed to encode_heap_oop_not_null2");
 5646     bind(ok);
 5647   }
 5648 #endif
 5649   verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
 5650   if (dst != src) {
 5651     movq(dst, src);
 5652   }
 5653   if (CompressedOops::base() != nullptr) {
 5654     subq(dst, r12_heapbase);
 5655   }
 5656   if (CompressedOops::shift() != 0) {
 5657     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5658     shrq(dst, LogMinObjAlignmentInBytes);
 5659   }
 5660 }
 5661 
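// Matching decode sketch (hedged, see compressedOops.inline.hpp):
//
//   oop decode(narrowOop n) {
//     if (n == narrowOop(0)) return nullptr;
//     return (oop)((uintptr_t)base + ((uintptr_t)n << shift));
//   }
//
// With a null base the shift alone suffices; otherwise the shifted value is
// tested for zero before the base is added so that null stays null.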
 5662 void  MacroAssembler::decode_heap_oop(Register r) {
 5663 #ifdef ASSERT
 5664   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
 5665 #endif
 5666   if (CompressedOops::base() == nullptr) {
 5667     if (CompressedOops::shift() != 0) {
 5668       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5669       shlq(r, LogMinObjAlignmentInBytes);
 5670     }
 5671   } else {
 5672     Label done;
 5673     shlq(r, LogMinObjAlignmentInBytes);
 5674     jccb(Assembler::equal, done);
 5675     addq(r, r12_heapbase);
 5676     bind(done);
 5677   }
 5678   verify_oop_msg(r, "broken oop in decode_heap_oop");
 5679 }
 5680 
 5681 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
 5682   // Note: it will change flags
 5683   assert (UseCompressedOops, "should only be used for compressed headers");
 5684   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5685   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5686   // vtableStubs also counts instructions in pd_code_size_limit.
 5687   // Also do not verify_oop as this is called by verify_oop.
 5688   if (CompressedOops::shift() != 0) {
 5689     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5690     shlq(r, LogMinObjAlignmentInBytes);
 5691     if (CompressedOops::base() != nullptr) {
 5692       addq(r, r12_heapbase);
 5693     }
 5694   } else {
 5695     assert (CompressedOops::base() == nullptr, "sanity");
 5696   }
 5697 }
 5698 
 5699 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
 5700   // Note: it will change flags
 5701   assert (UseCompressedOops, "should only be used for compressed headers");
 5702   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5703   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5704   // vtableStubs also counts instructions in pd_code_size_limit.
 5705   // Also do not verify_oop as this is called by verify_oop.
 5706   if (CompressedOops::shift() != 0) {
 5707     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5708     if (LogMinObjAlignmentInBytes == Address::times_8) {
 5709       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
 5710     } else {
 5711       if (dst != src) {
 5712         movq(dst, src);
 5713       }
 5714       shlq(dst, LogMinObjAlignmentInBytes);
 5715       if (CompressedOops::base() != nullptr) {
 5716         addq(dst, r12_heapbase);
 5717       }
 5718     }
 5719   } else {
 5720     assert (CompressedOops::base() == nullptr, "sanity");
 5721     if (dst != src) {
 5722       movq(dst, src);
 5723     }
 5724   }
 5725 }
 5726 
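// Narrow Klass pointers are compressed analogously to oops, but against
// CompressedKlassPointers::base()/shift() and without a null case (hence the
// *_not_null names). Roughly:
//
//   narrowKlass nk = (narrowKlass)(((uintptr_t)k - (uintptr_t)klass_base) >> klass_shift);
//   Klass* k       = (Klass*)((uintptr_t)klass_base + ((uintptr_t)nk << klass_shift));
//
// When dumping the AOT code cache the base is loaded indirectly through an
// ExternalAddress, presumably so the cached code still works if the base
// differs when the cache is loaded.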
 5727 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
 5728   BLOCK_COMMENT("encode_klass_not_null {");
 5729   assert_different_registers(r, tmp);
 5730   if (CompressedKlassPointers::base() != nullptr) {
 5731     if (AOTCodeCache::is_on_for_dump()) {
 5732       movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
 5733     } else {
 5734       movptr(tmp, (intptr_t)CompressedKlassPointers::base());
 5735     }
 5736     subq(r, tmp);
 5737   }
 5738   if (CompressedKlassPointers::shift() != 0) {
 5739     shrq(r, CompressedKlassPointers::shift());
 5740   }
 5741   BLOCK_COMMENT("} encode_klass_not_null");
 5742 }
 5743 
 5744 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
 5745   BLOCK_COMMENT("encode_and_move_klass_not_null {");
 5746   assert_different_registers(src, dst);
 5747   if (CompressedKlassPointers::base() != nullptr) {
 5748     movptr(dst, -(intptr_t)CompressedKlassPointers::base());
 5749     addq(dst, src);
 5750   } else {
 5751     movptr(dst, src);
 5752   }
 5753   if (CompressedKlassPointers::shift() != 0) {
 5754     shrq(dst, CompressedKlassPointers::shift());
 5755   }
 5756   BLOCK_COMMENT("} encode_and_move_klass_not_null");
 5757 }
 5758 
 5759 void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
 5760   BLOCK_COMMENT("decode_klass_not_null {");
 5761   assert_different_registers(r, tmp);
 5762   // Note: it will change flags
 5763   assert(UseCompressedClassPointers, "should only be used for compressed headers");
 5764   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5765   // vtableStubs also counts instructions in pd_code_size_limit.
 5766   // Also do not verify_oop as this is called by verify_oop.
 5767   if (CompressedKlassPointers::shift() != 0) {
 5768     shlq(r, CompressedKlassPointers::shift());
 5769   }
 5770   if (CompressedKlassPointers::base() != nullptr) {
 5771     if (AOTCodeCache::is_on_for_dump()) {
 5772       movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
 5773     } else {
 5774       movptr(tmp, (intptr_t)CompressedKlassPointers::base());
 5775     }
 5776     addq(r, tmp);
 5777   }
 5778   BLOCK_COMMENT("} decode_klass_not_null");
 5779 }
 5780 
 5781 void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
 5782   BLOCK_COMMENT("decode_and_move_klass_not_null {");
 5783   assert_different_registers(src, dst);
 5784   // Note: it will change flags
 5785   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5786   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5787   // vtableStubs also counts instructions in pd_code_size_limit.
 5788   // Also do not verify_oop as this is called by verify_oop.
 5789 
 5790   if (CompressedKlassPointers::base() == nullptr &&
 5791       CompressedKlassPointers::shift() == 0) {
 5792     // The best case scenario is that there is no base or shift. Then it is already
 5793     // a pointer that needs nothing but a register rename.
 5794     movl(dst, src);
 5795   } else {
 5796     if (CompressedKlassPointers::shift() <= Address::times_8) {
 5797       if (CompressedKlassPointers::base() != nullptr) {
 5798         movptr(dst, (intptr_t)CompressedKlassPointers::base());
 5799       } else {
 5800         xorq(dst, dst);
 5801       }
 5802       if (CompressedKlassPointers::shift() != 0) {
 5803         assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
 5804         leaq(dst, Address(dst, src, Address::times_8, 0));
 5805       } else {
 5806         addq(dst, src);
 5807       }
 5808     } else {
 5809       if (CompressedKlassPointers::base() != nullptr) {
 5810         const intptr_t base_right_shifted =
 5811             (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
 5812         movptr(dst, base_right_shifted);
 5813       } else {
 5814         xorq(dst, dst);
 5815       }
 5816       addq(dst, src);
 5817       shlq(dst, CompressedKlassPointers::shift());
 5818     }
 5819   }
 5820   BLOCK_COMMENT("} decode_and_move_klass_not_null");
 5821 }
 5822 
 5823 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
 5824   assert (UseCompressedOops, "should only be used for compressed headers");
 5825   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5826   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5827   int oop_index = oop_recorder()->find_index(obj);
 5828   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5829   mov_narrow_oop(dst, oop_index, rspec);
 5830 }
 5831 
 5832 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
 5833   assert (UseCompressedOops, "should only be used for compressed headers");
 5834   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5835   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5836   int oop_index = oop_recorder()->find_index(obj);
 5837   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5838   mov_narrow_oop(dst, oop_index, rspec);
 5839 }
 5840 
 5841 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
 5842   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5843   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5844   int klass_index = oop_recorder()->find_index(k);
 5845   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5846   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5847 }
 5848 
 5849 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
 5850   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5851   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5852   int klass_index = oop_recorder()->find_index(k);
 5853   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5854   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5855 }
 5856 
 5857 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
 5858   assert (UseCompressedOops, "should only be used for compressed headers");
 5859   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5860   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5861   int oop_index = oop_recorder()->find_index(obj);
 5862   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5863   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5864 }
 5865 
 5866 void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
 5867   assert (UseCompressedOops, "should only be used for compressed headers");
 5868   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5869   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5870   int oop_index = oop_recorder()->find_index(obj);
 5871   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5872   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5873 }
 5874 
 5875 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
 5876   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5877   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5878   int klass_index = oop_recorder()->find_index(k);
 5879   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5880   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5881 }
 5882 
 5883 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
 5884   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5885   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5886   int klass_index = oop_recorder()->find_index(k);
 5887   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5888   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5889 }
 5890 
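// Reload r12_heapbase with the compressed-oop base: zero for an unscaled or
// zero-based encoding, the base constant otherwise. Before the heap exists the
// base is read through an ExternalAddress so the value at execution time is
// used rather than a stale constant (an inference from the code below).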
 5891 void MacroAssembler::reinit_heapbase() {
 5892   if (UseCompressedOops) {
 5893     if (Universe::heap() != nullptr) {
 5894       if (CompressedOops::base() == nullptr) {
 5895         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5896       } else {
 5897         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5898       }
 5899     } else {
 5900       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5901     }
 5902   }
 5903 }
 5904 
 5905 #if COMPILER2_OR_JVMCI
 5906 
 5907 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5908 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
 5909   // cnt - number of qwords (8-byte words).
 5910   // base - start address, qword aligned.
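  // Overall shape (a sketch, not a contract): broadcast 'val' into xtmp, clear
  // 64 bytes per L_loop iteration, then handle the remaining 0..7 qwords with
  // a 32-byte step plus an 8-byte loop, or with masked AVX-512 stores when
  // 64-byte vectors are enabled.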
 5911   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5912   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5913   if (use64byteVector) {
 5914     evpbroadcastq(xtmp, val, AVX_512bit);
 5915   } else if (MaxVectorSize >= 32) {
 5916     movdq(xtmp, val);
 5917     punpcklqdq(xtmp, xtmp);
 5918     vinserti128_high(xtmp, xtmp);
 5919   } else {
 5920     movdq(xtmp, val);
 5921     punpcklqdq(xtmp, xtmp);
 5922   }
 5923   jmp(L_zero_64_bytes);
 5924 
 5925   BIND(L_loop);
 5926   if (MaxVectorSize >= 32) {
 5927     fill64(base, 0, xtmp, use64byteVector);
 5928   } else {
 5929     movdqu(Address(base,  0), xtmp);
 5930     movdqu(Address(base, 16), xtmp);
 5931     movdqu(Address(base, 32), xtmp);
 5932     movdqu(Address(base, 48), xtmp);
 5933   }
 5934   addptr(base, 64);
 5935 
 5936   BIND(L_zero_64_bytes);
 5937   subptr(cnt, 8);
 5938   jccb(Assembler::greaterEqual, L_loop);
 5939 
 5940   // Copy trailing 64 bytes
 5941   if (use64byteVector) {
 5942     addptr(cnt, 8);
 5943     jccb(Assembler::equal, L_end);
 5944     fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
 5945     jmp(L_end);
 5946   } else {
 5947     addptr(cnt, 4);
 5948     jccb(Assembler::less, L_tail);
 5949     if (MaxVectorSize >= 32) {
 5950       vmovdqu(Address(base, 0), xtmp);
 5951     } else {
 5952       movdqu(Address(base,  0), xtmp);
 5953       movdqu(Address(base, 16), xtmp);
 5954     }
 5955   }
 5956   addptr(base, 32);
 5957   subptr(cnt, 4);
 5958 
 5959   BIND(L_tail);
 5960   addptr(cnt, 4);
 5961   jccb(Assembler::lessEqual, L_end);
 5962   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5963     fill32_masked(3, base, 0, xtmp, mask, cnt, val);
 5964   } else {
 5965     decrement(cnt);
 5966 
 5967     BIND(L_sloop);
 5968     movq(Address(base, 0), xtmp);
 5969     addptr(base, 8);
 5970     decrement(cnt);
 5971     jccb(Assembler::greaterEqual, L_sloop);
 5972   }
 5973   BIND(L_end);
 5974 }
 5975 
 5976 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
5977   assert(InlineTypeReturnedAsFields, "should only be used when inline types are returned as fields");
 5978   // An inline type might be returned. If fields are in registers we
 5979   // need to allocate an inline type instance and initialize it with
 5980   // the value of the fields.
 5981   Label skip;
5982   // A new buffered inline type is only needed if one was not already returned: a set low bit in rax means the fields are still in registers
 5983   testptr(rax, 1);
 5984   jcc(Assembler::zero, skip);
 5985   int call_offset = -1;
 5986 
 5987 #ifdef _LP64
5988   // The following code is similar to allocate_instance but has some slight differences,
5989   // e.g. the object size is never zero and is sometimes a compile-time constant, and storing the
5990   // klass pointer after allocation is unnecessary if vk != nullptr. allocate_instance does not cover these cases.
 5991   Label slow_case;
 5992   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
 5993   mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
 5994   if (vk != nullptr) {
 5995     // Called from C1, where the return type is statically known.
 5996     movptr(rbx, (intptr_t)vk->get_InlineKlass());
 5997     jint lh = vk->layout_helper();
 5998     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
 5999     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
 6000       tlab_allocate(rax, noreg, lh, r13, r14, slow_case);
 6001     } else {
 6002       jmp(slow_case);
 6003     }
 6004   } else {
 6005     // Call from interpreter. RAX contains ((the InlineKlass* of the return type) | 0x01)
 6006     mov(rbx, rax);
 6007     andptr(rbx, -2);
 6008     if (UseTLAB) {
 6009       movl(r14, Address(rbx, Klass::layout_helper_offset()));
 6010       testl(r14, Klass::_lh_instance_slow_path_bit);
 6011       jcc(Assembler::notZero, slow_case);
 6012       tlab_allocate(rax, r14, 0, r13, r14, slow_case);
 6013     } else {
 6014       jmp(slow_case);
 6015     }
 6016   }
 6017   if (UseTLAB) {
 6018     // 2. Initialize buffered inline instance header
 6019     Register buffer_obj = rax;
 6020     if (UseCompactObjectHeaders) {
 6021       Register mark_word = r13;
 6022       movptr(mark_word, Address(rbx, Klass::prototype_header_offset()));
6023       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), mark_word);
 6024     } else {
 6025       movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
 6026       xorl(r13, r13);
 6027       store_klass_gap(buffer_obj, r13);
 6028       if (vk == nullptr) {
 6029         // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
 6030         mov(r13, rbx);
 6031       }
 6032       store_klass(buffer_obj, rbx, rscratch1);
 6033     }
 6034     // 3. Initialize its fields with an inline class specific handler
 6035     if (vk != nullptr) {
 6036       call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
 6037     } else {
 6038       movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 6039       movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
 6040       call(rbx);
 6041     }
 6042     jmp(skip);
 6043   }
 6044   bind(slow_case);
 6045   // We failed to allocate a new inline type, fall back to a runtime
 6046   // call. Some oop field may be live in some registers but we can't
 6047   // tell. That runtime call will take care of preserving them
 6048   // across a GC if there's one.
 6049   mov(rax, rscratch1);
 6050 #endif
 6051 
 6052   if (from_interpreter) {
 6053     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
 6054   } else {
 6055     call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
 6056     call_offset = offset();
 6057   }
 6058 
 6059   bind(skip);
 6060   return call_offset;
 6061 }
 6062 
 6063 // Move a value between registers/stack slots and update the reg_state
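// reg_state[] tracks the state of each VMReg during the argument shuffle
// (terminology inferred from its use here and in the pack/unpack helpers):
//   reg_written  - destination already holds its final value
//   reg_readonly - still holds an unconsumed source value, must not be clobbered
//   reg_writable - free to receive a value
// move_helper returns false when the destination is not yet writable so the
// caller can retry the move in a later pass.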
 6064 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
 6065   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
 6066   if (reg_state[to->value()] == reg_written) {
 6067     return true; // Already written
 6068   }
 6069   if (from != to && bt != T_VOID) {
 6070     if (reg_state[to->value()] == reg_readonly) {
 6071       return false; // Not yet writable
 6072     }
 6073     if (from->is_reg()) {
 6074       if (to->is_reg()) {
 6075         if (from->is_XMMRegister()) {
 6076           if (bt == T_DOUBLE) {
 6077             movdbl(to->as_XMMRegister(), from->as_XMMRegister());
 6078           } else {
 6079             assert(bt == T_FLOAT, "must be float");
 6080             movflt(to->as_XMMRegister(), from->as_XMMRegister());
 6081           }
 6082         } else {
 6083           movq(to->as_Register(), from->as_Register());
 6084         }
 6085       } else {
 6086         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6087         Address to_addr = Address(rsp, st_off);
 6088         if (from->is_XMMRegister()) {
 6089           if (bt == T_DOUBLE) {
 6090             movdbl(to_addr, from->as_XMMRegister());
 6091           } else {
 6092             assert(bt == T_FLOAT, "must be float");
 6093             movflt(to_addr, from->as_XMMRegister());
 6094           }
 6095         } else {
 6096           movq(to_addr, from->as_Register());
 6097         }
 6098       }
 6099     } else {
 6100       Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
 6101       if (to->is_reg()) {
 6102         if (to->is_XMMRegister()) {
 6103           if (bt == T_DOUBLE) {
 6104             movdbl(to->as_XMMRegister(), from_addr);
 6105           } else {
 6106             assert(bt == T_FLOAT, "must be float");
 6107             movflt(to->as_XMMRegister(), from_addr);
 6108           }
 6109         } else {
 6110           movq(to->as_Register(), from_addr);
 6111         }
 6112       } else {
 6113         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6114         movq(r13, from_addr);
 6115         movq(Address(rsp, st_off), r13);
 6116       }
 6117     }
 6118   }
 6119   // Update register states
 6120   reg_state[from->value()] = reg_writable;
 6121   reg_state[to->value()] = reg_written;
 6122   return true;
 6123 }
 6124 
 6125 // Calculate the extra stack space required for packing or unpacking inline
 6126 // args and adjust the stack pointer
 6127 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
 6128   // Two additional slots to account for return address
 6129   int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
 6130   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
 6131   // Save the return address, adjust the stack (make sure it is properly
 6132   // 16-byte aligned) and copy the return address to the new top of the stack.
 6133   // The stack will be repaired on return (see MacroAssembler::remove_frame).
 6134   assert(sp_inc > 0, "sanity");
 6135   pop(r13);
 6136   subptr(rsp, sp_inc);
 6137   push(r13);
 6138   return sp_inc;
 6139 }
 6140 
 6141 // Read all fields from an inline type buffer and store the field values in registers/stack slots.
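// A sig entry with _offset == -1 denotes the null marker of a nullable inline
// type argument (an inference from the handling below): its destination
// receives 1 when the buffer is non-null, and 0 - together with zeroed oop
// fields, to keep the GC happy - when the buffer is null.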
 6142 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
 6143                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
 6144                                           RegState reg_state[]) {
 6145   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
 6146   assert(from->is_valid(), "source must be valid");
 6147   bool progress = false;
 6148 #ifdef ASSERT
 6149   const int start_offset = offset();
 6150 #endif
 6151 
 6152   Label L_null, L_notNull;
 6153   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
 6154   Register tmp1 = r10;
 6155   Register tmp2 = r13;
 6156   Register fromReg = noreg;
 6157   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
 6158   bool done = true;
 6159   bool mark_done = true;
 6160   VMReg toReg;
 6161   BasicType bt;
 6162   // Check if argument requires a null check
 6163   bool null_check = false;
 6164   VMReg nullCheckReg;
 6165   while (stream.next(nullCheckReg, bt)) {
 6166     if (sig->at(stream.sig_index())._offset == -1) {
 6167       null_check = true;
 6168       break;
 6169     }
 6170   }
 6171   stream.reset(sig_index, to_index);
 6172   while (stream.next(toReg, bt)) {
 6173     assert(toReg->is_valid(), "destination must be valid");
 6174     int idx = (int)toReg->value();
 6175     if (reg_state[idx] == reg_readonly) {
 6176       if (idx != from->value()) {
 6177         mark_done = false;
 6178       }
 6179       done = false;
 6180       continue;
 6181     } else if (reg_state[idx] == reg_written) {
 6182       continue;
 6183     }
 6184     assert(reg_state[idx] == reg_writable, "must be writable");
 6185     reg_state[idx] = reg_written;
 6186     progress = true;
 6187 
 6188     if (fromReg == noreg) {
 6189       if (from->is_reg()) {
 6190         fromReg = from->as_Register();
 6191       } else {
 6192         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6193         movq(tmp1, Address(rsp, st_off));
 6194         fromReg = tmp1;
 6195       }
 6196       if (null_check) {
 6197         // Nullable inline type argument, emit null check
 6198         testptr(fromReg, fromReg);
 6199         jcc(Assembler::zero, L_null);
 6200       }
 6201     }
 6202     int off = sig->at(stream.sig_index())._offset;
 6203     if (off == -1) {
6204       assert(null_check, "Missing null check");
 6205       if (toReg->is_stack()) {
 6206         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6207         movq(Address(rsp, st_off), 1);
 6208       } else {
 6209         movq(toReg->as_Register(), 1);
 6210       }
 6211       continue;
 6212     }
 6213     assert(off > 0, "offset in object should be positive");
 6214     Address fromAddr = Address(fromReg, off);
 6215     if (!toReg->is_XMMRegister()) {
 6216       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
 6217       if (is_reference_type(bt)) {
 6218         load_heap_oop(dst, fromAddr);
 6219       } else {
 6220         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 6221         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
 6222       }
 6223       if (toReg->is_stack()) {
 6224         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6225         movq(Address(rsp, st_off), dst);
 6226       }
 6227     } else if (bt == T_DOUBLE) {
 6228       movdbl(toReg->as_XMMRegister(), fromAddr);
 6229     } else {
 6230       assert(bt == T_FLOAT, "must be float");
 6231       movflt(toReg->as_XMMRegister(), fromAddr);
 6232     }
 6233   }
 6234   if (progress && null_check) {
 6235     if (done) {
 6236       jmp(L_notNull);
 6237       bind(L_null);
 6238       // Set null marker to zero to signal that the argument is null.
 6239       // Also set all oop fields to zero to make the GC happy.
 6240       stream.reset(sig_index, to_index);
 6241       while (stream.next(toReg, bt)) {
 6242         if (sig->at(stream.sig_index())._offset == -1 ||
 6243             bt == T_OBJECT || bt == T_ARRAY) {
 6244           if (toReg->is_stack()) {
 6245             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6246             movq(Address(rsp, st_off), 0);
 6247           } else {
 6248             xorq(toReg->as_Register(), toReg->as_Register());
 6249           }
 6250         }
 6251       }
 6252       bind(L_notNull);
 6253     } else {
 6254       bind(L_null);
 6255     }
 6256   }
 6257 
 6258   sig_index = stream.sig_index();
 6259   to_index = stream.regs_index();
 6260 
 6261   if (mark_done && reg_state[from->value()] != reg_written) {
 6262     // This is okay because no one else will write to that slot
 6263     reg_state[from->value()] = reg_writable;
 6264   }
 6265   from_index--;
 6266   assert(progress || (start_offset == offset()), "should not emit code");
 6267   return done;
 6268 }
 6269 
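// Inverse of unpack_inline_helper: read the scalarized field values of inline
// type argument 'vtarg_index' from registers/stack slots, store them into the
// pre-allocated buffer loaded from val_array, and move the resulting oop (or
// null, if the null marker is 0) into 'to'.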
 6270 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
 6271                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
 6272                                         RegState reg_state[], Register val_array) {
 6273   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
 6274   assert(to->is_valid(), "destination must be valid");
 6275 
 6276   if (reg_state[to->value()] == reg_written) {
 6277     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6278     return true; // Already written
 6279   }
 6280 
 6281   // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
 6282   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
 6283   Register val_obj_tmp = r11;
 6284   Register from_reg_tmp = r14;
 6285   Register tmp1 = r10;
 6286   Register tmp2 = r13;
 6287   Register tmp3 = rbx;
 6288   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
 6289 
 6290   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
 6291 
 6292   if (reg_state[to->value()] == reg_readonly) {
 6293     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
 6294       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
 6295       return false; // Not yet writable
 6296     }
 6297     val_obj = val_obj_tmp;
 6298   }
 6299 
 6300   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
 6301   load_heap_oop(val_obj, Address(val_array, index));
 6302 
 6303   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
 6304   VMReg fromReg;
 6305   BasicType bt;
 6306   Label L_null;
 6307   while (stream.next(fromReg, bt)) {
 6308     assert(fromReg->is_valid(), "source must be valid");
 6309     reg_state[fromReg->value()] = reg_writable;
 6310 
 6311     int off = sig->at(stream.sig_index())._offset;
 6312     if (off == -1) {
 6313       // Nullable inline type argument, emit null check
 6314       Label L_notNull;
 6315       if (fromReg->is_stack()) {
 6316         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6317         testb(Address(rsp, ld_off), 1);
 6318       } else {
 6319         testb(fromReg->as_Register(), 1);
 6320       }
 6321       jcc(Assembler::notZero, L_notNull);
 6322       movptr(val_obj, 0);
 6323       jmp(L_null);
 6324       bind(L_notNull);
 6325       continue;
 6326     }
 6327 
 6328     assert(off > 0, "offset in object should be positive");
 6329     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 6330 
 6331     // Pack the scalarized field into the value object.
 6332     Address dst(val_obj, off);
 6333     if (!fromReg->is_XMMRegister()) {
 6334       Register src;
 6335       if (fromReg->is_stack()) {
 6336         src = from_reg_tmp;
 6337         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 6338         load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 6339       } else {
 6340         src = fromReg->as_Register();
 6341       }
 6342       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
 6343       if (is_reference_type(bt)) {
 6344         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 6345       } else {
 6346         store_sized_value(dst, src, size_in_bytes);
 6347       }
 6348     } else if (bt == T_DOUBLE) {
 6349       movdbl(dst, fromReg->as_XMMRegister());
 6350     } else {
 6351       assert(bt == T_FLOAT, "must be float");
 6352       movflt(dst, fromReg->as_XMMRegister());
 6353     }
 6354   }
 6355   bind(L_null);
 6356   sig_index = stream.sig_index();
 6357   from_index = stream.regs_index();
 6358 
 6359   assert(reg_state[to->value()] == reg_writable, "must have already been read");
 6360   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
 6361   assert(success, "to register must be writeable");
 6362   return true;
 6363 }
 6364 
 6365 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
 6366   return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
 6367 }
 6368 
 6369 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
 6370   assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
 6371   if (needs_stack_repair) {
 6372     // TODO 8284443 Add a comment drawing the frame like in Aarch64's version of MacroAssembler::remove_frame
 6373     movq(rbp, Address(rsp, initial_framesize));
 6374     // The stack increment resides just below the saved rbp
 6375     addq(rsp, Address(rsp, initial_framesize - wordSize));
 6376   } else {
 6377     if (initial_framesize > 0) {
 6378       addq(rsp, initial_framesize);
 6379     }
 6380     pop(rbp);
 6381   }
 6382 }
 6383 
 6384 // Clearing constant sized memory using YMM/ZMM registers.
 6385 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 6386   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 6387   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 6388 
 6389   int vector64_count = (cnt & (~0x7)) >> 3;
 6390   cnt = cnt & 0x7;
 6391   const int fill64_per_loop = 4;
 6392   const int max_unrolled_fill64 = 8;
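  // Worked example (assuming cnt = 19 qwords): vector64_count = 2, so two
  // unrolled 64-byte fills are emitted below and the remaining cnt = 3 qwords
  // are cleared by the masked 24-byte store in the tail switch (case 3).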
 6393 
 6394   // 64 byte initialization loop.
 6395   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 6396   int start64 = 0;
 6397   if (vector64_count > max_unrolled_fill64) {
 6398     Label LOOP;
 6399     Register index = rtmp;
 6400 
 6401     start64 = vector64_count - (vector64_count % fill64_per_loop);
 6402 
 6403     movl(index, 0);
 6404     BIND(LOOP);
 6405     for (int i = 0; i < fill64_per_loop; i++) {
 6406       fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
 6407     }
 6408     addl(index, fill64_per_loop * 64);
 6409     cmpl(index, start64 * 64);
 6410     jccb(Assembler::less, LOOP);
 6411   }
 6412   for (int i = start64; i < vector64_count; i++) {
 6413     fill64(base, i * 64, xtmp, use64byteVector);
 6414   }
 6415 
 6416   // Clear remaining 64 byte tail.
 6417   int disp = vector64_count * 64;
 6418   if (cnt) {
 6419     switch (cnt) {
 6420       case 1:
 6421         movq(Address(base, disp), xtmp);
 6422         break;
 6423       case 2:
 6424         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
 6425         break;
 6426       case 3:
 6427         movl(rtmp, 0x7);
 6428         kmovwl(mask, rtmp);
 6429         evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
 6430         break;
 6431       case 4:
 6432         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6433         break;
 6434       case 5:
 6435         if (use64byteVector) {
 6436           movl(rtmp, 0x1F);
 6437           kmovwl(mask, rtmp);
 6438           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6439         } else {
 6440           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6441           movq(Address(base, disp + 32), xtmp);
 6442         }
 6443         break;
 6444       case 6:
 6445         if (use64byteVector) {
 6446           movl(rtmp, 0x3F);
 6447           kmovwl(mask, rtmp);
 6448           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6449         } else {
 6450           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6451           evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
 6452         }
 6453         break;
 6454       case 7:
 6455         if (use64byteVector) {
 6456           movl(rtmp, 0x7F);
 6457           kmovwl(mask, rtmp);
 6458           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 6459         } else {
 6460           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 6461           movl(rtmp, 0x7);
 6462           kmovwl(mask, rtmp);
 6463           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 6464         }
 6465         break;
 6466       default:
6467         fatal("Unexpected length: %d", cnt);
 6468         break;
 6469     }
 6470   }
 6471 }
 6472 
 6473 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
 6474                                bool is_large, bool word_copy_only, KRegister mask) {
 6475   // cnt      - number of qwords (8-byte words).
 6476   // base     - start address, qword aligned.
 6477   // is_large - if optimizers know cnt is larger than InitArrayShortSize
6478   assert(base == rdi, "base register must be rdi for rep stos");
6479   assert(val == rax,  "val register must be rax for rep stos");
6480   assert(cnt == rcx,  "cnt register must be rcx for rep stos");
6481   assert(InitArrayShortSize % BytesPerLong == 0,
6482     "InitArrayShortSize should be a multiple of BytesPerLong");
 6483 
 6484   Label DONE;
 6485 
 6486   if (!is_large) {
 6487     Label LOOP, LONG;
 6488     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 6489     jccb(Assembler::greater, LONG);
 6490 
 6491     decrement(cnt);
 6492     jccb(Assembler::negative, DONE); // Zero length
 6493 
 6494     // Use individual pointer-sized stores for small counts:
 6495     BIND(LOOP);
 6496     movptr(Address(base, cnt, Address::times_ptr), val);
 6497     decrement(cnt);
 6498     jccb(Assembler::greaterEqual, LOOP);
 6499     jmpb(DONE);
 6500 
 6501     BIND(LONG);
 6502   }
 6503 
 6504   // Use longer rep-prefixed ops for non-small counts:
 6505   if (UseFastStosb && !word_copy_only) {
 6506     shlptr(cnt, 3); // convert to number of bytes
 6507     rep_stosb();
 6508   } else if (UseXMMForObjInit) {
 6509     xmm_clear_mem(base, cnt, val, xtmp, mask);
 6510   } else {
 6511     rep_stos();
 6512   }
 6513 
 6514   BIND(DONE);
 6515 }
 6516 
 6517 #endif //COMPILER2_OR_JVMCI
 6518 
 6519 
 6520 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6521                                    Register to, Register value, Register count,
 6522                                    Register rtmp, XMMRegister xtmp) {
 6523   ShortBranchVerifier sbv(this);
 6524   assert_different_registers(to, value, count, rtmp);
 6525   Label L_exit;
 6526   Label L_fill_2_bytes, L_fill_4_bytes;
 6527 
 6528 #if defined(COMPILER2)
6529   if (MaxVectorSize >= 32 &&
 6530      VM_Version::supports_avx512vlbw() &&
 6531      VM_Version::supports_bmi2()) {
 6532     generate_fill_avx3(t, to, value, count, rtmp, xtmp);
 6533     return;
 6534   }
 6535 #endif
 6536 
 6537   int shift = -1;
 6538   switch (t) {
 6539     case T_BYTE:
 6540       shift = 2;
 6541       break;
 6542     case T_SHORT:
 6543       shift = 1;
 6544       break;
 6545     case T_INT:
 6546       shift = 0;
 6547       break;
 6548     default: ShouldNotReachHere();
 6549   }
 6550 
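  // 'count' is in elements and 'shift' is log2 of the number of elements per
  // 32-bit word. Widen 'value' to a full 32-bit fill pattern so the same
  // stores serve every element size, e.g. (a worked example) a T_BYTE value of
  // 0xAB becomes 0xABABABAB and a T_SHORT value of 0x1234 becomes 0x12341234.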
 6551   if (t == T_BYTE) {
 6552     andl(value, 0xff);
 6553     movl(rtmp, value);
 6554     shll(rtmp, 8);
 6555     orl(value, rtmp);
 6556   }
 6557   if (t == T_SHORT) {
 6558     andl(value, 0xffff);
 6559   }
 6560   if (t == T_BYTE || t == T_SHORT) {
 6561     movl(rtmp, value);
 6562     shll(rtmp, 16);
 6563     orl(value, rtmp);
 6564   }
 6565 
 6566   cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
 6567   jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
 6568   if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 6569     Label L_skip_align2;
 6570     // align source address at 4 bytes address boundary
 6571     if (t == T_BYTE) {
 6572       Label L_skip_align1;
 6573       // One byte misalignment happens only for byte arrays
 6574       testptr(to, 1);
 6575       jccb(Assembler::zero, L_skip_align1);
 6576       movb(Address(to, 0), value);
 6577       increment(to);
 6578       decrement(count);
 6579       BIND(L_skip_align1);
 6580     }
 6581     // Two bytes misalignment happens only for byte and short (char) arrays
 6582     testptr(to, 2);
 6583     jccb(Assembler::zero, L_skip_align2);
 6584     movw(Address(to, 0), value);
 6585     addptr(to, 2);
 6586     subptr(count, 1<<(shift-1));
 6587     BIND(L_skip_align2);
 6588   }
 6589   {
 6590     Label L_fill_32_bytes;
 6591     if (!UseUnalignedLoadStores) {
 6592       // align to 8 bytes, we know we are 4 byte aligned to start
 6593       testptr(to, 4);
 6594       jccb(Assembler::zero, L_fill_32_bytes);
 6595       movl(Address(to, 0), value);
 6596       addptr(to, 4);
 6597       subptr(count, 1<<shift);
 6598     }
 6599     BIND(L_fill_32_bytes);
 6600     {
 6601       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
 6602       movdl(xtmp, value);
 6603       if (UseAVX >= 2 && UseUnalignedLoadStores) {
 6604         Label L_check_fill_32_bytes;
 6605         if (UseAVX > 2) {
 6606           // Fill 64-byte chunks
 6607           Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
 6608 
 6609           // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
 6610           cmpptr(count, VM_Version::avx3_threshold());
 6611           jccb(Assembler::below, L_check_fill_64_bytes_avx2);
 6612 
 6613           vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
 6614 
 6615           subptr(count, 16 << shift);
 6616           jccb(Assembler::less, L_check_fill_32_bytes);
 6617           align(16);
 6618 
 6619           BIND(L_fill_64_bytes_loop_avx3);
 6620           evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
 6621           addptr(to, 64);
 6622           subptr(count, 16 << shift);
 6623           jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
 6624           jmpb(L_check_fill_32_bytes);
 6625 
 6626           BIND(L_check_fill_64_bytes_avx2);
 6627         }
 6628         // Fill 64-byte chunks
 6629         Label L_fill_64_bytes_loop;
 6630         vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
 6631 
 6632         subptr(count, 16 << shift);
 6633         jcc(Assembler::less, L_check_fill_32_bytes);
 6634         align(16);
 6635 
 6636         BIND(L_fill_64_bytes_loop);
 6637         vmovdqu(Address(to, 0), xtmp);
 6638         vmovdqu(Address(to, 32), xtmp);
 6639         addptr(to, 64);
 6640         subptr(count, 16 << shift);
 6641         jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
 6642 
 6643         BIND(L_check_fill_32_bytes);
 6644         addptr(count, 8 << shift);
 6645         jccb(Assembler::less, L_check_fill_8_bytes);
 6646         vmovdqu(Address(to, 0), xtmp);
 6647         addptr(to, 32);
 6648         subptr(count, 8 << shift);
 6649 
 6650         BIND(L_check_fill_8_bytes);
 6651         // clean upper bits of YMM registers
 6652         movdl(xtmp, value);
 6653         pshufd(xtmp, xtmp, 0);
 6654       } else {
 6655         // Fill 32-byte chunks
 6656         pshufd(xtmp, xtmp, 0);
 6657 
 6658         subptr(count, 8 << shift);
 6659         jcc(Assembler::less, L_check_fill_8_bytes);
 6660         align(16);
 6661 
 6662         BIND(L_fill_32_bytes_loop);
 6663 
 6664         if (UseUnalignedLoadStores) {
 6665           movdqu(Address(to, 0), xtmp);
 6666           movdqu(Address(to, 16), xtmp);
 6667         } else {
 6668           movq(Address(to, 0), xtmp);
 6669           movq(Address(to, 8), xtmp);
 6670           movq(Address(to, 16), xtmp);
 6671           movq(Address(to, 24), xtmp);
 6672         }
 6673 
 6674         addptr(to, 32);
 6675         subptr(count, 8 << shift);
 6676         jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
 6677 
 6678         BIND(L_check_fill_8_bytes);
 6679       }
 6680       addptr(count, 8 << shift);
 6681       jccb(Assembler::zero, L_exit);
 6682       jmpb(L_fill_8_bytes);
 6683 
 6684       //
 6685       // length is too short, just fill qwords
 6686       //
 6687       BIND(L_fill_8_bytes_loop);
 6688       movq(Address(to, 0), xtmp);
 6689       addptr(to, 8);
 6690       BIND(L_fill_8_bytes);
 6691       subptr(count, 1 << (shift + 1));
 6692       jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
 6693     }
 6694   }
 6695   // fill trailing 4 bytes
 6696   BIND(L_fill_4_bytes);
 6697   testl(count, 1<<shift);
 6698   jccb(Assembler::zero, L_fill_2_bytes);
 6699   movl(Address(to, 0), value);
 6700   if (t == T_BYTE || t == T_SHORT) {
 6701     Label L_fill_byte;
 6702     addptr(to, 4);
 6703     BIND(L_fill_2_bytes);
 6704     // fill trailing 2 bytes
 6705     testl(count, 1<<(shift-1));
 6706     jccb(Assembler::zero, L_fill_byte);
 6707     movw(Address(to, 0), value);
 6708     if (t == T_BYTE) {
 6709       addptr(to, 2);
 6710       BIND(L_fill_byte);
 6711       // fill trailing byte
 6712       testl(count, 1);
 6713       jccb(Assembler::zero, L_exit);
 6714       movb(Address(to, 0), value);
 6715     } else {
 6716       BIND(L_fill_byte);
 6717     }
 6718   } else {
 6719     BIND(L_fill_2_bytes);
 6720   }
 6721   BIND(L_exit);
 6722 }
 6723 
 6724 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
 6725   switch(type) {
 6726     case T_BYTE:
 6727     case T_BOOLEAN:
 6728       evpbroadcastb(dst, src, vector_len);
 6729       break;
 6730     case T_SHORT:
 6731     case T_CHAR:
 6732       evpbroadcastw(dst, src, vector_len);
 6733       break;
 6734     case T_INT:
 6735     case T_FLOAT:
 6736       evpbroadcastd(dst, src, vector_len);
 6737       break;
 6738     case T_LONG:
 6739     case T_DOUBLE:
 6740       evpbroadcastq(dst, src, vector_len);
 6741       break;
 6742     default:
 6743       fatal("Unhandled type : %s", type2name(type));
 6744       break;
 6745   }
 6746 }
 6747 
 6748 // encode char[] to byte[] in ISO_8859_1 or ASCII
 6749    //@IntrinsicCandidate
 6750    //private static int implEncodeISOArray(byte[] sa, int sp,
 6751    //byte[] da, int dp, int len) {
 6752    //  int i = 0;
 6753    //  for (; i < len; i++) {
 6754    //    char c = StringUTF16.getChar(sa, sp++);
 6755    //    if (c > '\u00FF')
 6756    //      break;
 6757    //    da[dp++] = (byte)c;
 6758    //  }
 6759    //  return i;
 6760    //}
 6761    //
 6762    //@IntrinsicCandidate
 6763    //private static int implEncodeAsciiArray(char[] sa, int sp,
 6764    //    byte[] da, int dp, int len) {
 6765    //  int i = 0;
 6766    //  for (; i < len; i++) {
 6767    //    char c = sa[sp++];
 6768    //    if (c >= '\u0080')
 6769    //      break;
 6770    //    da[dp++] = (byte)c;
 6771    //  }
 6772    //  return i;
 6773    //}
 6774 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
 6775   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 6776   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 6777   Register tmp5, Register result, bool ascii) {
 6778 
 6779   // rsi: src
 6780   // rdi: dst
 6781   // rdx: len
 6782   // rcx: tmp5
 6783   // rax: result
 6784   ShortBranchVerifier sbv(this);
 6785   assert_different_registers(src, dst, len, tmp5, result);
 6786   Label L_done, L_copy_1_char, L_copy_1_char_exit;
 6787 
 6788   int mask = ascii ? 0xff80ff80 : 0xff00ff00;
 6789   int short_mask = ascii ? 0xff80 : 0xff00;
 6790 
 6791   // set result
 6792   xorl(result, result);
 6793   // check for zero length
 6794   testl(len, len);
 6795   jcc(Assembler::zero, L_done);
 6796 
 6797   movl(result, len);
 6798 
 6799   // Setup pointers
 6800   lea(src, Address(src, len, Address::times_2)); // char[]
 6801   lea(dst, Address(dst, len, Address::times_1)); // byte[]
 6802   negptr(len);
 6803 
 6804   if (UseSSE42Intrinsics || UseAVX >= 2) {
 6805     Label L_copy_8_chars, L_copy_8_chars_exit;
 6806     Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
 6807 
 6808     if (UseAVX >= 2) {
 6809       Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
 6810       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6811       movdl(tmp1Reg, tmp5);
 6812       vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
 6813       jmp(L_chars_32_check);
 6814 
 6815       bind(L_copy_32_chars);
 6816       vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
 6817       vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
 6818       vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6819       vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6820       jccb(Assembler::notZero, L_copy_32_chars_exit);
 6821       vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6822       vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
 6823       vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
 6824 
 6825       bind(L_chars_32_check);
 6826       addptr(len, 32);
 6827       jcc(Assembler::lessEqual, L_copy_32_chars);
 6828 
 6829       bind(L_copy_32_chars_exit);
 6830       subptr(len, 16);
 6831       jccb(Assembler::greater, L_copy_16_chars_exit);
 6832 
 6833     } else if (UseSSE42Intrinsics) {
 6834       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6835       movdl(tmp1Reg, tmp5);
 6836       pshufd(tmp1Reg, tmp1Reg, 0);
 6837       jmpb(L_chars_16_check);
 6838     }
 6839 
 6840     bind(L_copy_16_chars);
 6841     if (UseAVX >= 2) {
 6842       vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
 6843       vptest(tmp2Reg, tmp1Reg);
 6844       jcc(Assembler::notZero, L_copy_16_chars_exit);
 6845       vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
 6846       vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
 6847     } else {
 6848       if (UseAVX > 0) {
 6849         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6850         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6851         vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
 6852       } else {
 6853         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6854         por(tmp2Reg, tmp3Reg);
 6855         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6856         por(tmp2Reg, tmp4Reg);
 6857       }
 6858       ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6859       jccb(Assembler::notZero, L_copy_16_chars_exit);
 6860       packuswb(tmp3Reg, tmp4Reg);
 6861     }
 6862     movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
 6863 
 6864     bind(L_chars_16_check);
 6865     addptr(len, 16);
 6866     jcc(Assembler::lessEqual, L_copy_16_chars);
 6867 
 6868     bind(L_copy_16_chars_exit);
 6869     if (UseAVX >= 2) {
 6870       // clean upper bits of YMM registers
 6871       vpxor(tmp2Reg, tmp2Reg);
 6872       vpxor(tmp3Reg, tmp3Reg);
 6873       vpxor(tmp4Reg, tmp4Reg);
 6874       movdl(tmp1Reg, tmp5);
 6875       pshufd(tmp1Reg, tmp1Reg, 0);
 6876     }
 6877     subptr(len, 8);
 6878     jccb(Assembler::greater, L_copy_8_chars_exit);
 6879 
 6880     bind(L_copy_8_chars);
 6881     movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
 6882     ptest(tmp3Reg, tmp1Reg);
 6883     jccb(Assembler::notZero, L_copy_8_chars_exit);
 6884     packuswb(tmp3Reg, tmp1Reg);
 6885     movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
 6886     addptr(len, 8);
 6887     jccb(Assembler::lessEqual, L_copy_8_chars);
 6888 
 6889     bind(L_copy_8_chars_exit);
 6890     subptr(len, 8);
 6891     jccb(Assembler::zero, L_done);
 6892   }
 6893 
 6894   bind(L_copy_1_char);
 6895   load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
 6896   testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
 6897   jccb(Assembler::notZero, L_copy_1_char_exit);
 6898   movb(Address(dst, len, Address::times_1, 0), tmp5);
 6899   addptr(len, 1);
 6900   jccb(Assembler::less, L_copy_1_char);
 6901 
 6902   bind(L_copy_1_char_exit);
6903   addptr(result, len); // len is the negative count of unprocessed elements
 6904 
 6905   bind(L_done);
 6906 }
 6907 
 6908 /**
 6909  * Helper for multiply_to_len().
 6910  */
 6911 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
 6912   addq(dest_lo, src1);
 6913   adcq(dest_hi, 0);
 6914   addq(dest_lo, src2);
 6915   adcq(dest_hi, 0);
 6916 }
 6917 
 6918 /**
 6919  * Multiply 64 bit by 64 bit first loop.
 6920  */
 6921 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
 6922                                            Register y, Register y_idx, Register z,
 6923                                            Register carry, Register product,
 6924                                            Register idx, Register kdx) {
 6925   //
 6926   //  jlong carry, x[], y[], z[];
6927   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 6928   //    huge_128 product = y[idx] * x[xstart] + carry;
 6929   //    z[kdx] = (jlong)product;
 6930   //    carry  = (jlong)(product >>> 64);
 6931   //  }
 6932   //  z[xstart] = carry;
 6933   //
 6934 
 6935   Label L_first_loop, L_first_loop_exit;
 6936   Label L_one_x, L_one_y, L_multiply;
 6937 
 6938   decrementl(xstart);
 6939   jcc(Assembler::negative, L_one_x);
 6940 
 6941   movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 6942   rorq(x_xstart, 32); // convert big-endian to little-endian
 6943 
 6944   bind(L_first_loop);
 6945   decrementl(idx);
 6946   jcc(Assembler::negative, L_first_loop_exit);
 6947   decrementl(idx);
 6948   jcc(Assembler::negative, L_one_y);
 6949   movq(y_idx, Address(y, idx, Address::times_4,  0));
 6950   rorq(y_idx, 32); // convert big-endian to little-endian
 6951   bind(L_multiply);
 6952   movq(product, x_xstart);
 6953   mulq(y_idx); // product(rax) * y_idx -> rdx:rax
 6954   addq(product, carry);
 6955   adcq(rdx, 0);
 6956   subl(kdx, 2);
 6957   movl(Address(z, kdx, Address::times_4,  4), product);
 6958   shrq(product, 32);
 6959   movl(Address(z, kdx, Address::times_4,  0), product);
 6960   movq(carry, rdx);
 6961   jmp(L_first_loop);
 6962 
 6963   bind(L_one_y);
 6964   movl(y_idx, Address(y,  0));
 6965   jmp(L_multiply);
 6966 
 6967   bind(L_one_x);
 6968   movl(x_xstart, Address(x,  0));
 6969   jmp(L_first_loop);
 6970 
 6971   bind(L_first_loop_exit);
 6972 }
 6973 
 6974 /**
 6975  * Multiply 64 bit by 64 bit and add 128 bit.
 6976  */
 6977 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
 6978                                             Register yz_idx, Register idx,
 6979                                             Register carry, Register product, int offset) {
 6980   //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
 6981   //     z[kdx] = (jlong)product;
 6982 
 6983   movq(yz_idx, Address(y, idx, Address::times_4,  offset));
 6984   rorq(yz_idx, 32); // convert big-endian to little-endian
 6985   movq(product, x_xstart);
 6986   mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
 6987   movq(yz_idx, Address(z, idx, Address::times_4,  offset));
 6988   rorq(yz_idx, 32); // convert big-endian to little-endian
 6989 
 6990   add2_with_carry(rdx, product, carry, yz_idx);
 6991 
 6992   movl(Address(z, idx, Address::times_4,  offset+4), product);
 6993   shrq(product, 32);
 6994   movl(Address(z, idx, Address::times_4,  offset), product);
 6995 
 6996 }
 6997 
 6998 /**
 6999  * Multiply 128 bit by 128 bit. Unrolled inner loop.
 7000  */
 7001 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
 7002                                              Register yz_idx, Register idx, Register jdx,
 7003                                              Register carry, Register product,
 7004                                              Register carry2) {
 7005   //   jlong carry, x[], y[], z[];
 7006   //   int kdx = ystart+1;
 7007   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 7008   //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
 7009   //     z[kdx+idx+1] = (jlong)product;
 7010   //     jlong carry2  = (jlong)(product >>> 64);
 7011   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
 7012   //     z[kdx+idx] = (jlong)product;
 7013   //     carry  = (jlong)(product >>> 64);
 7014   //   }
 7015   //   idx += 2;
 7016   //   if (idx > 0) {
 7017   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
 7018   //     z[kdx+idx] = (jlong)product;
 7019   //     carry  = (jlong)(product >>> 64);
 7020   //   }
 7021   //
 7022 
 7023   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 7024 
 7025   movl(jdx, idx);
 7026   andl(jdx, 0xFFFFFFFC);
 7027   shrl(jdx, 2);
 7028 
 7029   bind(L_third_loop);
 7030   subl(jdx, 1);
 7031   jcc(Assembler::negative, L_third_loop_exit);
 7032   subl(idx, 4);
 7033 
 7034   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
 7035   movq(carry2, rdx);
 7036 
 7037   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
 7038   movq(carry, rdx);
 7039   jmp(L_third_loop);
 7040 
 7041   bind (L_third_loop_exit);
 7042 
 7043   andl (idx, 0x3);
 7044   jcc(Assembler::zero, L_post_third_loop_done);
 7045 
 7046   Label L_check_1;
 7047   subl(idx, 2);
 7048   jcc(Assembler::negative, L_check_1);
 7049 
 7050   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
 7051   movq(carry, rdx);
 7052 
 7053   bind (L_check_1);
 7054   addl (idx, 0x2);
 7055   andl (idx, 0x1);
 7056   subl(idx, 1);
 7057   jcc(Assembler::negative, L_post_third_loop_done);
 7058 
 7059   movl(yz_idx, Address(y, idx, Address::times_4,  0));
 7060   movq(product, x_xstart);
 7061   mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
 7062   movl(yz_idx, Address(z, idx, Address::times_4,  0));
 7063 
 7064   add2_with_carry(rdx, product, yz_idx, carry);
 7065 
 7066   movl(Address(z, idx, Address::times_4,  0), product);
 7067   shrq(product, 32);
 7068 
 7069   shlq(rdx, 32);
 7070   orq(product, rdx);
 7071   movq(carry, product);
 7072 
 7073   bind(L_post_third_loop_done);
 7074 }
 7075 
 7076 /**
 7077  * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 7078  *
 7079  */
 7080 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
 7081                                                   Register carry, Register carry2,
 7082                                                   Register idx, Register jdx,
 7083                                                   Register yz_idx1, Register yz_idx2,
 7084                                                   Register tmp, Register tmp3, Register tmp4) {
 7085   assert(UseBMI2Instructions, "should be used only when BMI2 is available");
 7086 
 7087   //   jlong carry, x[], y[], z[];
 7088   //   int kdx = ystart+1;
 7089   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 7090   //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
 7091   //     jlong carry2  = (jlong)(tmp3 >>> 64);
 7092   //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
 7093   //     carry  = (jlong)(tmp4 >>> 64);
 7094   //     z[kdx+idx+1] = (jlong)tmp3;
 7095   //     z[kdx+idx] = (jlong)tmp4;
 7096   //   }
 7097   //   idx += 2;
 7098   //   if (idx > 0) {
 7099   //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
 7100   //     z[kdx+idx] = (jlong)yz_idx1;
 7101   //     carry  = (jlong)(yz_idx1 >>> 64);
 7102   //   }
 7103   //
 7104 
 7105   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 7106 
 7107   movl(jdx, idx);
 7108   andl(jdx, 0xFFFFFFFC);
 7109   shrl(jdx, 2);
 7110 
 7111   bind(L_third_loop);
 7112   subl(jdx, 1);
 7113   jcc(Assembler::negative, L_third_loop_exit);
 7114   subl(idx, 4);
 7115 
 7116   movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
 7117   rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
 7118   movq(yz_idx2, Address(y, idx, Address::times_4,  0));
 7119   rorxq(yz_idx2, yz_idx2, 32);
 7120 
 7121   mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
 7122   mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
 7123 
 7124   movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
 7125   rorxq(yz_idx1, yz_idx1, 32);
 7126   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 7127   rorxq(yz_idx2, yz_idx2, 32);
 7128 
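        // With ADX, the two additions folded into each partial product can run on
        // independent carry chains: mulxq leaves the flags untouched, adcxq reads and
        // writes only CF, and adoxq reads and writes only OF, so the CF chain (adding
        // the incoming carry) and the OF chain (adding the z[] words) do not have to be
        // serialized the way a plain adc sequence would be.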
 7129   if (VM_Version::supports_adx()) {
 7130     adcxq(tmp3, carry);
 7131     adoxq(tmp3, yz_idx1);
 7132 
 7133     adcxq(tmp4, tmp);
 7134     adoxq(tmp4, yz_idx2);
 7135 
 7136     movl(carry, 0); // does not affect flags
 7137     adcxq(carry2, carry);
 7138     adoxq(carry2, carry);
 7139   } else {
 7140     add2_with_carry(tmp4, tmp3, carry, yz_idx1);
 7141     add2_with_carry(carry2, tmp4, tmp, yz_idx2);
 7142   }
 7143   movq(carry, carry2);
 7144 
 7145   movl(Address(z, idx, Address::times_4, 12), tmp3);
 7146   shrq(tmp3, 32);
 7147   movl(Address(z, idx, Address::times_4,  8), tmp3);
 7148 
 7149   movl(Address(z, idx, Address::times_4,  4), tmp4);
 7150   shrq(tmp4, 32);
 7151   movl(Address(z, idx, Address::times_4,  0), tmp4);
 7152 
 7153   jmp(L_third_loop);
 7154 
 7155   bind (L_third_loop_exit);
 7156 
 7157   andl (idx, 0x3);
 7158   jcc(Assembler::zero, L_post_third_loop_done);
 7159 
 7160   Label L_check_1;
 7161   subl(idx, 2);
 7162   jcc(Assembler::negative, L_check_1);
 7163 
 7164   movq(yz_idx1, Address(y, idx, Address::times_4,  0));
 7165   rorxq(yz_idx1, yz_idx1, 32);
 7166   mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
 7167   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 7168   rorxq(yz_idx2, yz_idx2, 32);
 7169 
 7170   add2_with_carry(tmp4, tmp3, carry, yz_idx2);
 7171 
 7172   movl(Address(z, idx, Address::times_4,  4), tmp3);
 7173   shrq(tmp3, 32);
 7174   movl(Address(z, idx, Address::times_4,  0), tmp3);
 7175   movq(carry, tmp4);
 7176 
 7177   bind (L_check_1);
 7178   addl (idx, 0x2);
 7179   andl (idx, 0x1);
 7180   subl(idx, 1);
 7181   jcc(Assembler::negative, L_post_third_loop_done);
 7182   movl(tmp4, Address(y, idx, Address::times_4,  0));
 7183   mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
 7184   movl(tmp4, Address(z, idx, Address::times_4,  0));
 7185 
 7186   add2_with_carry(carry2, tmp3, tmp4, carry);
 7187 
 7188   movl(Address(z, idx, Address::times_4,  0), tmp3);
 7189   shrq(tmp3, 32);
 7190 
 7191   shlq(carry2, 32);
 7192   orq(tmp3, carry2);
 7193   movq(carry, tmp3);
 7194 
 7195   bind(L_post_third_loop_done);
 7196 }
 7197 
 7198 /**
 7199  * Code for BigInteger::multiplyToLen() intrinsic.
 7200  *
 7201  * rdi: x
 7202  * rax: xlen
 7203  * rsi: y
 7204  * rcx: ylen
 7205  * r8:  z
 7206  * r11: tmp0
 7207  * r12: tmp1
 7208  * r13: tmp2
 7209  * r14: tmp3
 7210  * r15: tmp4
 7211  * rbx: tmp5
 7212  *
 7213  */
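      // Note on layout: x, y and z are BigInteger int[] magnitudes (most significant
      // word first). The generated loops consume them one jlong (two ints) at a time,
      // which is why the index registers step by 2 and the 64-bit loads are rotated by
      // 32 (see the big-endian/little-endian comments in the helpers above).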
 7214 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
 7215                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
 7216   ShortBranchVerifier sbv(this);
 7217   assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
 7218 
 7219   push(tmp0);
 7220   push(tmp1);
 7221   push(tmp2);
 7222   push(tmp3);
 7223   push(tmp4);
 7224   push(tmp5);
 7225 
 7226   push(xlen);
 7227 
 7228   const Register idx = tmp1;
 7229   const Register kdx = tmp2;
 7230   const Register xstart = tmp3;
 7231 
 7232   const Register y_idx = tmp4;
 7233   const Register carry = tmp5;
 7234   const Register product  = xlen;
 7235   const Register x_xstart = tmp0;
 7236 
 7237   // First Loop.
 7238   //
 7239   //  final static long LONG_MASK = 0xffffffffL;
 7240   //  int xstart = xlen - 1;
 7241   //  int ystart = ylen - 1;
 7242   //  long carry = 0;
 7243   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 7244   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
 7245   //    z[kdx] = (int)product;
 7246   //    carry = product >>> 32;
 7247   //  }
 7248   //  z[xstart] = (int)carry;
 7249   //
 7250 
 7251   movl(idx, ylen);               // idx = ylen;
 7252   lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
 7253   xorq(carry, carry);            // carry = 0;
 7254 
 7255   Label L_done;
 7256 
 7257   movl(xstart, xlen);
 7258   decrementl(xstart);
 7259   jcc(Assembler::negative, L_done);
 7260 
 7261   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
 7262 
 7263   Label L_second_loop;
 7264   testl(kdx, kdx);
 7265   jcc(Assembler::zero, L_second_loop);
 7266 
 7267   Label L_carry;
 7268   subl(kdx, 1);
 7269   jcc(Assembler::zero, L_carry);
 7270 
 7271   movl(Address(z, kdx, Address::times_4,  0), carry);
 7272   shrq(carry, 32);
 7273   subl(kdx, 1);
 7274 
 7275   bind(L_carry);
 7276   movl(Address(z, kdx, Address::times_4,  0), carry);
 7277 
 7278   // Second and third (nested) loops.
 7279   //
 7280   // for (int i = xstart-1; i >= 0; i--) { // Second loop
 7281   //   carry = 0;
 7282   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
 7283   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
 7284   //                    (z[k] & LONG_MASK) + carry;
 7285   //     z[k] = (int)product;
 7286   //     carry = product >>> 32;
 7287   //   }
 7288   //   z[i] = (int)carry;
 7289   // }
 7290   //
 7291   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
 7292 
 7293   const Register jdx = tmp1;
 7294 
 7295   bind(L_second_loop);
 7296   xorl(carry, carry);    // carry = 0;
 7297   movl(jdx, ylen);       // j = ystart+1
 7298 
 7299   subl(xstart, 1);       // i = xstart-1;
 7300   jcc(Assembler::negative, L_done);
 7301 
 7302   push (z);
 7303 
 7304   Label L_last_x;
 7305   lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
 7306   subl(xstart, 1);       // i = xstart-1;
 7307   jcc(Assembler::negative, L_last_x);
 7308 
 7309   if (UseBMI2Instructions) {
 7310     movq(rdx,  Address(x, xstart, Address::times_4,  0));
 7311     rorxq(rdx, rdx, 32); // convert big-endian to little-endian
 7312   } else {
 7313     movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 7314     rorq(x_xstart, 32);  // convert big-endian to little-endian
 7315   }
 7316 
 7317   Label L_third_loop_prologue;
 7318   bind(L_third_loop_prologue);
 7319 
 7320   push (x);
 7321   push (xstart);
 7322   push (ylen);
 7323 
 7324 
 7325   if (UseBMI2Instructions) {
 7326     multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
 7327   } else { // !UseBMI2Instructions
 7328     multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
 7329   }
 7330 
 7331   pop(ylen);
 7332   pop(xlen);
 7333   pop(x);
 7334   pop(z);
 7335 
 7336   movl(tmp3, xlen);
 7337   addl(tmp3, 1);
 7338   movl(Address(z, tmp3, Address::times_4,  0), carry);
 7339   subl(tmp3, 1);
 7340   jccb(Assembler::negative, L_done);
 7341 
 7342   shrq(carry, 32);
 7343   movl(Address(z, tmp3, Address::times_4,  0), carry);
 7344   jmp(L_second_loop);
 7345 
 7346   // Next infrequent code is moved outside loops.
 7347   bind(L_last_x);
 7348   if (UseBMI2Instructions) {
 7349     movl(rdx, Address(x,  0));
 7350   } else {
 7351     movl(x_xstart, Address(x,  0));
 7352   }
 7353   jmp(L_third_loop_prologue);
 7354 
 7355   bind(L_done);
 7356 
 7357   pop(xlen);
 7358 
 7359   pop(tmp5);
 7360   pop(tmp4);
 7361   pop(tmp3);
 7362   pop(tmp2);
 7363   pop(tmp1);
 7364   pop(tmp0);
 7365 }
 7366 
 7367 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
 7368   Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
 7369   assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
 7370   Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
 7371   Label VECTOR8_TAIL, VECTOR4_TAIL;
 7372   Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
 7373   Label SAME_TILL_END, DONE;
 7374   Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
 7375 
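        // As generated here: result walks the arrays as a byte offset; on the first
        // mismatching byte the offset is converted back to an element index with
        // shrq(result) (shift by log2_array_indxscale in cl), and if the ranges are
        // equal through the end, result is set to -1.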
 7376   //scale is in rcx in both Win64 and Unix
 7377   ShortBranchVerifier sbv(this);
 7378 
 7379   shlq(length);
 7380   xorq(result, result);
 7381 
 7382   if ((AVX3Threshold == 0) && (UseAVX > 2) &&
 7383       VM_Version::supports_avx512vlbw()) {
 7384     Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
 7385 
 7386     cmpq(length, 64);
 7387     jcc(Assembler::less, VECTOR32_TAIL);
 7388 
 7389     movq(tmp1, length);
 7390     andq(tmp1, 0x3F);      // tail count
 7391     andq(length, ~(0x3F)); //vector count
 7392 
 7393     bind(VECTOR64_LOOP);
 7394     // AVX512 code to compare 64 byte vectors.
 7395     evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
 7396     evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
 7397     kortestql(k7, k7);
 7398     jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL);     // mismatch
 7399     addq(result, 64);
 7400     subq(length, 64);
 7401     jccb(Assembler::notZero, VECTOR64_LOOP);
 7402 
 7403     //bind(VECTOR64_TAIL);
 7404     testq(tmp1, tmp1);
 7405     jcc(Assembler::zero, SAME_TILL_END);
 7406 
 7407     //bind(VECTOR64_TAIL);
 7408     // AVX512 code to compare up to 63 byte vectors.
 7409     mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
 7410     shlxq(tmp2, tmp2, tmp1);
 7411     notq(tmp2);
 7412     kmovql(k3, tmp2);
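          // k3 now has exactly its low tmp1 bits set (~(-1 << tmp1)), selecting only
          // the remaining tail bytes for the masked load and compare below.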
 7413 
 7414     evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
 7415     evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
 7416 
 7417     ktestql(k7, k3);
 7418     jcc(Assembler::below, SAME_TILL_END);     // not mismatch
 7419 
 7420     bind(VECTOR64_NOT_EQUAL);
 7421     kmovql(tmp1, k7);
 7422     notq(tmp1);
 7423     tzcntq(tmp1, tmp1);
 7424     addq(result, tmp1);
 7425     shrq(result);
 7426     jmp(DONE);
 7427     bind(VECTOR32_TAIL);
 7428   }
 7429 
 7430   cmpq(length, 8);
 7431   jcc(Assembler::equal, VECTOR8_LOOP);
 7432   jcc(Assembler::less, VECTOR4_TAIL);
 7433 
 7434   if (UseAVX >= 2) {
 7435     Label VECTOR16_TAIL, VECTOR32_LOOP;
 7436 
 7437     cmpq(length, 16);
 7438     jcc(Assembler::equal, VECTOR16_LOOP);
 7439     jcc(Assembler::less, VECTOR8_LOOP);
 7440 
 7441     cmpq(length, 32);
 7442     jccb(Assembler::less, VECTOR16_TAIL);
 7443 
 7444     subq(length, 32);
 7445     bind(VECTOR32_LOOP);
 7446     vmovdqu(rymm0, Address(obja, result));
 7447     vmovdqu(rymm1, Address(objb, result));
 7448     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
 7449     vptest(rymm2, rymm2);
 7450     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
 7451     addq(result, 32);
 7452     subq(length, 32);
 7453     jcc(Assembler::greaterEqual, VECTOR32_LOOP);
 7454     addq(length, 32);
 7455     jcc(Assembler::equal, SAME_TILL_END);
 7456     //falling through if less than 32 bytes left //close the branch here.
 7457 
 7458     bind(VECTOR16_TAIL);
 7459     cmpq(length, 16);
 7460     jccb(Assembler::less, VECTOR8_TAIL);
 7461     bind(VECTOR16_LOOP);
 7462     movdqu(rymm0, Address(obja, result));
 7463     movdqu(rymm1, Address(objb, result));
 7464     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
 7465     ptest(rymm2, rymm2);
 7466     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7467     addq(result, 16);
 7468     subq(length, 16);
 7469     jcc(Assembler::equal, SAME_TILL_END);
 7470     //falling through if less than 16 bytes left
 7471   } else {//regular intrinsics
 7472 
 7473     cmpq(length, 16);
 7474     jccb(Assembler::less, VECTOR8_TAIL);
 7475 
 7476     subq(length, 16);
 7477     bind(VECTOR16_LOOP);
 7478     movdqu(rymm0, Address(obja, result));
 7479     movdqu(rymm1, Address(objb, result));
 7480     pxor(rymm0, rymm1);
 7481     ptest(rymm0, rymm0);
 7482     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7483     addq(result, 16);
 7484     subq(length, 16);
 7485     jccb(Assembler::greaterEqual, VECTOR16_LOOP);
 7486     addq(length, 16);
 7487     jcc(Assembler::equal, SAME_TILL_END);
 7488     //falling through if less than 16 bytes left
 7489   }
 7490 
 7491   bind(VECTOR8_TAIL);
 7492   cmpq(length, 8);
 7493   jccb(Assembler::less, VECTOR4_TAIL);
 7494   bind(VECTOR8_LOOP);
 7495   movq(tmp1, Address(obja, result));
 7496   movq(tmp2, Address(objb, result));
 7497   xorq(tmp1, tmp2);
 7498   testq(tmp1, tmp1);
 7499   jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
 7500   addq(result, 8);
 7501   subq(length, 8);
 7502   jcc(Assembler::equal, SAME_TILL_END);
 7503   //falling through if less than 8 bytes left
 7504 
 7505   bind(VECTOR4_TAIL);
 7506   cmpq(length, 4);
 7507   jccb(Assembler::less, BYTES_TAIL);
 7508   bind(VECTOR4_LOOP);
 7509   movl(tmp1, Address(obja, result));
 7510   xorl(tmp1, Address(objb, result));
 7511   testl(tmp1, tmp1);
 7512   jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
 7513   addq(result, 4);
 7514   subq(length, 4);
 7515   jcc(Assembler::equal, SAME_TILL_END);
 7516   //falling through if less than 4 bytes left
 7517 
 7518   bind(BYTES_TAIL);
 7519   bind(BYTES_LOOP);
 7520   load_unsigned_byte(tmp1, Address(obja, result));
 7521   load_unsigned_byte(tmp2, Address(objb, result));
 7522   xorl(tmp1, tmp2);
 7523   testl(tmp1, tmp1);
 7524   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7525   decq(length);
 7526   jcc(Assembler::zero, SAME_TILL_END);
 7527   incq(result);
 7528   load_unsigned_byte(tmp1, Address(obja, result));
 7529   load_unsigned_byte(tmp2, Address(objb, result));
 7530   xorl(tmp1, tmp2);
 7531   testl(tmp1, tmp1);
 7532   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7533   decq(length);
 7534   jcc(Assembler::zero, SAME_TILL_END);
 7535   incq(result);
 7536   load_unsigned_byte(tmp1, Address(obja, result));
 7537   load_unsigned_byte(tmp2, Address(objb, result));
 7538   xorl(tmp1, tmp2);
 7539   testl(tmp1, tmp1);
 7540   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7541   jmp(SAME_TILL_END);
 7542 
 7543   if (UseAVX >= 2) {
 7544     bind(VECTOR32_NOT_EQUAL);
 7545     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
 7546     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
 7547     vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
 7548     vpmovmskb(tmp1, rymm0);
 7549     bsfq(tmp1, tmp1);
 7550     addq(result, tmp1);
 7551     shrq(result);
 7552     jmp(DONE);
 7553   }
 7554 
 7555   bind(VECTOR16_NOT_EQUAL);
 7556   if (UseAVX >= 2) {
 7557     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
 7558     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
 7559     pxor(rymm0, rymm2);
 7560   } else {
 7561     pcmpeqb(rymm2, rymm2);
 7562     pxor(rymm0, rymm1);
 7563     pcmpeqb(rymm0, rymm1);
 7564     pxor(rymm0, rymm2);
 7565   }
 7566   pmovmskb(tmp1, rymm0);
 7567   bsfq(tmp1, tmp1);
 7568   addq(result, tmp1);
 7569   shrq(result);
 7570   jmpb(DONE);
 7571 
 7572   bind(VECTOR8_NOT_EQUAL);
 7573   bind(VECTOR4_NOT_EQUAL);
 7574   bsfq(tmp1, tmp1);
 7575   shrq(tmp1, 3);
 7576   addq(result, tmp1);
 7577   bind(BYTES_NOT_EQUAL);
 7578   shrq(result);
 7579   jmpb(DONE);
 7580 
 7581   bind(SAME_TILL_END);
 7582   mov64(result, -1);
 7583 
 7584   bind(DONE);
 7585 }
 7586 
 7587 //Helper functions for square_to_len()
 7588 
 7589 /**
 7590  * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 7591  * Preserves x and z and modifies rest of the registers.
 7592  */
 7593 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7594   // Perform square and right shift by 1
 7595   // Handle odd xlen case first, then for even xlen do the following
 7596   // jlong carry = 0;
 7597   // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
 7598   //     huge_128 product = x[j:j+1] * x[j:j+1];
 7599   //     z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
 7600   //     z[i+2:i+3] = (jlong)(product >>> 1);
 7601   //     carry = (jlong)product;
 7602   // }
 7603 
 7604   xorq(tmp5, tmp5);     // carry
 7605   xorq(rdxReg, rdxReg);
 7606   xorl(tmp1, tmp1);     // index for x
 7607   xorl(tmp4, tmp4);     // index for z
 7608 
 7609   Label L_first_loop, L_first_loop_exit;
 7610 
 7611   testl(xlen, 1);
 7612   jccb(Assembler::zero, L_first_loop); //jump if xlen is even
 7613 
 7614   // Square and right shift by 1 the odd element using 32 bit multiply
 7615   movl(raxReg, Address(x, tmp1, Address::times_4, 0));
 7616   imulq(raxReg, raxReg);
 7617   shrq(raxReg, 1);
 7618   adcq(tmp5, 0);
 7619   movq(Address(z, tmp4, Address::times_4, 0), raxReg);
 7620   incrementl(tmp1);
 7621   addl(tmp4, 2);
 7622 
 7623   // Square and  right shift by 1 the rest using 64 bit multiply
 7624   bind(L_first_loop);
 7625   cmpptr(tmp1, xlen);
 7626   jccb(Assembler::equal, L_first_loop_exit);
 7627 
 7628   // Square
 7629   movq(raxReg, Address(x, tmp1, Address::times_4,  0));
 7630   rorq(raxReg, 32);    // convert big-endian to little-endian
 7631   mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax
 7632 
 7633   // Right shift by 1 and save carry
 7634   shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
 7635   rcrq(rdxReg, 1);
 7636   rcrq(raxReg, 1);
 7637   adcq(tmp5, 0);
 7638 
 7639   // Store result in z
 7640   movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
 7641   movq(Address(z, tmp4, Address::times_4, 8), raxReg);
 7642 
 7643   // Update indices for x and z
 7644   addl(tmp1, 2);
 7645   addl(tmp4, 4);
 7646   jmp(L_first_loop);
 7647 
 7648   bind(L_first_loop_exit);
 7649 }
 7650 
 7651 
 7652 /**
 7653  * Perform the following multiply add operation using BMI2 instructions
 7654  * carry:sum = sum + op1*op2 + carry
 7655  * op2 should be in rdx
 7656  * op2 is preserved, all other registers are modified
 7657  */
 7658 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
 7659   // op2 must already be in rdx: mulxq multiplies by rdx implicitly
 7660   mulxq(tmp2, op1, op1);  //  op1 * op2 -> tmp2:op1
 7661   addq(sum, carry);
 7662   adcq(tmp2, 0);
 7663   addq(sum, op1);
 7664   adcq(tmp2, 0);
 7665   movq(carry, tmp2);
 7666 }
 7667 
 7668 /**
 7669  * Perform the following multiply add operation:
 7670  * carry:sum = sum + op1*op2 + carry
 7671  * Preserves op1, op2 and modifies rest of registers
 7672  */
 7673 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
 7674   // rdx:rax = op1 * op2
 7675   movq(raxReg, op2);
 7676   mulq(op1);
 7677 
 7678   //  rdx:rax = sum + carry + rdx:rax
 7679   addq(sum, carry);
 7680   adcq(rdxReg, 0);
 7681   addq(sum, raxReg);
 7682   adcq(rdxReg, 0);
 7683 
 7684   // carry:sum = rdx:sum
 7685   movq(carry, rdxReg);
 7686 }
 7687 
 7688 /**
 7689  * Add 64 bit long carry into z[] with carry propagation.
 7690  * Preserves z and carry register values and modifies rest of registers.
 7691  *
 7692  */
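      // Rough sketch of the effect (illustrative only; z[] is an int[] with the most
      // significant word first, and each "+=" below is a 64-bit add of a word pair):
      //
      //   z[zlen-2 : zlen-1] += carry;
      //   for (int i = zlen - 4; carry_out && i >= 0; i -= 2)
      //     carry_out = ((z[i : i+1] += 1) wrapped to zero);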
 7693 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
 7694   Label L_fourth_loop, L_fourth_loop_exit;
 7695 
 7696   movl(tmp1, 1);
 7697   subl(zlen, 2);
 7698   addq(Address(z, zlen, Address::times_4, 0), carry);
 7699 
 7700   bind(L_fourth_loop);
 7701   jccb(Assembler::carryClear, L_fourth_loop_exit);
 7702   subl(zlen, 2);
 7703   jccb(Assembler::negative, L_fourth_loop_exit);
 7704   addq(Address(z, zlen, Address::times_4, 0), tmp1);
 7705   jmp(L_fourth_loop);
 7706   bind(L_fourth_loop_exit);
 7707 }
 7708 
 7709 /**
 7710  * Shift z[] left by 1 bit.
 7711  * Preserves x, len, z and zlen registers and modifies rest of the registers.
 7712  *
 7713  */
 7714 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
 7715 
 7716   Label L_fifth_loop, L_fifth_loop_exit;
 7717 
 7718   // Fifth loop
 7719   // Perform primitiveLeftShift(z, zlen, 1)
 7720 
 7721   const Register prev_carry = tmp1;
 7722   const Register new_carry = tmp4;
 7723   const Register value = tmp2;
 7724   const Register zidx = tmp3;
 7725 
 7726   // int zidx, carry;
 7727   // long value;
 7728   // carry = 0;
 7729   // for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
 7730   //    (carry:value) = (z[zidx] << 1) | carry;
 7731   //    z[zidx] = value;
 7732   // }
 7733 
 7734   movl(zidx, zlen);
 7735   xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
 7736 
 7737   bind(L_fifth_loop);
 7738   decl(zidx);  // Use decl to preserve carry flag
 7739   decl(zidx);
 7740   jccb(Assembler::negative, L_fifth_loop_exit);
 7741 
 7742   if (UseBMI2Instructions) {
 7743      movq(value, Address(z, zidx, Address::times_4, 0));
 7744      rclq(value, 1);
 7745      rorxq(value, value, 32);
 7746      movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7747   }
 7748   else {
 7749     // clear new_carry
 7750     xorl(new_carry, new_carry);
 7751 
 7752     // Shift z[i] by 1, or in previous carry and save new carry
 7753     movq(value, Address(z, zidx, Address::times_4, 0));
 7754     shlq(value, 1);
 7755     adcl(new_carry, 0);
 7756 
 7757     orq(value, prev_carry);
 7758     rorq(value, 0x20);
 7759     movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7760 
 7761     // Set previous carry = new carry
 7762     movl(prev_carry, new_carry);
 7763   }
 7764   jmp(L_fifth_loop);
 7765 
 7766   bind(L_fifth_loop_exit);
 7767 }
 7768 
 7769 
 7770 /**
 7771  * Code for BigInteger::squareToLen() intrinsic
 7772  *
 7773  * rdi: x
 7774  * rsi: len
 7775  * r8:  z
 7776  * rcx: zlen
 7777  * r12: tmp1
 7778  * r13: tmp2
 7779  * r14: tmp3
 7780  * r15: tmp4
 7781  * rbx: tmp5
 7782  *
 7783  */
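      // Overall shape of the algorithm (mirroring the Java code it replaces): store the
      // squares x[i]^2 shifted right by one bit, add in each off-diagonal product
      // x[i]*x[j] (i < j) once, then double the whole result with a left shift by one
      // bit and restore the lowest bit from x[len-1] & 1.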
 7784 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7785 
 7786   Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
 7787   push(tmp1);
 7788   push(tmp2);
 7789   push(tmp3);
 7790   push(tmp4);
 7791   push(tmp5);
 7792 
 7793   // First loop
 7794   // Store the squares, right shifted one bit (i.e., divided by 2).
 7795   square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
 7796 
 7797   // Add in off-diagonal sums.
 7798   //
 7799   // Second, third (nested) and fourth loops.
 7800   // zlen +=2;
 7801   // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
 7802   //    carry = 0;
 7803   //    long op2 = x[xidx:xidx+1];
 7804   //    for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
 7805   //       k -= 2;
 7806   //       long op1 = x[j:j+1];
 7807   //       long sum = z[k:k+1];
 7808   //       carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
 7809   //       z[k:k+1] = sum;
 7810   //    }
 7811   //    add_one_64(z, k, carry, tmp_regs);
 7812   // }
 7813 
 7814   const Register carry = tmp5;
 7815   const Register sum = tmp3;
 7816   const Register op1 = tmp4;
 7817   Register op2 = tmp2;
 7818 
 7819   push(zlen);
 7820   push(len);
 7821   addl(zlen,2);
 7822   bind(L_second_loop);
 7823   xorq(carry, carry);
 7824   subl(zlen, 4);
 7825   subl(len, 2);
 7826   push(zlen);
 7827   push(len);
 7828   cmpl(len, 0);
 7829   jccb(Assembler::lessEqual, L_second_loop_exit);
 7830 
 7831   // Multiply an array by one 64 bit long.
 7832   if (UseBMI2Instructions) {
 7833     op2 = rdxReg;
 7834     movq(op2, Address(x, len, Address::times_4,  0));
 7835     rorxq(op2, op2, 32);
 7836   }
 7837   else {
 7838     movq(op2, Address(x, len, Address::times_4,  0));
 7839     rorq(op2, 32);
 7840   }
 7841 
 7842   bind(L_third_loop);
 7843   decrementl(len);
 7844   jccb(Assembler::negative, L_third_loop_exit);
 7845   decrementl(len);
 7846   jccb(Assembler::negative, L_last_x);
 7847 
 7848   movq(op1, Address(x, len, Address::times_4,  0));
 7849   rorq(op1, 32);
 7850 
 7851   bind(L_multiply);
 7852   subl(zlen, 2);
 7853   movq(sum, Address(z, zlen, Address::times_4,  0));
 7854 
 7855   // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
 7856   if (UseBMI2Instructions) {
 7857     multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
 7858   }
 7859   else {
 7860     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7861   }
 7862 
 7863   movq(Address(z, zlen, Address::times_4, 0), sum);
 7864 
 7865   jmp(L_third_loop);
 7866   bind(L_third_loop_exit);
 7867 
 7868   // Fourth loop
 7869   // Add 64 bit long carry into z with carry propagation.
 7870   // Uses the offset-adjusted zlen.
 7871   add_one_64(z, zlen, carry, tmp1);
 7872 
 7873   pop(len);
 7874   pop(zlen);
 7875   jmp(L_second_loop);
 7876 
 7877   // Next infrequent code is moved outside loops.
 7878   bind(L_last_x);
 7879   movl(op1, Address(x, 0));
 7880   jmp(L_multiply);
 7881 
 7882   bind(L_second_loop_exit);
 7883   pop(len);
 7884   pop(zlen);
 7885   pop(len);
 7886   pop(zlen);
 7887 
 7888   // Fifth loop
 7889   // Shift z left 1 bit.
 7890   lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
 7891 
 7892   // z[zlen-1] |= x[len-1] & 1;
 7893   movl(tmp3, Address(x, len, Address::times_4, -4));
 7894   andl(tmp3, 1);
 7895   orl(Address(z, zlen, Address::times_4,  -4), tmp3);
 7896 
 7897   pop(tmp5);
 7898   pop(tmp4);
 7899   pop(tmp3);
 7900   pop(tmp2);
 7901   pop(tmp1);
 7902 }
 7903 
 7904 /**
 7905  * Helper function for mul_add()
 7906  * Multiply the in[] by int k and add to out[] starting at offset offs using
 7907  * 128 bit by 32 bit multiply and return the carry in tmp5.
 7908  * Only quad int aligned length of in[] is operated on in this function.
 7909  * k is in rdxReg for BMI2Instructions, for others it is in tmp2.
 7910  * This function preserves out, in and k registers.
 7911  * len and offset point to the appropriate index in "in" and "out" respectively.
 7912  * tmp5 has the carry.
 7913  * other registers are temporary and are modified.
 7914  *
 7915  */
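      // Rough per-iteration sketch (illustrative; one "limb" is two ints handled as a
      // single jlong, and "carry:sum = ..." is the multiply_add_64 operation documented
      // above):
      //
      //   for (int t = len >> 2; t > 0; t--) {
      //     len -= 4; offset -= 4;
      //     carry:out[offset+2 .. offset+3] = in[len+2 .. len+3] * k + out[offset+2 .. offset+3] + carry;
      //     carry:out[offset   .. offset+1] = in[len   .. len+1] * k + out[offset   .. offset+1] + carry;
      //   }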
 7916 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
 7917   Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
 7918   Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7919 
 7920   Label L_first_loop, L_first_loop_exit;
 7921 
 7922   movl(tmp1, len);
 7923   shrl(tmp1, 2);
 7924 
 7925   bind(L_first_loop);
 7926   subl(tmp1, 1);
 7927   jccb(Assembler::negative, L_first_loop_exit);
 7928 
 7929   subl(len, 4);
 7930   subl(offset, 4);
 7931 
 7932   Register op2 = tmp2;
 7933   const Register sum = tmp3;
 7934   const Register op1 = tmp4;
 7935   const Register carry = tmp5;
 7936 
 7937   if (UseBMI2Instructions) {
 7938     op2 = rdxReg;
 7939   }
 7940 
 7941   movq(op1, Address(in, len, Address::times_4,  8));
 7942   rorq(op1, 32);
 7943   movq(sum, Address(out, offset, Address::times_4,  8));
 7944   rorq(sum, 32);
 7945   if (UseBMI2Instructions) {
 7946     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7947   }
 7948   else {
 7949     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7950   }
 7951   // Store back in big endian from little endian
 7952   rorq(sum, 0x20);
 7953   movq(Address(out, offset, Address::times_4,  8), sum);
 7954 
 7955   movq(op1, Address(in, len, Address::times_4,  0));
 7956   rorq(op1, 32);
 7957   movq(sum, Address(out, offset, Address::times_4,  0));
 7958   rorq(sum, 32);
 7959   if (UseBMI2Instructions) {
 7960     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7961   }
 7962   else {
 7963     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7964   }
 7965   // Store back in big endian from little endian
 7966   rorq(sum, 0x20);
 7967   movq(Address(out, offset, Address::times_4,  0), sum);
 7968 
 7969   jmp(L_first_loop);
 7970   bind(L_first_loop_exit);
 7971 }
 7972 
 7973 /**
 7974  * Code for BigInteger::mulAdd() intrinsic
 7975  *
 7976  * rdi: out
 7977  * rsi: in
 7978  * r11: offs (out.length - offset)
 7979  * rcx: len
 7980  * r8:  k
 7981  * r12: tmp1
 7982  * r13: tmp2
 7983  * r14: tmp3
 7984  * r15: tmp4
 7985  * rbx: tmp5
 7986  * Multiply the in[] by word k and add to out[], return the carry in rax
 7987  */
 7988 void MacroAssembler::mul_add(Register out, Register in, Register offs,
 7989    Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
 7990    Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7991 
 7992   Label L_carry, L_last_in, L_done;
 7993 
 7994 // carry = 0;
 7995 // for (int j=len-1; j >= 0; j--) {
 7996 //    long product = (in[j] & LONG_MASK) * kLong +
 7997 //                   (out[offs] & LONG_MASK) + carry;
 7998 //    out[offs--] = (int)product;
 7999 //    carry = product >>> 32;
 8000 // }
 8001 //
 8002   push(tmp1);
 8003   push(tmp2);
 8004   push(tmp3);
 8005   push(tmp4);
 8006   push(tmp5);
 8007 
 8008   Register op2 = tmp2;
 8009   const Register sum = tmp3;
 8010   const Register op1 = tmp4;
 8011   const Register carry =  tmp5;
 8012 
 8013   if (UseBMI2Instructions) {
 8014     op2 = rdxReg;
 8015     movl(op2, k);
 8016   }
 8017   else {
 8018     movl(op2, k);
 8019   }
 8020 
 8021   xorq(carry, carry);
 8022 
 8023   //First loop
 8024 
 8025   //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
 8026   //The carry is in tmp5
 8027   mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
 8028 
 8029   //Multiply the trailing in[] entry using 64 bit by 32 bit, if any
 8030   decrementl(len);
 8031   jccb(Assembler::negative, L_carry);
 8032   decrementl(len);
 8033   jccb(Assembler::negative, L_last_in);
 8034 
 8035   movq(op1, Address(in, len, Address::times_4,  0));
 8036   rorq(op1, 32);
 8037 
 8038   subl(offs, 2);
 8039   movq(sum, Address(out, offs, Address::times_4,  0));
 8040   rorq(sum, 32);
 8041 
 8042   if (UseBMI2Instructions) {
 8043     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 8044   }
 8045   else {
 8046     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 8047   }
 8048 
 8049   // Store back in big endian from little endian
 8050   rorq(sum, 0x20);
 8051   movq(Address(out, offs, Address::times_4,  0), sum);
 8052 
 8053   testl(len, len);
 8054   jccb(Assembler::zero, L_carry);
 8055 
 8056   //Multiply the last in[] entry, if any
 8057   bind(L_last_in);
 8058   movl(op1, Address(in, 0));
 8059   movl(sum, Address(out, offs, Address::times_4,  -4));
 8060 
 8061   movl(raxReg, k);
 8062   mull(op1); //tmp4 * eax -> edx:eax
 8063   addl(sum, carry);
 8064   adcl(rdxReg, 0);
 8065   addl(sum, raxReg);
 8066   adcl(rdxReg, 0);
 8067   movl(carry, rdxReg);
 8068 
 8069   movl(Address(out, offs, Address::times_4,  -4), sum);
 8070 
 8071   bind(L_carry);
 8072   //return tmp5/carry as carry in rax
 8073   movl(rax, carry);
 8074 
 8075   bind(L_done);
 8076   pop(tmp5);
 8077   pop(tmp4);
 8078   pop(tmp3);
 8079   pop(tmp2);
 8080   pop(tmp1);
 8081 }
 8082 
 8083 /**
 8084  * Emits code to update CRC-32 with a byte value according to constants in table
 8085  *
 8086  * @param [in,out]crc   Register containing the crc.
 8087  * @param [in]val       Register containing the byte to fold into the CRC.
 8088  * @param [in]table     Register containing the table of crc constants.
 8089  *
 8090  * uint32_t crc;
 8091  * val = crc_table[(val ^ crc) & 0xFF];
 8092  * crc = val ^ (crc >> 8);
 8093  *
 8094  */
 8095 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
 8096   xorl(val, crc);
 8097   andl(val, 0xFF);
 8098   shrl(crc, 8); // unsigned shift
 8099   xorl(crc, Address(table, val, Address::times_4, 0));
 8100 }
 8101 
 8102 /**
 8103  * Fold 128-bit data chunk
 8104  */
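      // In effect each fold computes, over GF(2):
      //
      //   xcrc = clmul_hi64(xK, xcrc) ^ clmul_lo64(xK, xcrc) ^ load128(buf + offset)
      //
      // i.e. the running remainder is carry-less multiplied by the precomputed
      // constants in xK and xor-folded into the next 16 input bytes.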
 8105 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
 8106   if (UseAVX > 0) {
 8107     vpclmulhdq(xtmp, xK, xcrc); // [123:64]
 8108     vpclmulldq(xcrc, xK, xcrc); // [63:0]
 8109     vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
 8110     pxor(xcrc, xtmp);
 8111   } else {
 8112     movdqa(xtmp, xcrc);
 8113     pclmulhdq(xtmp, xK);   // [123:64]
 8114     pclmulldq(xcrc, xK);   // [63:0]
 8115     pxor(xcrc, xtmp);
 8116     movdqu(xtmp, Address(buf, offset));
 8117     pxor(xcrc, xtmp);
 8118   }
 8119 }
 8120 
 8121 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
 8122   if (UseAVX > 0) {
 8123     vpclmulhdq(xtmp, xK, xcrc);
 8124     vpclmulldq(xcrc, xK, xcrc);
 8125     pxor(xcrc, xbuf);
 8126     pxor(xcrc, xtmp);
 8127   } else {
 8128     movdqa(xtmp, xcrc);
 8129     pclmulhdq(xtmp, xK);
 8130     pclmulldq(xcrc, xK);
 8131     pxor(xcrc, xbuf);
 8132     pxor(xcrc, xtmp);
 8133   }
 8134 }
 8135 
 8136 /**
 8137  * 8-bit folds to compute 32-bit CRC
 8138  *
 8139  * uint64_t xcrc;
 8140  * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 8141  */
 8142 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
 8143   movdl(tmp, xcrc);
 8144   andl(tmp, 0xFF);
 8145   movdl(xtmp, Address(table, tmp, Address::times_4, 0));
 8146   psrldq(xcrc, 1); // unsigned shift one byte
 8147   pxor(xcrc, xtmp);
 8148 }
 8149 
 8150 /**
 8151  * uint32_t crc;
 8152  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 8153  */
 8154 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
 8155   movl(tmp, crc);
 8156   andl(tmp, 0xFF);
 8157   shrl(crc, 8);
 8158   xorl(crc, Address(table, tmp, Address::times_4, 0));
 8159 }
 8160 
 8161 /**
 8162  * @param crc   register containing existing CRC (32-bit)
 8163  * @param buf   register pointing to input byte buffer (byte*)
 8164  * @param len   register containing number of bytes
 8165  * @param table register that will contain address of CRC table
 8166  * @param tmp   scratch register
 8167  */
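      // Overall flow: byte-align the buffer, fold 512 bits per iteration across four
      // parallel 128-bit streams (xmm1..xmm4), reduce those back to a single 128-bit
      // value, fold any remaining 16-byte chunks, then reduce 128 -> 32 bits with a
      // final series of 8-bit table folds; leftover bytes go through the scalar
      // update_byte_crc32() tail loop.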
 8168 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
 8169   assert_different_registers(crc, buf, len, table, tmp, rax);
 8170 
 8171   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8172   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8173 
 8174   // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
 8175   // context for the registers used, where all instructions below are using 128-bit mode
 8176   // On EVEX without VL and BW, these instructions will all be AVX.
 8177   lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
 8178   notl(crc); // ~crc
 8179   cmpl(len, 16);
 8180   jcc(Assembler::less, L_tail);
 8181 
 8182   // Align buffer to 16 bytes
 8183   movl(tmp, buf);
 8184   andl(tmp, 0xF);
 8185   jccb(Assembler::zero, L_aligned);
 8186   subl(tmp,  16);
 8187   addl(len, tmp);
 8188 
 8189   align(4);
 8190   BIND(L_align_loop);
 8191   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 8192   update_byte_crc32(crc, rax, table);
 8193   increment(buf);
 8194   incrementl(tmp);
 8195   jccb(Assembler::less, L_align_loop);
 8196 
 8197   BIND(L_aligned);
 8198   movl(tmp, len); // save
 8199   shrl(len, 4);
 8200   jcc(Assembler::zero, L_tail_restore);
 8201 
 8202   // Fold crc into first bytes of vector
 8203   movdqa(xmm1, Address(buf, 0));
 8204   movdl(rax, xmm1);
 8205   xorl(crc, rax);
 8206   if (VM_Version::supports_sse4_1()) {
 8207     pinsrd(xmm1, crc, 0);
 8208   } else {
 8209     pinsrw(xmm1, crc, 0);
 8210     shrl(crc, 16);
 8211     pinsrw(xmm1, crc, 1);
 8212   }
 8213   addptr(buf, 16);
 8214   subl(len, 4); // len > 0
 8215   jcc(Assembler::less, L_fold_tail);
 8216 
 8217   movdqa(xmm2, Address(buf,  0));
 8218   movdqa(xmm3, Address(buf, 16));
 8219   movdqa(xmm4, Address(buf, 32));
 8220   addptr(buf, 48);
 8221   subl(len, 3);
 8222   jcc(Assembler::lessEqual, L_fold_512b);
 8223 
 8224   // Fold total 512 bits of polynomial on each iteration,
 8225   // 128 bits per each of 4 parallel streams.
 8226   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
 8227 
 8228   align32();
 8229   BIND(L_fold_512b_loop);
 8230   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 8231   fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
 8232   fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
 8233   fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
 8234   addptr(buf, 64);
 8235   subl(len, 4);
 8236   jcc(Assembler::greater, L_fold_512b_loop);
 8237 
 8238   // Fold 512 bits to 128 bits.
 8239   BIND(L_fold_512b);
 8240   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 8241   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
 8242   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
 8243   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
 8244 
 8245   // Fold the rest of 128 bits data chunks
 8246   BIND(L_fold_tail);
 8247   addl(len, 3);
 8248   jccb(Assembler::lessEqual, L_fold_128b);
 8249   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 8250 
 8251   BIND(L_fold_tail_loop);
 8252   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 8253   addptr(buf, 16);
 8254   decrementl(len);
 8255   jccb(Assembler::greater, L_fold_tail_loop);
 8256 
 8257   // Fold 128 bits in xmm1 down into 32 bits in crc register.
 8258   BIND(L_fold_128b);
 8259   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
 8260   if (UseAVX > 0) {
 8261     vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
 8262     vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
 8263     vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
 8264   } else {
 8265     movdqa(xmm2, xmm0);
 8266     pclmulqdq(xmm2, xmm1, 0x1);
 8267     movdqa(xmm3, xmm0);
 8268     pand(xmm3, xmm2);
 8269     pclmulqdq(xmm0, xmm3, 0x1);
 8270   }
 8271   psrldq(xmm1, 8);
 8272   psrldq(xmm2, 4);
 8273   pxor(xmm0, xmm1);
 8274   pxor(xmm0, xmm2);
 8275 
 8276   // 8 8-bit folds to compute 32-bit CRC.
 8277   for (int j = 0; j < 4; j++) {
 8278     fold_8bit_crc32(xmm0, table, xmm1, rax);
 8279   }
 8280   movdl(crc, xmm0); // mov 32 bits to general register
 8281   for (int j = 0; j < 4; j++) {
 8282     fold_8bit_crc32(crc, table, rax);
 8283   }
 8284 
 8285   BIND(L_tail_restore);
 8286   movl(len, tmp); // restore
 8287   BIND(L_tail);
 8288   andl(len, 0xf);
 8289   jccb(Assembler::zero, L_exit);
 8290 
 8291   // Fold the rest of bytes
 8292   align(4);
 8293   BIND(L_tail_loop);
 8294   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 8295   update_byte_crc32(crc, rax, table);
 8296   increment(buf);
 8297   decrementl(len);
 8298   jccb(Assembler::greater, L_tail_loop);
 8299 
 8300   BIND(L_exit);
 8301   notl(crc); // ~crc
 8302 }
 8303 
 8304 // Helper function for AVX 512 CRC32
 8305 // Fold 512-bit data chunks
 8306 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
 8307                                              Register pos, int offset) {
 8308   evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
 8309   evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
 8310   evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
 8311   evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
 8312   evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
 8313 }
 8314 
 8315 // Helper function for AVX 512 CRC32
 8316 // Compute CRC32 for < 256B buffers
 8317 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
 8318                                               Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
 8319                                               Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
 8320 
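        // This helper is only reached from kernel_crc32_avx512() for buffers shorter
        // than 256 bytes; the label arguments are bound by the caller, and the paths
        // below jump back into its 16B-reduction, last-two-xmms, 128-done, Barrett and
        // cleanup sections rather than returning here.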
 8321   Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
 8322   Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
 8323   Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
 8324 
 8325   // check if there is enough buffer to be able to fold 16B at a time
 8326   cmpl(len, 32);
 8327   jcc(Assembler::less, L_less_than_32);
 8328 
 8329   // if there is, load the constants
 8330   movdqu(xmm10, Address(table, 1 * 16));    //rk1 and rk2 in xmm10
 8331   movdl(xmm0, crc);                        // get the initial crc value
 8332   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 8333   pxor(xmm7, xmm0);
 8334 
 8335   // update the buffer pointer
 8336   addl(pos, 16);
 8337   // update the counter. Subtract 32 instead of 16 to save one instruction from the loop
 8338   subl(len, 32);
 8339   jmp(L_16B_reduction_loop);
 8340 
 8341   bind(L_less_than_32);
 8342   // mov initial crc to the return value. This is necessary for zero-length buffers.
 8343   movl(rax, crc);
 8344   testl(len, len);
 8345   jcc(Assembler::equal, L_cleanup);
 8346 
 8347   movdl(xmm0, crc);                        //get the initial crc value
 8348 
 8349   cmpl(len, 16);
 8350   jcc(Assembler::equal, L_exact_16_left);
 8351   jcc(Assembler::less, L_less_than_16_left);
 8352 
 8353   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 8354   pxor(xmm7, xmm0);                       //xor the initial crc value
 8355   addl(pos, 16);
 8356   subl(len, 16);
 8357   movdqu(xmm10, Address(table, 1 * 16));    // rk1 and rk2 in xmm10
 8358   jmp(L_get_last_two_xmms);
 8359 
 8360   bind(L_less_than_16_left);
 8361   // use stack space to load data less than 16 bytes; zero out the 16B in memory first.
 8362   pxor(xmm1, xmm1);
 8363   movptr(tmp1, rsp);
 8364   movdqu(Address(tmp1, 0 * 16), xmm1);
 8365 
 8366   cmpl(len, 4);
 8367   jcc(Assembler::less, L_only_less_than_4);
 8368 
 8369   // back up the counter value
 8370   movl(tmp2, len);
 8371   cmpl(len, 8);
 8372   jcc(Assembler::less, L_less_than_8_left);
 8373 
 8374   //load 8 Bytes
 8375   movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
 8376   movq(Address(tmp1, 0 * 16), rax);
 8377   addptr(tmp1, 8);
 8378   subl(len, 8);
 8379   addl(pos, 8);
 8380 
 8381   bind(L_less_than_8_left);
 8382   cmpl(len, 4);
 8383   jcc(Assembler::less, L_less_than_4_left);
 8384 
 8385   //load 4 Bytes
 8386   movl(rax, Address(buf, pos, Address::times_1, 0));
 8387   movl(Address(tmp1, 0 * 16), rax);
 8388   addptr(tmp1, 4);
 8389   subl(len, 4);
 8390   addl(pos, 4);
 8391 
 8392   bind(L_less_than_4_left);
 8393   cmpl(len, 2);
 8394   jcc(Assembler::less, L_less_than_2_left);
 8395 
 8396   // load 2 Bytes
 8397   movw(rax, Address(buf, pos, Address::times_1, 0));
 8398   movl(Address(tmp1, 0 * 16), rax);
 8399   addptr(tmp1, 2);
 8400   subl(len, 2);
 8401   addl(pos, 2);
 8402 
 8403   bind(L_less_than_2_left);
 8404   cmpl(len, 1);
 8405   jcc(Assembler::less, L_zero_left);
 8406 
 8407   // load 1 Byte
 8408   movb(rax, Address(buf, pos, Address::times_1, 0));
 8409   movb(Address(tmp1, 0 * 16), rax);
 8410 
 8411   bind(L_zero_left);
 8412   movdqu(xmm7, Address(rsp, 0));
 8413   pxor(xmm7, xmm0);                       //xor the initial crc value
 8414 
 8415   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8416   movdqu(xmm0, Address(rax, tmp2));
 8417   pshufb(xmm7, xmm0);
 8418   jmp(L_128_done);
 8419 
 8420   bind(L_exact_16_left);
 8421   movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
 8422   pxor(xmm7, xmm0);                       //xor the initial crc value
 8423   jmp(L_128_done);
 8424 
 8425   bind(L_only_less_than_4);
 8426   cmpl(len, 3);
 8427   jcc(Assembler::less, L_only_less_than_3);
 8428 
 8429   // load 3 Bytes
 8430   movb(rax, Address(buf, pos, Address::times_1, 0));
 8431   movb(Address(tmp1, 0), rax);
 8432 
 8433   movb(rax, Address(buf, pos, Address::times_1, 1));
 8434   movb(Address(tmp1, 1), rax);
 8435 
 8436   movb(rax, Address(buf, pos, Address::times_1, 2));
 8437   movb(Address(tmp1, 2), rax);
 8438 
 8439   movdqu(xmm7, Address(rsp, 0));
 8440   pxor(xmm7, xmm0);                     //xor the initial crc value
 8441 
 8442   pslldq(xmm7, 0x5);
 8443   jmp(L_barrett);
 8444   bind(L_only_less_than_3);
 8445   cmpl(len, 2);
 8446   jcc(Assembler::less, L_only_less_than_2);
 8447 
 8448   // load 2 Bytes
 8449   movb(rax, Address(buf, pos, Address::times_1, 0));
 8450   movb(Address(tmp1, 0), rax);
 8451 
 8452   movb(rax, Address(buf, pos, Address::times_1, 1));
 8453   movb(Address(tmp1, 1), rax);
 8454 
 8455   movdqu(xmm7, Address(rsp, 0));
 8456   pxor(xmm7, xmm0);                     //xor the initial crc value
 8457 
 8458   pslldq(xmm7, 0x6);
 8459   jmp(L_barrett);
 8460 
 8461   bind(L_only_less_than_2);
 8462   //load 1 Byte
 8463   movb(rax, Address(buf, pos, Address::times_1, 0));
 8464   movb(Address(tmp1, 0), rax);
 8465 
 8466   movdqu(xmm7, Address(rsp, 0));
 8467   pxor(xmm7, xmm0);                     //xor the initial crc value
 8468 
 8469   pslldq(xmm7, 0x7);
 8470 }
 8471 
 8472 /**
 8473 * Compute CRC32 using AVX512 instructions
 8474 * param crc   register containing existing CRC (32-bit)
 8475 * param buf   register pointing to input byte buffer (byte*)
 8476 * param len   register containing number of bytes
 8477 * param table address of crc or crc32c table
 8478 * param tmp1  scratch register
 8479 * param tmp2  scratch register
 8480 * return rax  result register
 8481 *
 8482 * This routine is identical for crc32c with the exception of the precomputed constant
 8483 * table which will be passed as the table argument.  The calculation steps are
 8484 * the same for both variants.
 8485 */
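      // Structure: buffers of 256B or more are folded 256B per iteration into four
      // 512-bit accumulators (zmm0, zmm4, zmm7, zmm8), reduced back to two, then folded
      // 128B per iteration, reduced to a single xmm, folded 16B at a time, and finally
      // collapsed to 32 bits with a Barrett reduction. Buffers under 256B take the
      // kernel_crc32_avx512_256B() path and rejoin at the shared labels.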
 8486 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
 8487   assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
 8488 
 8489   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8490   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8491   Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
 8492   Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
 8493   Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
 8494 
 8495   const Register pos = r12;
 8496   push(r12);
 8497   subptr(rsp, 16 * 2 + 8);
 8498 
 8499   // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
 8500   // context for the registers used, where all instructions below are using 128-bit mode
 8501   // On EVEX without VL and BW, these instructions will all be AVX.
 8502   movl(pos, 0);
 8503 
 8504   // check if smaller than 256B
 8505   cmpl(len, 256);
 8506   jcc(Assembler::less, L_less_than_256);
 8507 
 8508   // load the initial crc value
 8509   movdl(xmm10, crc);
 8510 
 8511   // receive the initial 64B data, xor the initial crc value
 8512   evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
 8513   evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
 8514   evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
 8515   evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
 8516 
 8517   subl(len, 256);
 8518   cmpl(len, 256);
 8519   jcc(Assembler::less, L_fold_128_B_loop);
 8520 
 8521   evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
 8522   evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
 8523   evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
 8524   subl(len, 256);
 8525 
 8526   bind(L_fold_256_B_loop);
 8527   addl(pos, 256);
 8528   fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
 8529   fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
 8530   fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
 8531   fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
 8532 
 8533   subl(len, 256);
 8534   jcc(Assembler::greaterEqual, L_fold_256_B_loop);
 8535 
 8536   // Fold 256 into 128
 8537   addl(pos, 256);
 8538   evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
 8539   evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
 8540   vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
 8541 
 8542   evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
 8543   evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
 8544   vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
 8545 
 8546   evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
 8547   evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
 8548 
 8549   addl(len, 128);
 8550   jmp(L_fold_128_B_register);
 8551 
 8552   // at this section of the code, there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
 8553   // will fold 128B at a time until we have 128 + y bytes of buffer
 8554 
 8555   // fold 128B at a time. This section of the code folds 8 xmm registers in parallel
 8556   bind(L_fold_128_B_loop);
 8557   addl(pos, 128);
 8558   fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
 8559   fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
 8560 
 8561   subl(len, 128);
 8562   jcc(Assembler::greaterEqual, L_fold_128_B_loop);
 8563 
 8564   addl(pos, 128);
 8565 
 8566   // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
 8567   // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
 8568   bind(L_fold_128_B_register);
 8569   evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
 8570   evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
 8571   evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
 8572   evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
 8573   // save last that has no multiplicand
 8574   vextracti64x2(xmm7, xmm4, 3);
 8575 
 8576   evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
 8577   evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
 8578   // Needed later in reduction loop
 8579   movdqu(xmm10, Address(table, 1 * 16));
 8580   vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
 8581   vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
 8582 
 8583   // Swap 1,0,3,2 - 01 00 11 10
 8584   evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
 8585   evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
 8586   vextracti128(xmm5, xmm8, 1);
 8587   evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
 8588 
 8589   // instead of adding 128, we add 128 - 16 to the loop counter to save one instruction from the loop
 8590   // instead of a cmp instruction, we use the sign flag with the jl instruction
 8591   addl(len, 128 - 16);
 8592   jcc(Assembler::less, L_final_reduction_for_128);
 8593 
 8594   bind(L_16B_reduction_loop);
 8595   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8596   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8597   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8598   movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
 8599   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8600   addl(pos, 16);
 8601   subl(len, 16);
 8602   jcc(Assembler::greaterEqual, L_16B_reduction_loop);
 8603 
 8604   bind(L_final_reduction_for_128);
 8605   addl(len, 16);
 8606   jcc(Assembler::equal, L_128_done);
 8607 
 8608   bind(L_get_last_two_xmms);
 8609   movdqu(xmm2, xmm7);
 8610   addl(pos, len);
 8611   movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
 8612   subl(pos, len);
 8613 
 8614   // get rid of the extra data that was loaded before
 8615   // load the shift constant
 8616   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8617   movdqu(xmm0, Address(rax, len));
 8618   addl(rax, len);
 8619 
 8620   vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8621   // Change mask to 512
 8622   vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
 8623   vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
 8624 
 8625   blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
 8626   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8627   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8628   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8629   vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
 8630 
 8631   bind(L_128_done);
 8632   // compute crc of a 128-bit value
 8633   movdqu(xmm10, Address(table, 3 * 16));
 8634   movdqu(xmm0, xmm7);
 8635 
 8636   // 64b fold
 8637   vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
 8638   vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
 8639   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8640 
 8641   // 32b fold
 8642   movdqu(xmm0, xmm7);
 8643   vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
 8644   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8645   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8646   jmp(L_barrett);
 8647 
 8648   bind(L_less_than_256);
 8649   kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
 8650 
 8651   // Barrett reduction
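        // Illustrative sketch only (not the generated instruction sequence): with R the
        // 64-bit folded remainder and P(x) the CRC-32 polynomial, the constants loaded
        // from the table below are used to compute
        //   T1  = floor(R / x^32)  * mu,   where mu = floor(x^64 / P(x))
        //   T2  = floor(T1 / x^32) * P(x)
        //   crc = (R ^ T2) mod x^32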
 8652   bind(L_barrett);
 8653   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
 8654   movdqu(xmm1, xmm7);
 8655   movdqu(xmm2, xmm7);
 8656   movdqu(xmm10, Address(table, 4 * 16));
 8657 
 8658   pclmulqdq(xmm7, xmm10, 0x0);
 8659   pxor(xmm7, xmm2);
 8660   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
 8661   movdqu(xmm2, xmm7);
 8662   pclmulqdq(xmm7, xmm10, 0x10);
 8663   pxor(xmm7, xmm2);
 8664   pxor(xmm7, xmm1);
 8665   pextrd(crc, xmm7, 2);
 8666 
 8667   bind(L_cleanup);
 8668   addptr(rsp, 16 * 2 + 8);
 8669   pop(r12);
 8670 }
 8671 
 8672 // S. Gueron / Information Processing Letters 112 (2012) 184
 8673 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
 8674 // Input: A 32-bit value B = [byte3, byte2, byte1, byte0].
 8675 // Output: the 64-bit carry-less product of B * CONST
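      // A scalar model of the lookup below (illustrative only; TABLEExt denotes the table at
      // StubRoutines::crc32c_table_addr(), 256 eight-byte entries per chunk index n):
      //   uint64_t q1 = TABLEExt[n][ b        & 0xFF];
      //   uint64_t q2 = TABLEExt[n][(b >>  8) & 0xFF];
      //   uint64_t q3 = TABLEExt[n][(b >> 16) & 0xFF];
      //   uint64_t q4 = TABLEExt[n][(b >> 24) & 0xFF];
      //   return q1 ^ (q2 << 8) ^ (q3 << 16) ^ (q4 << 24);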
 8676 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
 8677                                      Register tmp1, Register tmp2, Register tmp3) {
 8678   lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
 8679   if (n > 0) {
 8680     addq(tmp3, n * 256 * 8);
 8681   }
 8682   //    Q1 = TABLEExt[n][B & 0xFF];
 8683   movl(tmp1, in);
 8684   andl(tmp1, 0x000000FF);
 8685   shll(tmp1, 3);
 8686   addq(tmp1, tmp3);
 8687   movq(tmp1, Address(tmp1, 0));
 8688 
 8689   //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
 8690   movl(tmp2, in);
 8691   shrl(tmp2, 8);
 8692   andl(tmp2, 0x000000FF);
 8693   shll(tmp2, 3);
 8694   addq(tmp2, tmp3);
 8695   movq(tmp2, Address(tmp2, 0));
 8696 
 8697   shlq(tmp2, 8);
 8698   xorq(tmp1, tmp2);
 8699 
 8700   //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
 8701   movl(tmp2, in);
 8702   shrl(tmp2, 16);
 8703   andl(tmp2, 0x000000FF);
 8704   shll(tmp2, 3);
 8705   addq(tmp2, tmp3);
 8706   movq(tmp2, Address(tmp2, 0));
 8707 
 8708   shlq(tmp2, 16);
 8709   xorq(tmp1, tmp2);
 8710 
 8711   //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
 8712   shrl(in, 24);
 8713   andl(in, 0x000000FF);
 8714   shll(in, 3);
 8715   addq(in, tmp3);
 8716   movq(in, Address(in, 0));
 8717 
 8718   shlq(in, 24);
 8719   xorq(in, tmp1);
 8720   //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
 8721 }
 8722 
 8723 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
 8724                                       Register in_out,
 8725                                       uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
 8726                                       XMMRegister w_xtmp2,
 8727                                       Register tmp1,
 8728                                       Register n_tmp2, Register n_tmp3) {
 8729   if (is_pclmulqdq_supported) {
 8730     movdl(w_xtmp1, in_out); // modified blindly
 8731 
 8732     movl(tmp1, const_or_pre_comp_const_index);
 8733     movdl(w_xtmp2, tmp1);
 8734     pclmulqdq(w_xtmp1, w_xtmp2, 0);
 8735 
 8736     movdq(in_out, w_xtmp1);
 8737   } else {
 8738     crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
 8739   }
 8740 }
 8741 
 8742 // Recombination Alternative 2: No bit-reflections
 8743 // T1 = (CRC_A * U1) << 1
 8744 // T2 = (CRC_B * U2) << 1
 8745 // C1 = T1 >> 32
 8746 // C2 = T2 >> 32
 8747 // T1 = T1 & 0xFFFFFFFF
 8748 // T2 = T2 & 0xFFFFFFFF
 8749 // T1 = CRC32(0, T1)
 8750 // T2 = CRC32(0, T2)
 8751 // C1 = C1 ^ T1
 8752 // C2 = C2 ^ T2
 8753 // CRC = C1 ^ C2 ^ CRC_C
 8754 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
 8755                                      XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8756                                      Register tmp1, Register tmp2,
 8757                                      Register n_tmp3) {
 8758   crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8759   crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8760   shlq(in_out, 1);
 8761   movl(tmp1, in_out);
 8762   shrq(in_out, 32);
 8763   xorl(tmp2, tmp2);
 8764   crc32(tmp2, tmp1, 4);
 8765   xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
 8766   shlq(in1, 1);
 8767   movl(tmp1, in1);
 8768   shrq(in1, 32);
 8769   xorl(tmp2, tmp2);
 8770   crc32(tmp2, tmp1, 4);
 8771   xorl(in1, tmp2);
 8772   xorl(in_out, in1);
 8773   xorl(in_out, in2);
 8774 }
 8775 
 8776 // Set N to a predefined value
 8777 // Subtract it from the length of the buffer and
 8778 // execute in a loop:
 8779 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
 8780 // for i = 1 to N do
 8781 //  CRC_A = CRC32(CRC_A, A[i])
 8782 //  CRC_B = CRC32(CRC_B, B[i])
 8783 //  CRC_C = CRC32(CRC_C, C[i])
 8784 // end for
 8785 // Recombine
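      // A rough scalar equivalent of the chunk loop below (sketch only; _mm_crc32_u64 is the
      // SSE4.2 intrinsic corresponding to the crc32 instruction emitted here, and "recombine"
      // stands for crc32c_rec_alt2 above):
      //   while (len >= 3 * size) {
      //     uint32_t crcA = crc, crcB = 0, crcC = 0;
      //     for (const char* p = buf; p < buf + size; p += 8) {
      //       crcA = (uint32_t)_mm_crc32_u64(crcA, *(const uint64_t*)p);
      //       crcB = (uint32_t)_mm_crc32_u64(crcB, *(const uint64_t*)(p + size));
      //       crcC = (uint32_t)_mm_crc32_u64(crcC, *(const uint64_t*)(p + 2 * size));
      //     }
      //     crc = recombine(crcA, crcB, crcC);
      //     buf += 3 * size;  len -= 3 * size;
      //   }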
 8786 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
 8787                                        Register in_out1, Register in_out2, Register in_out3,
 8788                                        Register tmp1, Register tmp2, Register tmp3,
 8789                                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8790                                        Register tmp4, Register tmp5,
 8791                                        Register n_tmp6) {
 8792   Label L_processPartitions;
 8793   Label L_processPartition;
 8794   Label L_exit;
 8795 
 8796   bind(L_processPartitions);
 8797   cmpl(in_out1, 3 * size);
 8798   jcc(Assembler::less, L_exit);
 8799     xorl(tmp1, tmp1);
 8800     xorl(tmp2, tmp2);
 8801     movq(tmp3, in_out2);
 8802     addq(tmp3, size);
 8803 
 8804     bind(L_processPartition);
 8805       crc32(in_out3, Address(in_out2, 0), 8);
 8806       crc32(tmp1, Address(in_out2, size), 8);
 8807       crc32(tmp2, Address(in_out2, size * 2), 8);
 8808       addq(in_out2, 8);
 8809       cmpq(in_out2, tmp3);
 8810       jcc(Assembler::less, L_processPartition);
 8811     crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
 8812             w_xtmp1, w_xtmp2, w_xtmp3,
 8813             tmp4, tmp5,
 8814             n_tmp6);
 8815     addq(in_out2, 2 * size);
 8816     subl(in_out1, 3 * size);
 8817     jmp(L_processPartitions);
 8818 
 8819   bind(L_exit);
 8820 }
 8821 
 8822 // Algorithm 2: Pipelined usage of the CRC32 instruction.
 8823 // Input: A buffer I of L bytes.
 8824 // Output: the CRC32C value of the buffer.
 8825 // Notations:
 8826 // Write L = 24N + r, with N = floor (L/24).
 8827 // r = L mod 24 (0 <= r < 24).
 8828 // Consider I as the concatenation of A|B|C|R, where A, B and C each consist of
 8829 // N quadwords, and R consists of r bytes.
 8830 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
 8831 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
 8832 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
 8833 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
 8834 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
 8835                                           Register tmp1, Register tmp2, Register tmp3,
 8836                                           Register tmp4, Register tmp5, Register tmp6,
 8837                                           XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8838                                           bool is_pclmulqdq_supported) {
 8839   uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
 8840   Label L_wordByWord;
 8841   Label L_byteByByteProlog;
 8842   Label L_byteByByte;
 8843   Label L_exit;
 8844 
 8845   if (is_pclmulqdq_supported) {
 8846     const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
 8847     const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
 8848 
 8849     const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
 8850     const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
 8851 
 8852     const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
 8853     const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
 8854     assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
 8855   } else {
 8856     const_or_pre_comp_const_index[0] = 1;
 8857     const_or_pre_comp_const_index[1] = 0;
 8858 
 8859     const_or_pre_comp_const_index[2] = 3;
 8860     const_or_pre_comp_const_index[3] = 2;
 8861 
 8862     const_or_pre_comp_const_index[4] = 5;
 8863     const_or_pre_comp_const_index[5] = 4;
 8864    }
 8865   crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
 8866                     in2, in1, in_out,
 8867                     tmp1, tmp2, tmp3,
 8868                     w_xtmp1, w_xtmp2, w_xtmp3,
 8869                     tmp4, tmp5,
 8870                     tmp6);
 8871   crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
 8872                     in2, in1, in_out,
 8873                     tmp1, tmp2, tmp3,
 8874                     w_xtmp1, w_xtmp2, w_xtmp3,
 8875                     tmp4, tmp5,
 8876                     tmp6);
 8877   crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
 8878                     in2, in1, in_out,
 8879                     tmp1, tmp2, tmp3,
 8880                     w_xtmp1, w_xtmp2, w_xtmp3,
 8881                     tmp4, tmp5,
 8882                     tmp6);
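        // The three calls above run the pipelined scheme with progressively smaller partition
        // sizes (CRC32C_HIGH, CRC32C_MIDDLE, CRC32C_LOW); the remainder is finished 8 bytes at
        // a time at L_wordByWord and byte by byte at L_byteByByte below.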
 8883   movl(tmp1, in2);
 8884   andl(tmp1, 0x00000007);
 8885   negl(tmp1);
 8886   addl(tmp1, in2);
 8887   addq(tmp1, in1);
 8888 
 8889   cmpq(in1, tmp1);
 8890   jccb(Assembler::greaterEqual, L_byteByByteProlog);
 8891   align(16);
 8892   BIND(L_wordByWord);
 8893     crc32(in_out, Address(in1, 0), 8);
 8894     addq(in1, 8);
 8895     cmpq(in1, tmp1);
 8896     jcc(Assembler::less, L_wordByWord);
 8897 
 8898   BIND(L_byteByByteProlog);
 8899   andl(in2, 0x00000007);
 8900   movl(tmp2, 1);
 8901 
 8902   cmpl(tmp2, in2);
 8903   jccb(Assembler::greater, L_exit);
 8904   BIND(L_byteByByte);
 8905     crc32(in_out, Address(in1, 0), 1);
 8906     incq(in1);
 8907     incl(tmp2);
 8908     cmpl(tmp2, in2);
 8909     jcc(Assembler::lessEqual, L_byteByByte);
 8910 
 8911   BIND(L_exit);
 8912 }
 8913 #undef BIND
 8914 #undef BLOCK_COMMENT
 8915 
 8916 // Compress char[] array to byte[].
 8917 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
 8918 // Return the array length if every element in the array can be encoded;
 8919 // otherwise, return the index of the first non-latin1 (> 0xff) character.
 8920 //   @IntrinsicCandidate
 8921 //   public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
 8922 //     for (int i = 0; i < len; i++) {
 8923 //       char c = src[srcOff];
 8924 //       if (c > 0xff) {
 8925 //           return i;  // return index of non-latin1 char
 8926 //       }
 8927 //       dst[dstOff] = (byte)c;
 8928 //       srcOff++;
 8929 //       dstOff++;
 8930 //     }
 8931 //     return len;
 8932 //   }
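      // The vector paths below first test each chunk with an unsigned word compare against the
      // broadcast constant 0x00FF; only if every char in the chunk is latin-1 is it stored with a
      // word-to-byte truncating move (evpmovwb on the AVX-512 path, packuswb on the SSE path),
      // otherwise control falls back to the scalar tail, which reports the offending index.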
 8933 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
 8934   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 8935   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 8936   Register tmp5, Register result, KRegister mask1, KRegister mask2) {
 8937   Label copy_chars_loop, done, reset_sp, copy_tail;
 8938 
 8939   // rsi: src
 8940   // rdi: dst
 8941   // rdx: len
 8942   // rcx: tmp5
 8943   // rax: result
 8944 
 8945   // rsi holds start addr of source char[] to be compressed
 8946   // rdi holds start addr of destination byte[]
 8947   // rdx holds length
 8948 
 8949   assert(len != result, "");
 8950 
 8951   // save length for return
 8952   movl(result, len);
 8953 
 8954   if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
 8955     VM_Version::supports_avx512vlbw() &&
 8956     VM_Version::supports_bmi2()) {
 8957 
 8958     Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
 8959 
 8960     // alignment
 8961     Label post_alignment;
 8962 
 8963     // if the length of the string is less than 32, handle it the old-fashioned way
 8964     testl(len, -32);
 8965     jcc(Assembler::zero, below_threshold);
 8966 
 8967     // First check whether a character is compressible (<= 0xFF).
 8968     // Create mask to test for Unicode chars inside zmm vector
 8969     movl(tmp5, 0x00FF);
 8970     evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
 8971 
 8972     testl(len, -64);
 8973     jccb(Assembler::zero, post_alignment);
 8974 
 8975     movl(tmp5, dst);
 8976     andl(tmp5, (32 - 1));
 8977     negl(tmp5);
 8978     andl(tmp5, (32 - 1));
 8979 
 8980     // bail out when there is nothing to be done
 8981     testl(tmp5, 0xFFFFFFFF);
 8982     jccb(Assembler::zero, post_alignment);
 8983 
 8984     // ~(~0 << tmp5), where tmp5 is the # of chars needed to reach dst alignment
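          // e.g. with 5 chars needed to reach alignment (tmp5 == 5), the mask value computed
          // into len becomes 0x0000001F, enabling only the low 5 word lanes of the masked ops below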
 8985     movl(len, 0xFFFFFFFF);
 8986     shlxl(len, len, tmp5);
 8987     notl(len);
 8988     kmovdl(mask2, len);
 8989     movl(len, result);
 8990 
 8991     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 8992     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 8993     ktestd(mask1, mask2);
 8994     jcc(Assembler::carryClear, copy_tail);
 8995 
 8996     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 8997 
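          // src is a char[] and dst a byte[], so src advances by 2 * tmp5 bytes while dst advances by tmp5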
 8998     addptr(src, tmp5);
 8999     addptr(src, tmp5);
 9000     addptr(dst, tmp5);
 9001     subl(len, tmp5);
 9002 
 9003     bind(post_alignment);
 9004     // end of alignment
 9005 
 9006     movl(tmp5, len);
 9007     andl(tmp5, (32 - 1));    // tail count (in chars)
 9008     andl(len, ~(32 - 1));    // vector count (in chars)
 9009     jccb(Assembler::zero, copy_loop_tail);
 9010 
 9011     lea(src, Address(src, len, Address::times_2));
 9012     lea(dst, Address(dst, len, Address::times_1));
 9013     negptr(len);
 9014 
 9015     bind(copy_32_loop);
 9016     evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
 9017     evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
 9018     kortestdl(mask1, mask1);
 9019     jccb(Assembler::carryClear, reset_for_copy_tail);
 9020 
 9021     // All elements in the currently processed chunk are valid candidates for
 9022     // compression. Write the truncated byte elements to memory.
 9023     evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
 9024     addptr(len, 32);
 9025     jccb(Assembler::notZero, copy_32_loop);
 9026 
 9027     bind(copy_loop_tail);
 9028     // bail out when there is nothing to be done
 9029     testl(tmp5, 0xFFFFFFFF);
 9030     jcc(Assembler::zero, done);
 9031 
 9032     movl(len, tmp5);
 9033 
 9034     // ~(~0 << len), where len is the # of remaining elements to process
 9035     movl(tmp5, 0xFFFFFFFF);
 9036     shlxl(tmp5, tmp5, len);
 9037     notl(tmp5);
 9038 
 9039     kmovdl(mask2, tmp5);
 9040 
 9041     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 9042     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 9043     ktestd(mask1, mask2);
 9044     jcc(Assembler::carryClear, copy_tail);
 9045 
 9046     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 9047     jmp(done);
 9048 
 9049     bind(reset_for_copy_tail);
 9050     lea(src, Address(src, tmp5, Address::times_2));
 9051     lea(dst, Address(dst, tmp5, Address::times_1));
 9052     subptr(len, tmp5);
 9053     jmp(copy_chars_loop);
 9054 
 9055     bind(below_threshold);
 9056   }
 9057 
 9058   if (UseSSE42Intrinsics) {
 9059     Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
 9060 
 9061     // vectored compression
 9062     testl(len, 0xfffffff8);
 9063     jcc(Assembler::zero, copy_tail);
 9064 
 9065     movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors
 9066     movdl(tmp1Reg, tmp5);
 9067     pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
 9068 
 9069     andl(len, 0xfffffff0);
 9070     jccb(Assembler::zero, copy_16);
 9071 
 9072     // compress 16 chars per iter
 9073     pxor(tmp4Reg, tmp4Reg);
 9074 
 9075     lea(src, Address(src, len, Address::times_2));
 9076     lea(dst, Address(dst, len, Address::times_1));
 9077     negptr(len);
 9078 
 9079     bind(copy_32_loop);
 9080     movdqu(tmp2Reg, Address(src, len, Address::times_2));     // load 1st 8 characters
 9081     por(tmp4Reg, tmp2Reg);
 9082     movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
 9083     por(tmp4Reg, tmp3Reg);
 9084     ptest(tmp4Reg, tmp1Reg);       // check for Unicode chars in next vector
 9085     jccb(Assembler::notZero, reset_for_copy_tail);
 9086     packuswb(tmp2Reg, tmp3Reg);    // only ASCII chars; compress each to 1 byte
 9087     movdqu(Address(dst, len, Address::times_1), tmp2Reg);
 9088     addptr(len, 16);
 9089     jccb(Assembler::notZero, copy_32_loop);
 9090 
 9091     // compress next vector of 8 chars (if any)
 9092     bind(copy_16);
 9093     // len = 0
 9094     testl(result, 0x00000008);     // check if there's a block of 8 chars to compress
 9095     jccb(Assembler::zero, copy_tail_sse);
 9096 
 9097     pxor(tmp3Reg, tmp3Reg);
 9098 
 9099     movdqu(tmp2Reg, Address(src, 0));
 9100     ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
 9101     jccb(Assembler::notZero, reset_for_copy_tail);
 9102     packuswb(tmp2Reg, tmp3Reg);    // only LATIN1 chars; compress each to 1 byte
 9103     movq(Address(dst, 0), tmp2Reg);
 9104     addptr(src, 16);
 9105     addptr(dst, 8);
 9106     jmpb(copy_tail_sse);
 9107 
 9108     bind(reset_for_copy_tail);
 9109     movl(tmp5, result);
 9110     andl(tmp5, 0x0000000f);
 9111     lea(src, Address(src, tmp5, Address::times_2));
 9112     lea(dst, Address(dst, tmp5, Address::times_1));
 9113     subptr(len, tmp5);
 9114     jmpb(copy_chars_loop);
 9115 
 9116     bind(copy_tail_sse);
 9117     movl(len, result);
 9118     andl(len, 0x00000007);    // tail count (in chars)
 9119   }
 9120   // compress 1 char per iter
 9121   bind(copy_tail);
 9122   testl(len, len);
 9123   jccb(Assembler::zero, done);
 9124   lea(src, Address(src, len, Address::times_2));
 9125   lea(dst, Address(dst, len, Address::times_1));
 9126   negptr(len);
 9127 
 9128   bind(copy_chars_loop);
 9129   load_unsigned_short(tmp5, Address(src, len, Address::times_2));
 9130   testl(tmp5, 0xff00);      // check if Unicode char
 9131   jccb(Assembler::notZero, reset_sp);
 9132   movb(Address(dst, len, Address::times_1), tmp5);  // ASCII char; compress to 1 byte
 9133   increment(len);
 9134   jccb(Assembler::notZero, copy_chars_loop);
 9135 
 9136   // add len then return (len will be zero if compress succeeded, otherwise negative)
 9137   bind(reset_sp);
 9138   addl(result, len);
 9139 
 9140   bind(done);
 9141 }
 9142 
 9143 // Inflate byte[] array to char[].
 9144 //   ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
 9145 //   @IntrinsicCandidate
 9146 //   private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
 9147 //     for (int i = 0; i < len; i++) {
 9148 //       dst[dstOff++] = (char)(src[srcOff++] & 0xff);
 9149 //     }
 9150 //   }
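      // The vector paths below rely on zero-extension moves: pmovzxbw / vpmovzxbw / evpmovzxbw
      // widen each latin-1 byte to a 16-bit char in a single instruction, 8, 16 or 32 elements at a time.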
 9151 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
 9152   XMMRegister tmp1, Register tmp2, KRegister mask) {
 9153   Label copy_chars_loop, done, below_threshold, avx3_threshold;
 9154   // rsi: src
 9155   // rdi: dst
 9156   // rdx: len
 9157   // rcx: tmp2
 9158 
 9159   // rsi holds start addr of source byte[] to be inflated
 9160   // rdi holds start addr of destination char[]
 9161   // rdx holds length
 9162   assert_different_registers(src, dst, len, tmp2);
 9163   movl(tmp2, len);
 9164   if ((UseAVX > 2) && // AVX512
 9165     VM_Version::supports_avx512vlbw() &&
 9166     VM_Version::supports_bmi2()) {
 9167 
 9168     Label copy_32_loop, copy_tail;
 9169     Register tmp3_aliased = len;
 9170 
 9171     // if the length of the string is less than 16, handle it the old-fashioned way
 9172     testl(len, -16);
 9173     jcc(Assembler::zero, below_threshold);
 9174 
 9175     testl(len, -1 * AVX3Threshold);
 9176     jcc(Assembler::zero, avx3_threshold);
 9177 
 9178     // In order to use only one arithmetic operation in the main loop, we use
 9179     // this pre-calculation
 9180     andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
 9181     andl(len, -32);     // vector count
 9182     jccb(Assembler::zero, copy_tail);
 9183 
 9184     lea(src, Address(src, len, Address::times_1));
 9185     lea(dst, Address(dst, len, Address::times_2));
 9186     negptr(len);
 9187 
 9188 
 9189     // inflate 32 chars per iter
 9190     bind(copy_32_loop);
 9191     vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
 9192     evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
 9193     addptr(len, 32);
 9194     jcc(Assembler::notZero, copy_32_loop);
 9195 
 9196     bind(copy_tail);
 9197     // bail out when there is nothing to be done
 9198     testl(tmp2, -1); // we don't destroy the contents of tmp2 here
 9199     jcc(Assembler::zero, done);
 9200 
 9201     // ~(~0 << length), where length is the # of remaining elements to process
 9202     movl(tmp3_aliased, -1);
 9203     shlxl(tmp3_aliased, tmp3_aliased, tmp2);
 9204     notl(tmp3_aliased);
 9205     kmovdl(mask, tmp3_aliased);
 9206     evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
 9207     evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
 9208 
 9209     jmp(done);
 9210     bind(avx3_threshold);
 9211   }
 9212   if (UseSSE42Intrinsics) {
 9213     Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
 9214 
 9215     if (UseAVX > 1) {
 9216       andl(tmp2, (16 - 1));
 9217       andl(len, -16);
 9218       jccb(Assembler::zero, copy_new_tail);
 9219     } else {
 9220       andl(tmp2, 0x00000007);   // tail count (in chars)
 9221       andl(len, 0xfffffff8);    // vector count (in chars)
 9222       jccb(Assembler::zero, copy_tail);
 9223     }
 9224 
 9225     // vectored inflation
 9226     lea(src, Address(src, len, Address::times_1));
 9227     lea(dst, Address(dst, len, Address::times_2));
 9228     negptr(len);
 9229 
 9230     if (UseAVX > 1) {
 9231       bind(copy_16_loop);
 9232       vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
 9233       vmovdqu(Address(dst, len, Address::times_2), tmp1);
 9234       addptr(len, 16);
 9235       jcc(Assembler::notZero, copy_16_loop);
 9236 
 9237       bind(below_threshold);
 9238       bind(copy_new_tail);
 9239       movl(len, tmp2);
 9240       andl(tmp2, 0x00000007);
 9241       andl(len, 0xFFFFFFF8);
 9242       jccb(Assembler::zero, copy_tail);
 9243 
 9244       pmovzxbw(tmp1, Address(src, 0));
 9245       movdqu(Address(dst, 0), tmp1);
 9246       addptr(src, 8);
 9247       addptr(dst, 2 * 8);
 9248 
 9249       jmp(copy_tail, true);
 9250     }
 9251 
 9252     // inflate 8 chars per iter
 9253     bind(copy_8_loop);
 9254     pmovzxbw(tmp1, Address(src, len, Address::times_1));  // unpack to 8 words
 9255     movdqu(Address(dst, len, Address::times_2), tmp1);
 9256     addptr(len, 8);
 9257     jcc(Assembler::notZero, copy_8_loop);
 9258 
 9259     bind(copy_tail);
 9260     movl(len, tmp2);
 9261 
 9262     cmpl(len, 4);
 9263     jccb(Assembler::less, copy_bytes);
 9264 
 9265     movdl(tmp1, Address(src, 0));  // load 4 byte chars
 9266     pmovzxbw(tmp1, tmp1);
 9267     movq(Address(dst, 0), tmp1);
 9268     subptr(len, 4);
 9269     addptr(src, 4);
 9270     addptr(dst, 8);
 9271 
 9272     bind(copy_bytes);
 9273   } else {
 9274     bind(below_threshold);
 9275   }
 9276 
 9277   testl(len, len);
 9278   jccb(Assembler::zero, done);
 9279   lea(src, Address(src, len, Address::times_1));
 9280   lea(dst, Address(dst, len, Address::times_2));
 9281   negptr(len);
 9282 
 9283   // inflate 1 char per iter
 9284   bind(copy_chars_loop);
 9285   load_unsigned_byte(tmp2, Address(src, len, Address::times_1));  // load byte char
 9286   movw(Address(dst, len, Address::times_2), tmp2);  // inflate byte char to word
 9287   increment(len);
 9288   jcc(Assembler::notZero, copy_chars_loop);
 9289 
 9290   bind(done);
 9291 }
 9292 
 9293 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
 9294   switch(type) {
 9295     case T_BYTE:
 9296     case T_BOOLEAN:
 9297       evmovdqub(dst, kmask, src, merge, vector_len);
 9298       break;
 9299     case T_CHAR:
 9300     case T_SHORT:
 9301       evmovdquw(dst, kmask, src, merge, vector_len);
 9302       break;
 9303     case T_INT:
 9304     case T_FLOAT:
 9305       evmovdqul(dst, kmask, src, merge, vector_len);
 9306       break;
 9307     case T_LONG:
 9308     case T_DOUBLE:
 9309       evmovdquq(dst, kmask, src, merge, vector_len);
 9310       break;
 9311     default:
 9312       fatal("Unexpected type argument %s", type2name(type));
 9313       break;
 9314   }
 9315 }
 9316 
 9317 
 9318 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
 9319   switch(type) {
 9320     case T_BYTE:
 9321     case T_BOOLEAN:
 9322       evmovdqub(dst, kmask, src, merge, vector_len);
 9323       break;
 9324     case T_CHAR:
 9325     case T_SHORT:
 9326       evmovdquw(dst, kmask, src, merge, vector_len);
 9327       break;
 9328     case T_INT:
 9329     case T_FLOAT:
 9330       evmovdqul(dst, kmask, src, merge, vector_len);
 9331       break;
 9332     case T_LONG:
 9333     case T_DOUBLE:
 9334       evmovdquq(dst, kmask, src, merge, vector_len);
 9335       break;
 9336     default:
 9337       fatal("Unexpected type argument %s", type2name(type));
 9338       break;
 9339   }
 9340 }
 9341 
 9342 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
 9343   switch(type) {
 9344     case T_BYTE:
 9345     case T_BOOLEAN:
 9346       evmovdqub(dst, kmask, src, merge, vector_len);
 9347       break;
 9348     case T_CHAR:
 9349     case T_SHORT:
 9350       evmovdquw(dst, kmask, src, merge, vector_len);
 9351       break;
 9352     case T_INT:
 9353     case T_FLOAT:
 9354       evmovdqul(dst, kmask, src, merge, vector_len);
 9355       break;
 9356     case T_LONG:
 9357     case T_DOUBLE:
 9358       evmovdquq(dst, kmask, src, merge, vector_len);
 9359       break;
 9360     default:
 9361       fatal("Unexpected type argument %s", type2name(type));
 9362       break;
 9363   }
 9364 }
 9365 
 9366 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
 9367   switch(masklen) {
 9368     case 2:
 9369        knotbl(dst, src);
 9370        movl(rtmp, 3);
 9371        kmovbl(ktmp, rtmp);
 9372        kandbl(dst, ktmp, dst);
 9373        break;
 9374     case 4:
 9375        knotbl(dst, src);
 9376        movl(rtmp, 15);
 9377        kmovbl(ktmp, rtmp);
 9378        kandbl(dst, ktmp, dst);
 9379        break;
 9380     case 8:
 9381        knotbl(dst, src);
 9382        break;
 9383     case 16:
 9384        knotwl(dst, src);
 9385        break;
 9386     case 32:
 9387        knotdl(dst, src);
 9388        break;
 9389     case 64:
 9390        knotql(dst, src);
 9391        break;
 9392     default:
 9393       fatal("Unexpected vector length %d", masklen);
 9394       break;
 9395   }
 9396 }
 9397 
 9398 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9399   switch(type) {
 9400     case T_BOOLEAN:
 9401     case T_BYTE:
 9402        kandbl(dst, src1, src2);
 9403        break;
 9404     case T_CHAR:
 9405     case T_SHORT:
 9406        kandwl(dst, src1, src2);
 9407        break;
 9408     case T_INT:
 9409     case T_FLOAT:
 9410        kanddl(dst, src1, src2);
 9411        break;
 9412     case T_LONG:
 9413     case T_DOUBLE:
 9414        kandql(dst, src1, src2);
 9415        break;
 9416     default:
 9417       fatal("Unexpected type argument %s", type2name(type));
 9418       break;
 9419   }
 9420 }
 9421 
 9422 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9423   switch(type) {
 9424     case T_BOOLEAN:
 9425     case T_BYTE:
 9426        korbl(dst, src1, src2);
 9427        break;
 9428     case T_CHAR:
 9429     case T_SHORT:
 9430        korwl(dst, src1, src2);
 9431        break;
 9432     case T_INT:
 9433     case T_FLOAT:
 9434        kordl(dst, src1, src2);
 9435        break;
 9436     case T_LONG:
 9437     case T_DOUBLE:
 9438        korql(dst, src1, src2);
 9439        break;
 9440     default:
 9441       fatal("Unexpected type argument %s", type2name(type));
 9442       break;
 9443   }
 9444 }
 9445 
 9446 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 9447   switch(type) {
 9448     case T_BOOLEAN:
 9449     case T_BYTE:
 9450        kxorbl(dst, src1, src2);
 9451        break;
 9452     case T_CHAR:
 9453     case T_SHORT:
 9454        kxorwl(dst, src1, src2);
 9455        break;
 9456     case T_INT:
 9457     case T_FLOAT:
 9458        kxordl(dst, src1, src2);
 9459        break;
 9460     case T_LONG:
 9461     case T_DOUBLE:
 9462        kxorql(dst, src1, src2);
 9463        break;
 9464     default:
 9465       fatal("Unexpected type argument %s", type2name(type));
 9466       break;
 9467   }
 9468 }
 9469 
 9470 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9471   switch(type) {
 9472     case T_BOOLEAN:
 9473     case T_BYTE:
 9474       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9475     case T_CHAR:
 9476     case T_SHORT:
 9477       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9478     case T_INT:
 9479     case T_FLOAT:
 9480       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9481     case T_LONG:
 9482     case T_DOUBLE:
 9483       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9484     default:
 9485       fatal("Unexpected type argument %s", type2name(type)); break;
 9486   }
 9487 }
 9488 
 9489 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9490   switch(type) {
 9491     case T_BOOLEAN:
 9492     case T_BYTE:
 9493       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9494     case T_CHAR:
 9495     case T_SHORT:
 9496       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9497     case T_INT:
 9498     case T_FLOAT:
 9499       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9500     case T_LONG:
 9501     case T_DOUBLE:
 9502       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9503     default:
 9504       fatal("Unexpected type argument %s", type2name(type)); break;
 9505   }
 9506 }
 9507 
 9508 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9509   switch(type) {
 9510     case T_BYTE:
 9511       evpminub(dst, mask, nds, src, merge, vector_len); break;
 9512     case T_SHORT:
 9513       evpminuw(dst, mask, nds, src, merge, vector_len); break;
 9514     case T_INT:
 9515       evpminud(dst, mask, nds, src, merge, vector_len); break;
 9516     case T_LONG:
 9517       evpminuq(dst, mask, nds, src, merge, vector_len); break;
 9518     default:
 9519       fatal("Unexpected type argument %s", type2name(type)); break;
 9520   }
 9521 }
 9522 
 9523 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9524   switch(type) {
 9525     case T_BYTE:
 9526       evpmaxub(dst, mask, nds, src, merge, vector_len); break;
 9527     case T_SHORT:
 9528       evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
 9529     case T_INT:
 9530       evpmaxud(dst, mask, nds, src, merge, vector_len); break;
 9531     case T_LONG:
 9532       evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
 9533     default:
 9534       fatal("Unexpected type argument %s", type2name(type)); break;
 9535   }
 9536 }
 9537 
 9538 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9539   switch(type) {
 9540     case T_BYTE:
 9541       evpminub(dst, mask, nds, src, merge, vector_len); break;
 9542     case T_SHORT:
 9543       evpminuw(dst, mask, nds, src, merge, vector_len); break;
 9544     case T_INT:
 9545       evpminud(dst, mask, nds, src, merge, vector_len); break;
 9546     case T_LONG:
 9547       evpminuq(dst, mask, nds, src, merge, vector_len); break;
 9548     default:
 9549       fatal("Unexpected type argument %s", type2name(type)); break;
 9550   }
 9551 }
 9552 
 9553 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9554   switch(type) {
 9555     case T_BYTE:
 9556       evpmaxub(dst, mask, nds, src, merge, vector_len); break;
 9557     case T_SHORT:
 9558       evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
 9559     case T_INT:
 9560       evpmaxud(dst, mask, nds, src, merge, vector_len); break;
 9561     case T_LONG:
 9562       evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
 9563     default:
 9564       fatal("Unexpected type argument %s", type2name(type)); break;
 9565   }
 9566 }
 9567 
 9568 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9569   switch(type) {
 9570     case T_BYTE:
 9571       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9572     case T_SHORT:
 9573       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9574     case T_INT:
 9575       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9576     case T_LONG:
 9577       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9578     case T_FLOAT:
 9579       evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9580     case T_DOUBLE:
 9581       evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9582     default:
 9583       fatal("Unexpected type argument %s", type2name(type)); break;
 9584   }
 9585 }
 9586 
 9587 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9588   switch(type) {
 9589     case T_BYTE:
 9590       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9591     case T_SHORT:
 9592       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9593     case T_INT:
 9594       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9595     case T_LONG:
 9596       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9597     case T_FLOAT:
 9598       evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9599     case T_DOUBLE:
 9600       evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9601     default:
 9602       fatal("Unexpected type argument %s", type2name(type)); break;
 9603   }
 9604 }
 9605 
 9606 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9607   switch(type) {
 9608     case T_BYTE:
 9609       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9610     case T_SHORT:
 9611       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9612     case T_INT:
 9613       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9614     case T_LONG:
 9615       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9616     case T_FLOAT:
 9617       evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9618     case T_DOUBLE:
 9619       evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9620     default:
 9621       fatal("Unexpected type argument %s", type2name(type)); break;
 9622   }
 9623 }
 9624 
 9625 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9626   switch(type) {
 9627     case T_BYTE:
 9628       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9629     case T_SHORT:
 9630       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9631     case T_INT:
 9632       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9633     case T_LONG:
 9634       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9635     case T_FLOAT:
 9636       evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9637     case T_DOUBLE:
 9638       evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9639     default:
 9640       fatal("Unexpected type argument %s", type2name(type)); break;
 9641   }
 9642 }
 9643 
 9644 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9645   switch(type) {
 9646     case T_INT:
 9647       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9648     case T_LONG:
 9649       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9650     default:
 9651       fatal("Unexpected type argument %s", type2name(type)); break;
 9652   }
 9653 }
 9654 
 9655 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9656   switch(type) {
 9657     case T_INT:
 9658       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9659     case T_LONG:
 9660       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9661     default:
 9662       fatal("Unexpected type argument %s", type2name(type)); break;
 9663   }
 9664 }
 9665 
 9666 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9667   switch(type) {
 9668     case T_INT:
 9669       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9670     case T_LONG:
 9671       evporq(dst, mask, nds, src, merge, vector_len); break;
 9672     default:
 9673       fatal("Unexpected type argument %s", type2name(type)); break;
 9674   }
 9675 }
 9676 
 9677 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9678   switch(type) {
 9679     case T_INT:
 9680       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9681     case T_LONG:
 9682       evporq(dst, mask, nds, src, merge, vector_len); break;
 9683     default:
 9684       fatal("Unexpected type argument %s", type2name(type)); break;
 9685   }
 9686 }
 9687 
 9688 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9689   switch(type) {
 9690     case T_INT:
 9691       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9692     case T_LONG:
 9693       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9694     default:
 9695       fatal("Unexpected type argument %s", type2name(type)); break;
 9696   }
 9697 }
 9698 
 9699 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9700   switch(type) {
 9701     case T_INT:
 9702       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9703     case T_LONG:
 9704       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9705     default:
 9706       fatal("Unexpected type argument %s", type2name(type)); break;
 9707   }
 9708 }
 9709 
 9710 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
 9711   switch(masklen) {
 9712     case 8:
 9713        kortestbl(src1, src2);
 9714        break;
 9715     case 16:
 9716        kortestwl(src1, src2);
 9717        break;
 9718     case 32:
 9719        kortestdl(src1, src2);
 9720        break;
 9721     case 64:
 9722        kortestql(src1, src2);
 9723        break;
 9724     default:
 9725       fatal("Unexpected mask length %d", masklen);
 9726       break;
 9727   }
 9728 }
 9729 
 9730 
 9731 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
 9732   switch(masklen)  {
 9733     case 8:
 9734        ktestbl(src1, src2);
 9735        break;
 9736     case 16:
 9737        ktestwl(src1, src2);
 9738        break;
 9739     case 32:
 9740        ktestdl(src1, src2);
 9741        break;
 9742     case 64:
 9743        ktestql(src1, src2);
 9744        break;
 9745     default:
 9746       fatal("Unexpected mask length %d", masklen);
 9747       break;
 9748   }
 9749 }
 9750 
 9751 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9752   switch(type) {
 9753     case T_INT:
 9754       evprold(dst, mask, src, shift, merge, vlen_enc); break;
 9755     case T_LONG:
 9756       evprolq(dst, mask, src, shift, merge, vlen_enc); break;
 9757     default:
 9758       fatal("Unexpected type argument %s", type2name(type)); break;
 9760   }
 9761 }
 9762 
 9763 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9764   switch(type) {
 9765     case T_INT:
 9766       evprord(dst, mask, src, shift, merge, vlen_enc); break;
 9767     case T_LONG:
 9768       evprorq(dst, mask, src, shift, merge, vlen_enc); break;
 9769     default:
 9770       fatal("Unexpected type argument %s", type2name(type)); break;
 9771   }
 9772 }
 9773 
 9774 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9775   switch(type) {
 9776     case T_INT:
 9777       evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9778     case T_LONG:
 9779       evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9780     default:
 9781       fatal("Unexpected type argument %s", type2name(type)); break;
 9782   }
 9783 }
 9784 
 9785 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9786   switch(type) {
 9787     case T_INT:
 9788       evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9789     case T_LONG:
 9790       evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9791     default:
 9792       fatal("Unexpected type argument %s", type2name(type)); break;
 9793   }
 9794 }
 9795 
 9796 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9797   assert(rscratch != noreg || always_reachable(src), "missing");
 9798 
 9799   if (reachable(src)) {
 9800     evpandq(dst, nds, as_Address(src), vector_len);
 9801   } else {
 9802     lea(rscratch, src);
 9803     evpandq(dst, nds, Address(rscratch, 0), vector_len);
 9804   }
 9805 }
 9806 
 9807 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 9808   assert(rscratch != noreg || always_reachable(src), "missing");
 9809 
 9810   if (reachable(src)) {
 9811     Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
 9812   } else {
 9813     lea(rscratch, src);
 9814     Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 9815   }
 9816 }
 9817 
 9818 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9819   assert(rscratch != noreg || always_reachable(src), "missing");
 9820 
 9821   if (reachable(src)) {
 9822     evporq(dst, nds, as_Address(src), vector_len);
 9823   } else {
 9824     lea(rscratch, src);
 9825     evporq(dst, nds, Address(rscratch, 0), vector_len);
 9826   }
 9827 }
 9828 
 9829 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9830   assert(rscratch != noreg || always_reachable(src), "missing");
 9831 
 9832   if (reachable(src)) {
 9833     vpshufb(dst, nds, as_Address(src), vector_len);
 9834   } else {
 9835     lea(rscratch, src);
 9836     vpshufb(dst, nds, Address(rscratch, 0), vector_len);
 9837   }
 9838 }
 9839 
 9840 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9841   assert(rscratch != noreg || always_reachable(src), "missing");
 9842 
 9843   if (reachable(src)) {
 9844     Assembler::vpor(dst, nds, as_Address(src), vector_len);
 9845   } else {
 9846     lea(rscratch, src);
 9847     Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
 9848   }
 9849 }
 9850 
 9851 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
 9852   assert(rscratch != noreg || always_reachable(src3), "missing");
 9853 
 9854   if (reachable(src3)) {
 9855     vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
 9856   } else {
 9857     lea(rscratch, src3);
 9858     vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
 9859   }
 9860 }
 9861 
 9862 #if COMPILER2_OR_JVMCI
 9863 
 9864 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
 9865                                  Register length, Register temp, int vec_enc) {
 9866   // Computing mask for predicated vector store.
 9867   movptr(temp, -1);
 9868   bzhiq(temp, temp, length);
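        // e.g. length == 7 leaves temp == 0x7F, so the masked store below writes only the low 7 elements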
 9869   kmov(mask, temp);
 9870   evmovdqu(bt, mask, dst, xmm, true, vec_enc);
 9871 }
 9872 
 9873 // Set-memory (fill) operation for a length of less than 64 bytes.
 9874 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
 9875                                        XMMRegister xmm, KRegister mask, Register length,
 9876                                        Register temp, bool use64byteVector) {
 9877   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9878   const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9879   if (!use64byteVector) {
 9880     fill32(dst, disp, xmm);
 9881     subptr(length, 32 >> shift);
 9882     fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
 9883   } else {
 9884     assert(MaxVectorSize == 64, "vector length != 64");
 9885     fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
 9886   }
 9887 }
 9888 
 9889 
 9890 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
 9891                                        XMMRegister xmm, KRegister mask, Register length,
 9892                                        Register temp) {
 9893   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9894   const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9895   fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
 9896 }
 9897 
 9898 
 9899 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
 9900   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9901   vmovdqu(dst, xmm);
 9902 }
 9903 
 9904 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
 9905   fill32(Address(dst, disp), xmm);
 9906 }
 9907 
 9908 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
 9909   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9910   if (!use64byteVector) {
 9911     fill32(dst, xmm);
 9912     fill32(dst.plus_disp(32), xmm);
 9913   } else {
 9914     evmovdquq(dst, xmm, Assembler::AVX_512bit);
 9915   }
 9916 }
 9917 
 9918 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
 9919   fill64(Address(dst, disp), xmm, use64byteVector);
 9920 }
 9921 
 9922 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
 9923                                         Register count, Register rtmp, XMMRegister xtmp) {
 9924   Label L_exit;
 9925   Label L_fill_start;
 9926   Label L_fill_64_bytes;
 9927   Label L_fill_96_bytes;
 9928   Label L_fill_128_bytes;
 9929   Label L_fill_128_bytes_loop;
 9930   Label L_fill_128_loop_header;
 9931   Label L_fill_128_bytes_loop_header;
 9932   Label L_fill_128_bytes_loop_pre_header;
 9933   Label L_fill_zmm_sequence;
 9934 
 9935   int shift = -1;
 9936   int avx3threshold = VM_Version::avx3_threshold();
 9937   switch(type) {
 9938     case T_BYTE:  shift = 0;
 9939       break;
 9940     case T_SHORT: shift = 1;
 9941       break;
 9942     case T_INT:   shift = 2;
 9943       break;
 9944     /* Uncomment when LONG fill stubs are supported.
 9945     case T_LONG:  shift = 3;
 9946       break;
 9947     */
 9948     default:
 9949       fatal("Unhandled type: %s\n", type2name(type));
 9950   }
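        // shift is log2 of the element size, so expressions such as (32 >> shift) below convert
        // byte counts into element counts (e.g. T_SHORT: 32 >> 1 == 16 elements per 32-byte store).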
 9951 
 9952   if ((avx3threshold != 0)  || (MaxVectorSize == 32)) {
 9953 
 9954     if (MaxVectorSize == 64) {
 9955       cmpq(count, avx3threshold >> shift);
 9956       jcc(Assembler::greater, L_fill_zmm_sequence);
 9957     }
 9958 
 9959     evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
 9960 
 9961     bind(L_fill_start);
 9962 
 9963     cmpq(count, 32 >> shift);
 9964     jccb(Assembler::greater, L_fill_64_bytes);
 9965     fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
 9966     jmp(L_exit);
 9967 
 9968     bind(L_fill_64_bytes);
 9969     cmpq(count, 64 >> shift);
 9970     jccb(Assembler::greater, L_fill_96_bytes);
 9971     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
 9972     jmp(L_exit);
 9973 
 9974     bind(L_fill_96_bytes);
 9975     cmpq(count, 96 >> shift);
 9976     jccb(Assembler::greater, L_fill_128_bytes);
 9977     fill64(to, 0, xtmp);
 9978     subq(count, 64 >> shift);
 9979     fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
 9980     jmp(L_exit);
 9981 
 9982     bind(L_fill_128_bytes);
 9983     cmpq(count, 128 >> shift);
 9984     jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
 9985     fill64(to, 0, xtmp);
 9986     fill32(to, 64, xtmp);
 9987     subq(count, 96 >> shift);
 9988     fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
 9989     jmp(L_exit);
 9990 
 9991     bind(L_fill_128_bytes_loop_pre_header);
 9992     {
 9993       mov(rtmp, to);
 9994       andq(rtmp, 31);
 9995       jccb(Assembler::zero, L_fill_128_bytes_loop_header);
 9996       negq(rtmp);
 9997       addq(rtmp, 32);
 9998       mov64(r8, -1L);
 9999       bzhiq(r8, r8, rtmp);
10000       kmovql(k2, r8);
10001       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
10002       addq(to, rtmp);
10003       shrq(rtmp, shift);
10004       subq(count, rtmp);
10005     }
10006 
10007     cmpq(count, 128 >> shift);
10008     jcc(Assembler::less, L_fill_start);
10009 
10010     bind(L_fill_128_bytes_loop_header);
10011     subq(count, 128 >> shift);
10012 
10013     align32();
10014     bind(L_fill_128_bytes_loop);
10015       fill64(to, 0, xtmp);
10016       fill64(to, 64, xtmp);
10017       addq(to, 128);
10018       subq(count, 128 >> shift);
10019       jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
10020 
10021     addq(count, 128 >> shift);
10022     jcc(Assembler::zero, L_exit);
10023     jmp(L_fill_start);
10024   }
10025 
10026   if (MaxVectorSize == 64) {
10027     // Sequence using 64 byte ZMM register.
10028     Label L_fill_128_bytes_zmm;
10029     Label L_fill_192_bytes_zmm;
10030     Label L_fill_192_bytes_loop_zmm;
10031     Label L_fill_192_bytes_loop_header_zmm;
10032     Label L_fill_192_bytes_loop_pre_header_zmm;
10033     Label L_fill_start_zmm_sequence;
10034 
10035     bind(L_fill_zmm_sequence);
10036     evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
10037 
10038     bind(L_fill_start_zmm_sequence);
10039     cmpq(count, 64 >> shift);
10040     jccb(Assembler::greater, L_fill_128_bytes_zmm);
10041     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
10042     jmp(L_exit);
10043 
10044     bind(L_fill_128_bytes_zmm);
10045     cmpq(count, 128 >> shift);
10046     jccb(Assembler::greater, L_fill_192_bytes_zmm);
10047     fill64(to, 0, xtmp, true);
10048     subq(count, 64 >> shift);
10049     fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
10050     jmp(L_exit);
10051 
10052     bind(L_fill_192_bytes_zmm);
10053     cmpq(count, 192 >> shift);
10054     jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
10055     fill64(to, 0, xtmp, true);
10056     fill64(to, 64, xtmp, true);
10057     subq(count, 128 >> shift);
10058     fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
10059     jmp(L_exit);
10060 
10061     bind(L_fill_192_bytes_loop_pre_header_zmm);
10062     {
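            // Same head-alignment trick as the 32-byte sequence above, here
            // aligning 'to' to a 64-byte boundary for the ZMM loop.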
10063       movq(rtmp, to);
10064       andq(rtmp, 63);
10065       jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
10066       negq(rtmp);
10067       addq(rtmp, 64);
10068       mov64(r8, -1L);
10069       bzhiq(r8, r8, rtmp);
10070       kmovql(k2, r8);
10071       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
10072       addq(to, rtmp);
10073       shrq(rtmp, shift);
10074       subq(count, rtmp);
10075     }
10076 
10077     cmpq(count, 192 >> shift);
10078     jcc(Assembler::less, L_fill_start_zmm_sequence);
10079 
10080     bind(L_fill_192_bytes_loop_header_zmm);
10081     subq(count, 192 >> shift);
10082 
10083     align32();
10084     bind(L_fill_192_bytes_loop_zmm);
10085       fill64(to, 0, xtmp, true);
10086       fill64(to, 64, xtmp, true);
10087       fill64(to, 128, xtmp, true);
10088       addq(to, 192);
10089       subq(count, 192 >> shift);
10090       jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
10091 
10092     addq(count, 192 >> shift);
10093     jcc(Assembler::zero, L_exit);
10094     jmp(L_fill_start_zmm_sequence);
10095   }
10096   bind(L_exit);
10097 }
10098 #endif //COMPILER2_OR_JVMCI
10099 
10100 
10101 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
10102   Label done;
10103   cvttss2sil(dst, src);
10104   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
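        // cvttss2sil yields the "integer indefinite" value 0x80000000 exactly in
        // those cases, so only that result needs the fixup stub. The same pattern
        // is used by convert_d2i/f2l/d2l below (with the 64-bit sign-flip constant
        // for the long variants).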
10105   cmpl(dst, 0x80000000); // float_sign_flip
10106   jccb(Assembler::notEqual, done);
10107   subptr(rsp, 8);
10108   movflt(Address(rsp, 0), src);
10109   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
10110   pop(dst);
10111   bind(done);
10112 }
10113 
10114 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
10115   Label done;
10116   cvttsd2sil(dst, src);
10117   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
10118   cmpl(dst, 0x80000000); // float_sign_flip
10119   jccb(Assembler::notEqual, done);
10120   subptr(rsp, 8);
10121   movdbl(Address(rsp, 0), src);
10122   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
10123   pop(dst);
10124   bind(done);
10125 }
10126 
10127 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
10128   Label done;
10129   cvttss2siq(dst, src);
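        // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub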
10130   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10131   jccb(Assembler::notEqual, done);
10132   subptr(rsp, 8);
10133   movflt(Address(rsp, 0), src);
10134   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
10135   pop(dst);
10136   bind(done);
10137 }
10138 
10139 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10140   // The following code is a line-by-line assembly translation of the rounding algorithm.
10141   // Please refer to java.lang.Math.round(float) algorithm for details.
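        // Roughly, in Java terms (a sketch for orientation, not the exact JDK source):
        //   int bits  = Float.floatToRawIntBits(a);
        //   int shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS)
        //               - ((bits & EXP_BIT_MASK) >> (SIGNIFICAND_WIDTH - 1));
        //   if ((shift & -32) == 0) {                  // 0 <= shift < 32
        //     int r = (bits & SIGNIF_BIT_MASK) | (SIGNIF_BIT_MASK + 1);
        //     if (bits < 0) r = -r;
        //     return ((r >> shift) + 1) >> 1;
        //   } else {
        //     return (int) a;                          // L_special_case: convert_f2i
        //   }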
10142   const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
10143   const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
10144   const int32_t FloatConsts_EXP_BIAS = 127;
10145   const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
10146   const int32_t MINUS_32 = 0xFFFFFFE0;
10147   Label L_special_case, L_block1, L_exit;
10148   movl(rtmp, FloatConsts_EXP_BIT_MASK);
10149   movdl(dst, src);
10150   andl(dst, rtmp);
10151   sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
10152   movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
10153   subl(rtmp, dst);
10154   movl(rcx, rtmp);
10155   movl(dst, MINUS_32);
10156   testl(rtmp, dst);
10157   jccb(Assembler::notEqual, L_special_case);
10158   movdl(dst, src);
10159   andl(dst, FloatConsts_SIGNIF_BIT_MASK);
10160   orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
10161   movdl(rtmp, src);
10162   testl(rtmp, rtmp);
10163   jccb(Assembler::greaterEqual, L_block1);
10164   negl(dst);
10165   bind(L_block1);
10166   sarl(dst);
10167   addl(dst, 0x1);
10168   sarl(dst, 0x1);
10169   jmp(L_exit);
10170   bind(L_special_case);
10171   convert_f2i(dst, src);
10172   bind(L_exit);
10173 }
10174 
10175 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
10176   // The following code is a line-by-line assembly translation of the rounding algorithm.
10177   // Please refer to java.lang.Math.round(double) algorithm for details.
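        // Same structure as round_float above, with 64-bit constants and
        // convert_d2l handling the special (out-of-range shift) case.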
10178   const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
10179   const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
10180   const int64_t DoubleConsts_EXP_BIAS = 1023;
10181   const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
10182   const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
10183   Label L_special_case, L_block1, L_exit;
10184   mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
10185   movq(dst, src);
10186   andq(dst, rtmp);
10187   sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
10188   mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
10189   subq(rtmp, dst);
10190   movq(rcx, rtmp);
10191   mov64(dst, MINUS_64);
10192   testq(rtmp, dst);
10193   jccb(Assembler::notEqual, L_special_case);
10194   movq(dst, src);
10195   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
10196   andq(dst, rtmp);
10197   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
10198   orq(dst, rtmp);
10199   movq(rtmp, src);
10200   testq(rtmp, rtmp);
10201   jccb(Assembler::greaterEqual, L_block1);
10202   negq(dst);
10203   bind(L_block1);
10204   sarq(dst);
10205   addq(dst, 0x1);
10206   sarq(dst, 0x1);
10207   jmp(L_exit);
10208   bind(L_special_case);
10209   convert_d2l(dst, src);
10210   bind(L_exit);
10211 }
10212 
10213 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
10214   Label done;
10215   cvttsd2siq(dst, src);
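        // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub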
10216   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
10217   jccb(Assembler::notEqual, done);
10218   subptr(rsp, 8);
10219   movdbl(Address(rsp, 0), src);
10220   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
10221   pop(dst);
10222   bind(done);
10223 }
10224 
10225 void MacroAssembler::cache_wb(Address line)
10226 {
10227   // 64-bit CPUs always support clflush
10228   assert(VM_Version::supports_clflush(), "clflush should be available");
10229   bool optimized = VM_Version::supports_clflushopt();
10230   bool no_evict = VM_Version::supports_clwb();
10231 
10232   // Prefer clwb (writeback without evict); otherwise prefer clflushopt
10233   // (potentially parallel writeback with evict); otherwise fall back on
10234   // clflush (serial writeback with evict).
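        // Note: clflush is strongly ordered with respect to writes and other
        // clflushes, while clflushopt and clwb are only weakly ordered; that is
        // why cache_wbsync() below emits an sfence after the post-flush phase
        // when either of the weakly ordered instructions is in use.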
10235 
10236   if (optimized) {
10237     if (no_evict) {
10238       clwb(line);
10239     } else {
10240       clflushopt(line);
10241     }
10242   } else {
10243     // no need for fence when using CLFLUSH
10244     clflush(line);
10245   }
10246 }
10247 
10248 void MacroAssembler::cache_wbsync(bool is_pre)
10249 {
10250   assert(VM_Version::supports_clflush(), "clflush should be available");
10251   bool optimized = VM_Version::supports_clflushopt();
10252   bool no_evict = VM_Version::supports_clwb();
10253 
10254   // pick the correct implementation
10255 
10256   if (!is_pre && (optimized || no_evict)) {
10257     // need an sfence for the post flush when using clflushopt or clwb;
10258     // otherwise no need for any synchronization
10259 
10260     sfence();
10261   }
10262 }
10263 
10264 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
10265   switch (cond) {
10266     // Note some conditions are synonyms for others
10267     case Assembler::zero:         return Assembler::notZero;
10268     case Assembler::notZero:      return Assembler::zero;
10269     case Assembler::less:         return Assembler::greaterEqual;
10270     case Assembler::lessEqual:    return Assembler::greater;
10271     case Assembler::greater:      return Assembler::lessEqual;
10272     case Assembler::greaterEqual: return Assembler::less;
10273     case Assembler::below:        return Assembler::aboveEqual;
10274     case Assembler::belowEqual:   return Assembler::above;
10275     case Assembler::above:        return Assembler::belowEqual;
10276     case Assembler::aboveEqual:   return Assembler::below;
10277     case Assembler::overflow:     return Assembler::noOverflow;
10278     case Assembler::noOverflow:   return Assembler::overflow;
10279     case Assembler::negative:     return Assembler::positive;
10280     case Assembler::positive:     return Assembler::negative;
10281     case Assembler::parity:       return Assembler::noParity;
10282     case Assembler::noParity:     return Assembler::parity;
10283   }
10284   ShouldNotReachHere(); return Assembler::overflow;
10285 }
10286 
10287 // This is simply a call to Thread::current()
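      // The integer argument and scratch registers are saved and restored around
      // the call so their values survive; the Thread* result is returned in rax
      // and copied into 'thread'.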
10288 void MacroAssembler::get_thread_slow(Register thread) {
10289   if (thread != rax) {
10290     push(rax);
10291   }
10292   push(rdi);
10293   push(rsi);
10294   push(rdx);
10295   push(rcx);
10296   push(r8);
10297   push(r9);
10298   push(r10);
10299   push(r11);
10300 
10301   MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
10302 
10303   pop(r11);
10304   pop(r10);
10305   pop(r9);
10306   pop(r8);
10307   pop(rcx);
10308   pop(rdx);
10309   pop(rsi);
10310   pop(rdi);
10311   if (thread != rax) {
10312     mov(thread, rax);
10313     pop(rax);
10314   }
10315 }
10316 
10317 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
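        // Stops with 'msg' unless (sp + bias) is 16-byte aligned (2 * wordSize on x86_64).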
10318   Label L_stack_ok;
10319   if (bias == 0) {
10320     testptr(sp, 2 * wordSize - 1);
10321   } else {
10322     // lea(tmp, Address(rsp, bias));
10323     mov(tmp, sp);
10324     addptr(tmp, bias);
10325     testptr(tmp, 2 * wordSize - 1);
10326   }
10327   jcc(Assembler::equal, L_stack_ok);
10328   block_comment(msg);
10329   stop(msg);
10330   bind(L_stack_ok);
10331 }
10332 
10333 // Implements lightweight-locking.
10334 //
10335 // basic_lock: the BasicObjectLock slot (its monitor cache is cleared when UseObjectMonitorTable is set)
10336 // obj: the object to be locked
10337 // reg_rax: must be rax; holds the mark word and is killed
10338 // tmp: a temporary register (the locking thread is implicitly r15_thread)
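      //
      // Fast-path summary (mirrors the code below): the mark word's low lock bits
      // are transitioned 0b01 (unlocked) -> 0b00 (fast-locked) with a CAS, and on
      // success obj is pushed onto the thread's lock-stack. A recursive lock is
      // recognized by obj already being on top of the lock-stack; a full lock-stack,
      // an inflated monitor (0b10) or a failed CAS branches to 'slow'.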
10339 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
10340   Register thread = r15_thread;
10341 
10342   assert(reg_rax == rax, "");
10343   assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
10344 
10345   Label push;
10346   const Register top = tmp;
10347 
10348   // Preload the markWord. It is important that this is the first
10349   // instruction emitted as it is part of C1's null check semantics.
10350   movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10351 
10352   if (UseObjectMonitorTable) {
10353     // Clear cache in case fast locking succeeds or we need to take the slow-path.
10354     movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
10355   }
10356 
10357   if (DiagnoseSyncOnValueBasedClasses != 0) {
10358     load_klass(tmp, obj, rscratch1);
10359     testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
10360     jcc(Assembler::notZero, slow);
10361   }
10362 
10363   // Load top.
10364   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10365 
10366   // Check if the lock-stack is full.
10367   cmpl(top, LockStack::end_offset());
10368   jcc(Assembler::greaterEqual, slow);
10369 
10370   // Check for recursion.
10371   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10372   jcc(Assembler::equal, push);
10373 
10374   // Check header for monitor (0b10).
10375   testptr(reg_rax, markWord::monitor_value);
10376   jcc(Assembler::notZero, slow);
10377 
10378   // Try to lock. Transition lock bits 0b01 => 0b00
10379   movptr(tmp, reg_rax);
10380   andptr(tmp, ~(int32_t)markWord::unlocked_value);
10381   orptr(reg_rax, markWord::unlocked_value);
10382   if (EnableValhalla) {
10383     // Mask inline_type bit such that we go to the slow path if object is an inline type
10384     andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
10385   }
10386   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10387   jcc(Assembler::notEqual, slow);
10388 
10389   // Restore top, CAS clobbers register.
10390   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10391 
10392   bind(push);
10393   // After successful lock, push object on lock-stack.
10394   movptr(Address(thread, top), obj);
10395   incrementl(top, oopSize);
10396   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
10397 }
10398 
10399 // Implements lightweight-unlocking.
10400 //
10401 // obj: the object to be unlocked
10402 // reg_rax: must be rax; holds the mark word and is killed
10403 // thread: implicitly r15_thread
10404 // tmp: a temporary register
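      //
      // Fast-path summary: obj must be on top of the thread's lock-stack; it is
      // popped, and unless the entry below it is recursive the lock bits are
      // transitioned 0b00 -> 0b01 with a CAS. An inflated monitor (0b10) or a
      // failed CAS restores the lock-stack entry and branches to 'slow'.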
10405 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
10406   Register thread = r15_thread;
10407 
10408   assert(reg_rax == rax, "");
10409   assert_different_registers(obj, reg_rax, thread, tmp);
10410 
10411   Label unlocked, push_and_slow;
10412   const Register top = tmp;
10413 
10414   // Check if obj is top of lock-stack.
10415   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10416   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
10417   jcc(Assembler::notEqual, slow);
10418 
10419   // Pop lock-stack.
10420   DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
10421   subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10422 
10423   // Check if recursive.
10424   cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
10425   jcc(Assembler::equal, unlocked);
10426 
10427   // Not recursive. Check header for monitor (0b10).
10428   movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
10429   testptr(reg_rax, markWord::monitor_value);
10430   jcc(Assembler::notZero, push_and_slow);
10431 
10432 #ifdef ASSERT
10433   // Check header not unlocked (0b01).
10434   Label not_unlocked;
10435   testptr(reg_rax, markWord::unlocked_value);
10436   jcc(Assembler::zero, not_unlocked);
10437   stop("lightweight_unlock already unlocked");
10438   bind(not_unlocked);
10439 #endif
10440 
10441   // Try to unlock. Transition lock bits 0b00 => 0b01
10442   movptr(tmp, reg_rax);
10443   orptr(tmp, markWord::unlocked_value);
10444   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
10445   jcc(Assembler::equal, unlocked);
10446 
10447   bind(push_and_slow);
10448   // Restore lock-stack and handle the unlock in runtime.
10449 #ifdef ASSERT
10450   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
10451   movptr(Address(thread, top), obj);
10452 #endif
10453   addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
10454   jmp(slow);
10455 
10456   bind(unlocked);
10457 }
10458 
10459 // Saves the legacy GPR state on the stack.
10460 void MacroAssembler::save_legacy_gprs() {
10461   subq(rsp, 16 * wordSize);
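        // Slot index corresponds to 15 - <register encoding>; slot 11 (rsp's
        // encoding) is left unused, since rsp itself is not saved.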
10462   movq(Address(rsp, 15 * wordSize), rax);
10463   movq(Address(rsp, 14 * wordSize), rcx);
10464   movq(Address(rsp, 13 * wordSize), rdx);
10465   movq(Address(rsp, 12 * wordSize), rbx);
10466   movq(Address(rsp, 10 * wordSize), rbp);
10467   movq(Address(rsp, 9 * wordSize), rsi);
10468   movq(Address(rsp, 8 * wordSize), rdi);
10469   movq(Address(rsp, 7 * wordSize), r8);
10470   movq(Address(rsp, 6 * wordSize), r9);
10471   movq(Address(rsp, 5 * wordSize), r10);
10472   movq(Address(rsp, 4 * wordSize), r11);
10473   movq(Address(rsp, 3 * wordSize), r12);
10474   movq(Address(rsp, 2 * wordSize), r13);
10475   movq(Address(rsp, wordSize), r14);
10476   movq(Address(rsp, 0), r15);
10477 }
10478 
10479 // Restores the legacy GPR state from the stack.
10480 void MacroAssembler::restore_legacy_gprs() {
10481   movq(r15, Address(rsp, 0));
10482   movq(r14, Address(rsp, wordSize));
10483   movq(r13, Address(rsp, 2 * wordSize));
10484   movq(r12, Address(rsp, 3 * wordSize));
10485   movq(r11, Address(rsp, 4 * wordSize));
10486   movq(r10, Address(rsp, 5 * wordSize));
10487   movq(r9,  Address(rsp, 6 * wordSize));
10488   movq(r8,  Address(rsp, 7 * wordSize));
10489   movq(rdi, Address(rsp, 8 * wordSize));
10490   movq(rsi, Address(rsp, 9 * wordSize));
10491   movq(rbp, Address(rsp, 10 * wordSize));
10492   movq(rbx, Address(rsp, 12 * wordSize));
10493   movq(rdx, Address(rsp, 13 * wordSize));
10494   movq(rcx, Address(rsp, 14 * wordSize));
10495   movq(rax, Address(rsp, 15 * wordSize));
10496   addq(rsp, 16 * wordSize);
10497 }
10498 
10499 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
10500   if (VM_Version::supports_apx_f()) {
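          // With APX, the zero-upper form of setcc writes the whole destination
          // register, so no separate movzbl zero-extension is needed.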
10501     esetzucc(comparison, dst);
10502   } else {
10503     setb(comparison, dst);
10504     movzbl(dst, dst);
10505   }
10506 }