1 /*
    2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
    3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4  *
    5  * This code is free software; you can redistribute it and/or modify it
    6  * under the terms of the GNU General Public License version 2 only, as
    7  * published by the Free Software Foundation.
    8  *
    9  * This code is distributed in the hope that it will be useful, but WITHOUT
   10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12  * version 2 for more details (a copy is included in the LICENSE file that
   13  * accompanied this code).
   14  *
   15  * You should have received a copy of the GNU General Public License version
   16  * 2 along with this work; if not, write to the Free Software Foundation,
   17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18  *
   19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20  * or visit www.oracle.com if you need additional information or have any
   21  * questions.
   22  *
   23  */
   24 
   25 #include "asm/assembler.hpp"
   26 #include "asm/assembler.inline.hpp"
   27 #include "code/aotCodeCache.hpp"
   28 #include "code/compiledIC.hpp"
   29 #include "compiler/compiler_globals.hpp"
   30 #include "compiler/disassembler.hpp"
   31 #include "crc32c.h"
   32 #include "gc/shared/barrierSet.hpp"
   33 #include "gc/shared/barrierSetAssembler.hpp"
   34 #include "gc/shared/collectedHeap.inline.hpp"
   35 #include "gc/shared/tlab_globals.hpp"
   36 #include "interpreter/bytecodeHistogram.hpp"
   37 #include "interpreter/interpreter.hpp"
   38 #include "interpreter/interpreterRuntime.hpp"
   39 #include "jvm.h"
   40 #include "memory/resourceArea.hpp"
   41 #include "memory/universe.hpp"
   42 #include "oops/accessDecorators.hpp"
   43 #include "oops/compressedKlass.inline.hpp"
   44 #include "oops/compressedOops.inline.hpp"
   45 #include "oops/klass.inline.hpp"
   46 #include "prims/methodHandles.hpp"
   47 #include "runtime/continuation.hpp"
   48 #include "runtime/interfaceSupport.inline.hpp"
   49 #include "runtime/javaThread.hpp"
   50 #include "runtime/jniHandles.hpp"
   51 #include "runtime/objectMonitor.hpp"
   52 #include "runtime/os.hpp"
   53 #include "runtime/safepoint.hpp"
   54 #include "runtime/safepointMechanism.hpp"
   55 #include "runtime/sharedRuntime.hpp"
   56 #include "runtime/stubRoutines.hpp"
   57 #include "utilities/checkedCast.hpp"
   58 #include "utilities/macros.hpp"
   59 
   60 #ifdef PRODUCT
   61 #define BLOCK_COMMENT(str) /* nothing */
   62 #define STOP(error) stop(error)
   63 #else
   64 #define BLOCK_COMMENT(str) block_comment(str)
   65 #define STOP(error) block_comment(error); stop(error)
   66 #endif
   67 
   68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   69 
   70 #ifdef ASSERT
   71 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
   72 #endif
   73 
   74 static const Assembler::Condition reverse[] = {
   75     Assembler::noOverflow     /* overflow      = 0x0 */ ,
   76     Assembler::overflow       /* noOverflow    = 0x1 */ ,
   77     Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
   78     Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
   79     Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
   80     Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
   81     Assembler::above          /* belowEqual    = 0x6 */ ,
   82     Assembler::belowEqual     /* above         = 0x7 */ ,
   83     Assembler::positive       /* negative      = 0x8 */ ,
   84     Assembler::negative       /* positive      = 0x9 */ ,
   85     Assembler::noParity       /* parity        = 0xa */ ,
   86     Assembler::parity         /* noParity      = 0xb */ ,
   87     Assembler::greaterEqual   /* less          = 0xc */ ,
   88     Assembler::less           /* greaterEqual  = 0xd */ ,
   89     Assembler::greater        /* lessEqual     = 0xe */ ,
   90     Assembler::lessEqual      /* greater       = 0xf, */
   91 
   92 };
   93 
   94 
   95 // Implementation of MacroAssembler
   96 
   97 Address MacroAssembler::as_Address(AddressLiteral adr) {
   98   // amd64 always does this as a pc-rel.
   99   // We can be absolute or disp based on the instruction type:
  100   // jmp/call are displacements, others are absolute.
  101   assert(!adr.is_lval(), "must be rval");
  102   assert(reachable(adr), "must be");
  103   return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
  104 
  105 }
  106 
  107 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  108   AddressLiteral base = adr.base();
  109   lea(rscratch, base);
  110   Address index = adr.index();
  111   assert(index._disp == 0, "must not have disp"); // maybe it can?
  112   Address array(rscratch, index._index, index._scale, index._disp);
  113   return array;
  114 }
  115 
  116 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  117   Label L, E;
  118 
  119 #ifdef _WIN64
  120   // Windows always allocates space for its register args
  121   assert(num_args <= 4, "only register arguments supported");
  122   subq(rsp,  frame::arg_reg_save_area_bytes);
  123 #endif
  124 
  125   // Align stack if necessary
  126   testl(rsp, 15);
  127   jcc(Assembler::zero, L);
  128 
  129   subq(rsp, 8);
  130   call(RuntimeAddress(entry_point));
  131   addq(rsp, 8);
  132   jmp(E);
  133 
  134   bind(L);
  135   call(RuntimeAddress(entry_point));
  136 
  137   bind(E);
  138 
  139 #ifdef _WIN64
  140   // restore stack pointer
  141   addq(rsp, frame::arg_reg_save_area_bytes);
  142 #endif
  143 }
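// A sketch of the alignment dance above, for exposition only: the x86-64 C
// ABIs want rsp 16-byte aligned at the point of the call. testl(rsp, 15)
// inspects the low four bits; if any is set the stack is only 8-byte aligned
// (an odd number of words has been pushed), so that path wraps the call in
// subq(rsp, 8) / addq(rsp, 8), while the already-aligned path at L calls
// directly.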
  144 
  145 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  146   assert(!src2.is_lval(), "should use cmpptr");
  147   assert(rscratch != noreg || always_reachable(src2), "missing");
  148 
  149   if (reachable(src2)) {
  150     cmpq(src1, as_Address(src2));
  151   } else {
  152     lea(rscratch, src2);
  153     Assembler::cmpq(src1, Address(rscratch, 0));
  154   }
  155 }
  156 
  157 int MacroAssembler::corrected_idivq(Register reg) {
  158   // Full implementation of Java ldiv and lrem; checks for special
  159   // case as described in JVM spec., p.243 & p.271.  The function
  160   // returns the (pc) offset of the idivq instruction - may be needed
  161   // for implicit exceptions.
  162   //
  163   //         normal case                           special case
  164   //
  165   // input : rax: dividend                         min_long
  166   //         reg: divisor   (may not be eax/edx)   -1
  167   //
  168   // output: rax: quotient  (= rax idiv reg)       min_long
  169   //         rdx: remainder (= rax irem reg)       0
  170   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  171   static const int64_t min_long = 0x8000000000000000;
  172   Label normal_case, special_case;
  173 
  174   // check for special case
  175   cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  176   jcc(Assembler::notEqual, normal_case);
  177   xorl(rdx, rdx); // prepare rdx for possible special case (where
  178                   // remainder = 0)
  179   cmpq(reg, -1);
  180   jcc(Assembler::equal, special_case);
  181 
  182   // handle normal case
  183   bind(normal_case);
  184   cdqq();
  185   int idivq_offset = offset();
  186   idivq(reg);
  187 
  188   // normal and special case exit
  189   bind(special_case);
  190 
  191   return idivq_offset;
  192 }
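// Worked example of the special case above, for exposition only: x86 idiv
// raises #DE on quotient overflow instead of wrapping, and
//   min_long / -1 = +2^63
// is not representable as a signed 64-bit quotient. The JVM spec therefore
// defines the result as min_long with remainder 0, which is exactly what the
// early exit produces (rax left as min_long, rdx cleared) without ever
// executing idivq. All other inputs fall through to cdqq + idivq.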
  193 
  194 void MacroAssembler::decrementq(Register reg, int value) {
  195   if (value == min_jint) { subq(reg, value); return; }
  196   if (value <  0) { incrementq(reg, -value); return; }
  197   if (value == 0) {                        ; return; }
  198   if (value == 1 && UseIncDec) { decq(reg) ; return; }
  199   /* else */      { subq(reg, value)       ; return; }
  200 }
  201 
  202 void MacroAssembler::decrementq(Address dst, int value) {
  203   if (value == min_jint) { subq(dst, value); return; }
  204   if (value <  0) { incrementq(dst, -value); return; }
  205   if (value == 0) {                        ; return; }
  206   if (value == 1 && UseIncDec) { decq(dst) ; return; }
  207   /* else */      { subq(dst, value)       ; return; }
  208 }
  209 
  210 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  211   assert(rscratch != noreg || always_reachable(dst), "missing");
  212 
  213   if (reachable(dst)) {
  214     incrementq(as_Address(dst));
  215   } else {
  216     lea(rscratch, dst);
  217     incrementq(Address(rscratch, 0));
  218   }
  219 }
  220 
  221 void MacroAssembler::incrementq(Register reg, int value) {
  222   if (value == min_jint) { addq(reg, value); return; }
  223   if (value <  0) { decrementq(reg, -value); return; }
  224   if (value == 0) {                        ; return; }
  225   if (value == 1 && UseIncDec) { incq(reg) ; return; }
  226   /* else */      { addq(reg, value)       ; return; }
  227 }
  228 
  229 void MacroAssembler::incrementq(Address dst, int value) {
  230   if (value == min_jint) { addq(dst, value); return; }
  231   if (value <  0) { decrementq(dst, -value); return; }
  232   if (value == 0) {                        ; return; }
  233   if (value == 1 && UseIncDec) { incq(dst) ; return; }
  234   /* else */      { addq(dst, value)       ; return; }
  235 }
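// Note on the min_jint guards in the increment/decrement helpers above (a
// sketch, not generated code): negative values are handled by negating and
// forwarding to the opposite helper, and -min_jint does not fit in an int.
// For example incrementq(reg, min_jint) must emit addq(reg, min_jint)
// directly, because decrementq(reg, -min_jint) would overflow the negation.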
  236 
  237 // 32bit can do a case table jump in one instruction but we no longer allow the base
  238 // to be installed in the Address class
  239 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  240   lea(rscratch, entry.base());
  241   Address dispatch = entry.index();
  242   assert(dispatch._base == noreg, "must be");
  243   dispatch._base = rscratch;
  244   jmp(dispatch);
  245 }
  246 
  247 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  248   ShouldNotReachHere(); // 64bit doesn't use two regs
  249   cmpq(x_lo, y_lo);
  250 }
  251 
  252 void MacroAssembler::lea(Register dst, AddressLiteral src) {
  253   mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  254 }
  255 
  256 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  257   lea(rscratch, adr);
  258   movptr(dst, rscratch);
  259 }
  260 
  261 void MacroAssembler::leave() {
  262   // %%% is this really better? Why not on 32bit too?
  263   emit_int8((unsigned char)0xC9); // LEAVE
  264 }
  265 
  266 void MacroAssembler::lneg(Register hi, Register lo) {
  267   ShouldNotReachHere(); // 64bit doesn't use two regs
  268   negq(lo);
  269 }
  270 
  271 void MacroAssembler::movoop(Register dst, jobject obj) {
  272   mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  273 }
  274 
  275 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  276   mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  277   movq(dst, rscratch);
  278 }
  279 
  280 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  281   mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  282 }
  283 
  284 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  285   mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  286   movq(dst, rscratch);
  287 }
  288 
  289 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  290   if (src.is_lval()) {
  291     mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  292   } else {
  293     if (reachable(src)) {
  294       movq(dst, as_Address(src));
  295     } else {
  296       lea(dst, src);
  297       movq(dst, Address(dst, 0));
  298     }
  299   }
  300 }
  301 
  302 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  303   movq(as_Address(dst, rscratch), src);
  304 }
  305 
  306 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  307   movq(dst, as_Address(src, dst /*rscratch*/));
  308 }
  309 
  310 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
  311 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  312   if (is_simm32(src)) {
  313     movptr(dst, checked_cast<int32_t>(src));
  314   } else {
  315     mov64(rscratch, src);
  316     movq(dst, rscratch);
  317   }
  318 }
  319 
  320 void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  321   movoop(rscratch, obj);
  322   push(rscratch);
  323 }
  324 
  325 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  326   mov_metadata(rscratch, obj);
  327   push(rscratch);
  328 }
  329 
  330 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  331   lea(rscratch, src);
  332   if (src.is_lval()) {
  333     push(rscratch);
  334   } else {
  335     pushq(Address(rscratch, 0));
  336   }
  337 }
  338 
  339 static void pass_arg0(MacroAssembler* masm, Register arg) {
  340   if (c_rarg0 != arg ) {
  341     masm->mov(c_rarg0, arg);
  342   }
  343 }
  344 
  345 static void pass_arg1(MacroAssembler* masm, Register arg) {
  346   if (c_rarg1 != arg ) {
  347     masm->mov(c_rarg1, arg);
  348   }
  349 }
  350 
  351 static void pass_arg2(MacroAssembler* masm, Register arg) {
  352   if (c_rarg2 != arg ) {
  353     masm->mov(c_rarg2, arg);
  354   }
  355 }
  356 
  357 static void pass_arg3(MacroAssembler* masm, Register arg) {
  358   if (c_rarg3 != arg ) {
  359     masm->mov(c_rarg3, arg);
  360   }
  361 }
  362 
  363 void MacroAssembler::stop(const char* msg) {
  364   if (ShowMessageBoxOnError) {
  365     address rip = pc();
  366     pusha(); // get regs on stack
  367     lea(c_rarg1, InternalAddress(rip));
  368     movq(c_rarg2, rsp); // pass pointer to regs array
  369   }
  370   // Skip AOT caching C strings in scratch buffer.
  371   const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
  372   lea(c_rarg0, ExternalAddress((address) str));
  373   andq(rsp, -16); // align stack as required by ABI
  374   call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  375   hlt();
  376 }
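// Layout sketch for the ShowMessageBoxOnError path above, for exposition only:
// pusha() lays out the 16 GPR slots in the order rax, rcx, rdx, rbx, rsp, rbp,
// rsi, rdi, r8..r15, so with a downward-growing stack the array passed to
// debug64()/print_state64() below reads regs[15] = rax ... regs[0] = r15, the
// pre-pusha stack pointer is &regs[16], and the rsp slot itself (regs[11]) is
// not actually filled in by pusha() - see the comment in print_state64.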
  377 
  378 void MacroAssembler::warn(const char* msg) {
  379   push(rbp);
  380   movq(rbp, rsp);
  381   andq(rsp, -16);     // align stack as required by push_CPU_state and call
  382   push_CPU_state();   // keeps alignment at 16 bytes
  383 
  384 #ifdef _WIN64
  385   // Windows always allocates space for its register args
  386   subq(rsp,  frame::arg_reg_save_area_bytes);
  387 #endif
  388   lea(c_rarg0, ExternalAddress((address) msg));
  389   call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  390 
  391 #ifdef _WIN64
  392   // restore stack pointer
  393   addq(rsp, frame::arg_reg_save_area_bytes);
  394 #endif
  395   pop_CPU_state();
  396   mov(rsp, rbp);
  397   pop(rbp);
  398 }
  399 
  400 void MacroAssembler::print_state() {
  401   address rip = pc();
  402   pusha();            // get regs on stack
  403   push(rbp);
  404   movq(rbp, rsp);
  405   andq(rsp, -16);     // align stack as required by push_CPU_state and call
  406   push_CPU_state();   // keeps alignment at 16 bytes
  407 
  408   lea(c_rarg0, InternalAddress(rip));
  409   lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  410   call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
  411 
  412   pop_CPU_state();
  413   mov(rsp, rbp);
  414   pop(rbp);
  415   popa();
  416 }
  417 
  418 #ifndef PRODUCT
  419 extern "C" void findpc(intptr_t x);
  420 #endif
  421 
  422 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  423   // In order to get locks to work, we need to fake an in_VM state
  424   if (ShowMessageBoxOnError) {
  425     JavaThread* thread = JavaThread::current();
  426     JavaThreadState saved_state = thread->thread_state();
  427     thread->set_thread_state(_thread_in_vm);
  428 #ifndef PRODUCT
  429     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
  430       ttyLocker ttyl;
  431       BytecodeCounter::print();
  432     }
  433 #endif
  434     // To see where a verify_oop failed, get $ebx+40/X for this frame.
  435     // XXX correct this offset for amd64
  436     // This is the value of eip which points to where verify_oop will return.
  437     if (os::message_box(msg, "Execution stopped, print registers?")) {
  438       print_state64(pc, regs);
  439       BREAKPOINT;
  440     }
  441   }
  442   fatal("DEBUG MESSAGE: %s", msg);
  443 }
  444 
  445 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  446   ttyLocker ttyl;
  447   DebuggingContext debugging{};
  448   tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
  449 #ifndef PRODUCT
  450   tty->cr();
  451   findpc(pc);
  452   tty->cr();
  453 #endif
  454 #define PRINT_REG(rax, value) \
  455   { tty->print("%s = ", #rax); os::print_location(tty, value); }
  456   PRINT_REG(rax, regs[15]);
  457   PRINT_REG(rbx, regs[12]);
  458   PRINT_REG(rcx, regs[14]);
  459   PRINT_REG(rdx, regs[13]);
  460   PRINT_REG(rdi, regs[8]);
  461   PRINT_REG(rsi, regs[9]);
  462   PRINT_REG(rbp, regs[10]);
  463   // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  464   PRINT_REG(rsp, (intptr_t)(&regs[16]));
  465   PRINT_REG(r8 , regs[7]);
  466   PRINT_REG(r9 , regs[6]);
  467   PRINT_REG(r10, regs[5]);
  468   PRINT_REG(r11, regs[4]);
  469   PRINT_REG(r12, regs[3]);
  470   PRINT_REG(r13, regs[2]);
  471   PRINT_REG(r14, regs[1]);
  472   PRINT_REG(r15, regs[0]);
  473 #undef PRINT_REG
  474   // Print some words near the top of the stack.
  475   int64_t* rsp = &regs[16];
  476   int64_t* dump_sp = rsp;
  477   for (int col1 = 0; col1 < 8; col1++) {
  478     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  479     os::print_location(tty, *dump_sp++);
  480   }
  481   for (int row = 0; row < 25; row++) {
  482     tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
  483     for (int col = 0; col < 4; col++) {
  484       tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
  485     }
  486     tty->cr();
  487   }
  488   // Print some instructions around pc:
  489   Disassembler::decode((address)pc-64, (address)pc);
  490   tty->print_cr("--------");
  491   Disassembler::decode((address)pc, (address)pc+32);
  492 }
  493 
  494 // The java_calling_convention describes stack locations as ideal slots on
  495 // a frame with no abi restrictions. Since we must observe abi restrictions
  496 // (like the placement of the saved rbp and the return address) the slots
  497 // must be biased by the following value.
  498 static int reg2offset_in(VMReg r) {
  499   // Account for saved rbp and return address
  500   // This should really be in_preserve_stack_slots
  501   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
  502 }
  503 
  504 static int reg2offset_out(VMReg r) {
  505   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
  506 }
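// Worked example of the biasing above (a sketch; stack slots are 4 bytes):
// incoming stack slot 0 sits just above the saved rbp and the return address
// of the current frame, so reg2offset_in maps it to (0 + 4) * 4 = 16 bytes off
// rbp, whereas outgoing slot 0 is reg2offset_out = out_preserve_stack_slots()
// slots off rsp, i.e. the bottom of the outgoing argument area.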
  507 
  508 // A long move
  509 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  510 
  511   // The calling convention assures us that each VMRegPair is either
  512   // a single physical register or a pair of adjacent stack slots.
  513 
  514   if (src.is_single_phys_reg() ) {
  515     if (dst.is_single_phys_reg()) {
  516       if (dst.first() != src.first()) {
  517         mov(dst.first()->as_Register(), src.first()->as_Register());
  518       }
  519     } else {
  520       assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
  521              src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
  522       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  523     }
  524   } else if (dst.is_single_phys_reg()) {
  525     assert(src.is_single_reg(),  "not a stack pair");
  526     movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  527   } else {
  528     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  529     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  530     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  531   }
  532 }
  533 
  534 // A double move
  535 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  536 
  537   // The calling convention assures us that each VMRegPair is either
  538   // a single physical register or a pair of adjacent stack slots.
  539 
  540   if (src.is_single_phys_reg() ) {
  541     if (dst.is_single_phys_reg()) {
  542       // In theory these overlap but the ordering is such that this is likely a nop
  543       if ( src.first() != dst.first()) {
  544         movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
  545       }
  546     } else {
  547       assert(dst.is_single_reg(), "not a stack pair");
  548       movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  549     }
  550   } else if (dst.is_single_phys_reg()) {
  551     assert(src.is_single_reg(),  "not a stack pair");
  552     movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  553   } else {
  554     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  555     movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  556     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  557   }
  558 }
  559 
  560 
  561 // A float arg may have to do a float reg to int reg conversion
  562 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  563   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
  564 
  565   // The calling convention assures us that each VMRegPair is either
  566   // a single physical register or a pair of adjacent stack slots.
  567 
  568   if (src.first()->is_stack()) {
  569     if (dst.first()->is_stack()) {
  570       movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  571       movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  572     } else {
  573       // stack to reg
  574       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  575       movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  576     }
  577   } else if (dst.first()->is_stack()) {
  578     // reg to stack
  579     assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  580     movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  581   } else {
  582     // reg to reg
  583     // In theory these overlap but the ordering is such that this is likely a nop
  584     if ( src.first() != dst.first()) {
  585       movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
  586     }
  587   }
  588 }
  589 
  590 // On 64 bit we will store integer-like items to the stack as
  591 // 64-bit items (x86_32/64 ABI) even though Java would only store
  592 // 32 bits for a parameter. On 32 bit it would simply be 32 bits,
  593 // so this routine does 32->32 on 32 bit and 32->64 on 64 bit.
  594 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  595   if (src.first()->is_stack()) {
  596     if (dst.first()->is_stack()) {
  597       // stack to stack
  598       movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  599       movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  600     } else {
  601       // stack to reg
  602       movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  603     }
  604   } else if (dst.first()->is_stack()) {
  605     // reg to stack
  606     // Do we really have to sign extend???
  607     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
  608     movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  609   } else {
  610     // Do we really have to sign extend???
  611     // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
  612     if (dst.first() != src.first()) {
  613       movq(dst.first()->as_Register(), src.first()->as_Register());
  614     }
  615   }
  616 }
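// Usage sketch with hypothetical values, for exposition only: a Java int
// argument of -1 arriving in an incoming stack slot is widened by movslq to
// 0xFFFFFFFFFFFFFFFF before being stored to the outgoing 8-byte slot, matching
// the convention above that 32-bit items occupy full 64-bit slots in the
// native frame; register-to-register moves just copy the whole 64-bit register.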
  617 
  618 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  619   if (src.first()->is_stack()) {
  620     if (dst.first()->is_stack()) {
  621       // stack to stack
  622       movq(rax, Address(rbp, reg2offset_in(src.first())));
  623       movq(Address(rsp, reg2offset_out(dst.first())), rax);
  624     } else {
  625       // stack to reg
  626       movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
  627     }
  628   } else if (dst.first()->is_stack()) {
  629     // reg to stack
  630     movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  631   } else {
  632     if (dst.first() != src.first()) {
  633       movq(dst.first()->as_Register(), src.first()->as_Register());
  634     }
  635   }
  636 }
  637 
  638 // An oop arg. Must pass a handle not the oop itself
  639 void MacroAssembler::object_move(OopMap* map,
  640                         int oop_handle_offset,
  641                         int framesize_in_slots,
  642                         VMRegPair src,
  643                         VMRegPair dst,
  644                         bool is_receiver,
  645                         int* receiver_offset) {
  646 
  647   // must pass a handle. First figure out the location we use as a handle
  648 
  649   Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
  650 
  651   // See if oop is null; if it is, we need no handle
  652 
  653   if (src.first()->is_stack()) {
  654 
  655     // Oop is already on the stack as an argument
  656     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
  657     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  658     if (is_receiver) {
  659       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
  660     }
  661 
  662     cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
  663     lea(rHandle, Address(rbp, reg2offset_in(src.first())));
  664     // conditionally move a null
  665     cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  666   } else {
  667 
  668     // Oop is in a register we must store it to the space we reserve
  669     // on the stack for oop_handles and pass a handle if oop is non-null
  670 
  671     const Register rOop = src.first()->as_Register();
  672     int oop_slot;
  673     if (rOop == j_rarg0)
  674       oop_slot = 0;
  675     else if (rOop == j_rarg1)
  676       oop_slot = 1;
  677     else if (rOop == j_rarg2)
  678       oop_slot = 2;
  679     else if (rOop == j_rarg3)
  680       oop_slot = 3;
  681     else if (rOop == j_rarg4)
  682       oop_slot = 4;
  683     else {
  684       assert(rOop == j_rarg5, "wrong register");
  685       oop_slot = 5;
  686     }
  687 
  688     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
  689     int offset = oop_slot*VMRegImpl::stack_slot_size;
  690 
  691     map->set_oop(VMRegImpl::stack2reg(oop_slot));
  692     // Store oop in handle area, may be null
  693     movptr(Address(rsp, offset), rOop);
  694     if (is_receiver) {
  695       *receiver_offset = offset;
  696     }
  697 
  698     cmpptr(rOop, NULL_WORD);
  699     lea(rHandle, Address(rsp, offset));
  700     // conditionally move a null from the handle area where it was just stored
  701     cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  702   }
  703 
  704   // If arg is on the stack then place it, otherwise it is already in the correct reg.
  705   if (dst.first()->is_stack()) {
  706     movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  707   }
  708 }
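// Sketch of the emitted sequence for the in-register case above, for
// exposition only (rOop holds the incoming oop, rHandle the outgoing arg):
//   movptr(Address(rsp, offset), rOop);             // spill oop into handle area
//   cmpptr(rOop, NULL_WORD);                        // null check
//   lea(rHandle, Address(rsp, offset));             // handle = &spill slot
//   cmovptr(equal, rHandle, Address(rsp, offset));  // null oop -> pass null
// so the native callee sees either a jobject pointing into this frame or null.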
  709 
  710 void MacroAssembler::addptr(Register dst, int32_t imm32) {
  711   addq(dst, imm32);
  712 }
  713 
  714 void MacroAssembler::addptr(Register dst, Register src) {
  715   addq(dst, src);
  716 }
  717 
  718 void MacroAssembler::addptr(Address dst, Register src) {
  719   addq(dst, src);
  720 }
  721 
  722 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  723   assert(rscratch != noreg || always_reachable(src), "missing");
  724 
  725   if (reachable(src)) {
  726     Assembler::addsd(dst, as_Address(src));
  727   } else {
  728     lea(rscratch, src);
  729     Assembler::addsd(dst, Address(rscratch, 0));
  730   }
  731 }
  732 
  733 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  734   assert(rscratch != noreg || always_reachable(src), "missing");
  735 
  736   if (reachable(src)) {
  737     addss(dst, as_Address(src));
  738   } else {
  739     lea(rscratch, src);
  740     addss(dst, Address(rscratch, 0));
  741   }
  742 }
  743 
  744 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  745   assert(rscratch != noreg || always_reachable(src), "missing");
  746 
  747   if (reachable(src)) {
  748     Assembler::addpd(dst, as_Address(src));
  749   } else {
  750     lea(rscratch, src);
  751     Assembler::addpd(dst, Address(rscratch, 0));
  752   }
  753 }
  754 
  755 // See 8273459.  Function for ensuring 64-byte alignment, intended for stubs only.
  756 // Stub code is generated once and never copied.
  757 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
  758 void MacroAssembler::align64() {
  759   align(64, (uint)(uintptr_t)pc());
  760 }
  761 
  762 void MacroAssembler::align32() {
  763   align(32, (uint)(uintptr_t)pc());
  764 }
  765 
  766 void MacroAssembler::align(uint modulus) {
  767   // 8273459: Ensure alignment is possible with current segment alignment
  768   assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  769   align(modulus, offset());
  770 }
  771 
  772 void MacroAssembler::align(uint modulus, uint target) {
  773   if (target % modulus != 0) {
  774     nop(modulus - (target % modulus));
  775   }
  776 }
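// Worked example, for exposition only: with modulus = 16 and target offset 13,
// 13 % 16 = 13 and nop(16 - 13) emits three padding bytes, so the next
// instruction starts on a 16-byte boundary. align32()/align64() above pass the
// absolute pc instead of the section offset because stubs need alignment in
// memory, not merely within the code buffer (see 8273459).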
  777 
  778 void MacroAssembler::push_f(XMMRegister r) {
  779   subptr(rsp, wordSize);
  780   movflt(Address(rsp, 0), r);
  781 }
  782 
  783 void MacroAssembler::pop_f(XMMRegister r) {
  784   movflt(r, Address(rsp, 0));
  785   addptr(rsp, wordSize);
  786 }
  787 
  788 void MacroAssembler::push_d(XMMRegister r) {
  789   subptr(rsp, 2 * wordSize);
  790   movdbl(Address(rsp, 0), r);
  791 }
  792 
  793 void MacroAssembler::pop_d(XMMRegister r) {
  794   movdbl(r, Address(rsp, 0));
  795   addptr(rsp, 2 * Interpreter::stackElementSize);
  796 }
  797 
  798 void MacroAssembler::push_ppx(Register src) {
  799   if (VM_Version::supports_apx_f()) {
  800     pushp(src);
  801   } else {
  802     Assembler::push(src);
  803   }
  804 }
  805 
  806 void MacroAssembler::pop_ppx(Register dst) {
  807   if (VM_Version::supports_apx_f()) {
  808     popp(dst);
  809   } else {
  810     Assembler::pop(dst);
  811   }
  812 }
  813 
  814 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  815   // Used in sign-masking with aligned address.
  816   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  817   assert(rscratch != noreg || always_reachable(src), "missing");
  818 
  819   if (UseAVX > 2 &&
  820       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
  821       (dst->encoding() >= 16)) {
  822     vpand(dst, dst, src, AVX_512bit, rscratch);
  823   } else if (reachable(src)) {
  824     Assembler::andpd(dst, as_Address(src));
  825   } else {
  826     lea(rscratch, src);
  827     Assembler::andpd(dst, Address(rscratch, 0));
  828   }
  829 }
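// Rationale sketch for the UseAVX > 2 special case above: xmm16..xmm31 are
// only encodable with EVEX, and andpd has an EVEX form only with AVX512DQ
// (plus AVX512VL for the 128-bit width). When those are missing but dst is a
// high register, the bitwise-equivalent integer vpand at 512-bit width is
// emitted instead, so the sign mask still reaches the requested register.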
  830 
  831 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  832   // Used in sign-masking with aligned address.
  833   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  834   assert(rscratch != noreg || always_reachable(src), "missing");
  835 
  836   if (reachable(src)) {
  837     Assembler::andps(dst, as_Address(src));
  838   } else {
  839     lea(rscratch, src);
  840     Assembler::andps(dst, Address(rscratch, 0));
  841   }
  842 }
  843 
  844 void MacroAssembler::andptr(Register dst, int32_t imm32) {
  845   andq(dst, imm32);
  846 }
  847 
  848 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  849   assert(rscratch != noreg || always_reachable(src), "missing");
  850 
  851   if (reachable(src)) {
  852     andq(dst, as_Address(src));
  853   } else {
  854     lea(rscratch, src);
  855     andq(dst, Address(rscratch, 0));
  856   }
  857 }
  858 
  859 void MacroAssembler::atomic_incl(Address counter_addr) {
  860   lock();
  861   incrementl(counter_addr);
  862 }
  863 
  864 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  865   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
  866 
  867   if (reachable(counter_addr)) {
  868     atomic_incl(as_Address(counter_addr));
  869   } else {
  870     lea(rscratch, counter_addr);
  871     atomic_incl(Address(rscratch, 0));
  872   }
  873 }
  874 
  875 void MacroAssembler::atomic_incq(Address counter_addr) {
  876   lock();
  877   incrementq(counter_addr);
  878 }
  879 
  880 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  881   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
  882 
  883   if (reachable(counter_addr)) {
  884     atomic_incq(as_Address(counter_addr));
  885   } else {
  886     lea(rscratch, counter_addr);
  887     atomic_incq(Address(rscratch, 0));
  888   }
  889 }
  890 
  891 // Writes to successive stack pages until the given offset is reached, to
  892 // check for stack overflow + shadow pages.  This clobbers tmp.
  893 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  894   movptr(tmp, rsp);
  895   // Bang stack for total size given plus shadow page size.
  896   // Bang one page at a time because large size can bang beyond yellow and
  897   // red zones.
  898   Label loop;
  899   bind(loop);
  900   movl(Address(tmp, (-(int)os::vm_page_size())), size );
  901   subptr(tmp, (int)os::vm_page_size());
  902   subl(size, (int)os::vm_page_size());
  903   jcc(Assembler::greater, loop);
  904 
  905   // Bang down shadow pages too.
  906   // At this point, (tmp-0) is the last address touched, so don't
  907   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  908   // was post-decremented.)  Skip this address by starting at i=1, and
  909   // touch a few more pages below.  N.B.  It is important to touch all
  910   // the way down including all pages in the shadow zone.
  911   for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
  912     // this could be any sized move but this can be a debugging crumb
  913     // so the bigger the better.
  914     movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  915   }
  916 }
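// Illustrative walk-through assuming 4K pages, for exposition only: with
// size = 12K the loop above stores to rsp-4K, rsp-8K and rsp-12K (one write
// per page, so a large frame cannot leap over the yellow/red guard zones
// untouched), and the trailing loop then touches every page of the shadow
// zone below the last banged address.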
  917 
  918 void MacroAssembler::reserved_stack_check() {
  919   // testing if reserved zone needs to be enabled
  920   Label no_reserved_zone_enabling;
  921 
  922   cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
  923   jcc(Assembler::below, no_reserved_zone_enabling);
  924 
  925   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
  926   jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  927   should_not_reach_here();
  928 
  929   bind(no_reserved_zone_enabling);
  930 }
  931 
  932 void MacroAssembler::c2bool(Register x) {
  933   // implements x == 0 ? 0 : 1
  934   // note: must only look at the least-significant byte of x
  935   //       since C-style booleans are stored in one byte
  936   //       only! (was bug)
  937   andl(x, 0xFF);
  938   setb(Assembler::notZero, x);
  939 }
  940 
  941 // Wouldn't need if AddressLiteral version had new name
  942 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  943   Assembler::call(L, rtype);
  944 }
  945 
  946 void MacroAssembler::call(Register entry) {
  947   Assembler::call(entry);
  948 }
  949 
  950 void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  951   assert(rscratch != noreg || always_reachable(entry), "missing");
  952 
  953   if (reachable(entry)) {
  954     Assembler::call_literal(entry.target(), entry.rspec());
  955   } else {
  956     lea(rscratch, entry);
  957     Assembler::call(rscratch);
  958   }
  959 }
  960 
  961 void MacroAssembler::ic_call(address entry, jint method_index) {
  962   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  963   // Needs full 64-bit immediate for later patching.
  964   mov64(rax, (int64_t)Universe::non_oop_word());
  965   call(AddressLiteral(entry, rh));
  966 }
  967 
  968 int MacroAssembler::ic_check_size() {
  969   return UseCompactObjectHeaders ? 17 : 14;
  970 }
  971 
  972 int MacroAssembler::ic_check(int end_alignment) {
  973   Register receiver = j_rarg0;
  974   Register data = rax;
  975   Register temp = rscratch1;
  976 
  977   // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  978   // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  979   // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  980   // before the inline cache check here, and not after it.
  981   align(end_alignment, offset() + ic_check_size());
  982 
  983   int uep_offset = offset();
  984 
  985   if (UseCompactObjectHeaders) {
  986     load_narrow_klass_compact(temp, receiver);
  987     cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  988   } else if (UseCompressedClassPointers) {
  989     movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
  990     cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  991   } else {
  992     movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
  993     cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  994   }
  995 
  996   // if inline cache check fails, then jump to runtime routine
  997   jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  998   assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);
  999 
 1000   return uep_offset;
 1001 }
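// Dispatch sketch, for exposition only: the inline-cache call site leaves its
// CompiledICData pointer in rax (rax initially holds Universe::non_oop_word(),
// see ic_call above, and is patched when the call resolves) and jumps to the
// UEP returned here. The check compares the receiver's (possibly compressed)
// klass in j_rarg0 against the speculated_klass stored in that data and
// tail-jumps to the ic-miss stub on a mismatch, so only a matching receiver
// falls through into the aligned verified entry point.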
 1002 
 1003 void MacroAssembler::emit_static_call_stub() {
 1004   // Static stub relocation also tags the Method* in the code-stream.
 1005   mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
 1006   // This is recognized as unresolved by relocs/nativeinst/ic code.
 1007   jump(RuntimeAddress(pc()));
 1008 }
 1009 
 1010 // Implementation of call_VM versions
 1011 
 1012 void MacroAssembler::call_VM(Register oop_result,
 1013                              address entry_point,
 1014                              bool check_exceptions) {
 1015   Label C, E;
 1016   call(C, relocInfo::none);
 1017   jmp(E);
 1018 
 1019   bind(C);
 1020   call_VM_helper(oop_result, entry_point, 0, check_exceptions);
 1021   ret(0);
 1022 
 1023   bind(E);
 1024 }
 1025 
 1026 void MacroAssembler::call_VM(Register oop_result,
 1027                              address entry_point,
 1028                              Register arg_1,
 1029                              bool check_exceptions) {
 1030   Label C, E;
 1031   call(C, relocInfo::none);
 1032   jmp(E);
 1033 
 1034   bind(C);
 1035   pass_arg1(this, arg_1);
 1036   call_VM_helper(oop_result, entry_point, 1, check_exceptions);
 1037   ret(0);
 1038 
 1039   bind(E);
 1040 }
 1041 
 1042 void MacroAssembler::call_VM(Register oop_result,
 1043                              address entry_point,
 1044                              Register arg_1,
 1045                              Register arg_2,
 1046                              bool check_exceptions) {
 1047   Label C, E;
 1048   call(C, relocInfo::none);
 1049   jmp(E);
 1050 
 1051   bind(C);
 1052 
 1053   assert_different_registers(arg_1, c_rarg2);
 1054 
 1055   pass_arg2(this, arg_2);
 1056   pass_arg1(this, arg_1);
 1057   call_VM_helper(oop_result, entry_point, 2, check_exceptions);
 1058   ret(0);
 1059 
 1060   bind(E);
 1061 }
 1062 
 1063 void MacroAssembler::call_VM(Register oop_result,
 1064                              address entry_point,
 1065                              Register arg_1,
 1066                              Register arg_2,
 1067                              Register arg_3,
 1068                              bool check_exceptions) {
 1069   Label C, E;
 1070   call(C, relocInfo::none);
 1071   jmp(E);
 1072 
 1073   bind(C);
 1074 
 1075   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1076   assert_different_registers(arg_2, c_rarg3);
 1077   pass_arg3(this, arg_3);
 1078   pass_arg2(this, arg_2);
 1079   pass_arg1(this, arg_1);
 1080   call_VM_helper(oop_result, entry_point, 3, check_exceptions);
 1081   ret(0);
 1082 
 1083   bind(E);
 1084 }
 1085 
 1086 void MacroAssembler::call_VM(Register oop_result,
 1087                              Register last_java_sp,
 1088                              address entry_point,
 1089                              int number_of_arguments,
 1090                              bool check_exceptions) {
 1091   call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
 1092 }
 1093 
 1094 void MacroAssembler::call_VM(Register oop_result,
 1095                              Register last_java_sp,
 1096                              address entry_point,
 1097                              Register arg_1,
 1098                              bool check_exceptions) {
 1099   pass_arg1(this, arg_1);
 1100   call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
 1101 }
 1102 
 1103 void MacroAssembler::call_VM(Register oop_result,
 1104                              Register last_java_sp,
 1105                              address entry_point,
 1106                              Register arg_1,
 1107                              Register arg_2,
 1108                              bool check_exceptions) {
 1109 
 1110   assert_different_registers(arg_1, c_rarg2);
 1111   pass_arg2(this, arg_2);
 1112   pass_arg1(this, arg_1);
 1113   call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
 1114 }
 1115 
 1116 void MacroAssembler::call_VM(Register oop_result,
 1117                              Register last_java_sp,
 1118                              address entry_point,
 1119                              Register arg_1,
 1120                              Register arg_2,
 1121                              Register arg_3,
 1122                              bool check_exceptions) {
 1123   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1124   assert_different_registers(arg_2, c_rarg3);
 1125   pass_arg3(this, arg_3);
 1126   pass_arg2(this, arg_2);
 1127   pass_arg1(this, arg_1);
 1128   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 1129 }
 1130 
 1131 void MacroAssembler::super_call_VM(Register oop_result,
 1132                                    Register last_java_sp,
 1133                                    address entry_point,
 1134                                    int number_of_arguments,
 1135                                    bool check_exceptions) {
 1136   MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
 1137 }
 1138 
 1139 void MacroAssembler::super_call_VM(Register oop_result,
 1140                                    Register last_java_sp,
 1141                                    address entry_point,
 1142                                    Register arg_1,
 1143                                    bool check_exceptions) {
 1144   pass_arg1(this, arg_1);
 1145   super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
 1146 }
 1147 
 1148 void MacroAssembler::super_call_VM(Register oop_result,
 1149                                    Register last_java_sp,
 1150                                    address entry_point,
 1151                                    Register arg_1,
 1152                                    Register arg_2,
 1153                                    bool check_exceptions) {
 1154 
 1155   assert_different_registers(arg_1, c_rarg2);
 1156   pass_arg2(this, arg_2);
 1157   pass_arg1(this, arg_1);
 1158   super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
 1159 }
 1160 
 1161 void MacroAssembler::super_call_VM(Register oop_result,
 1162                                    Register last_java_sp,
 1163                                    address entry_point,
 1164                                    Register arg_1,
 1165                                    Register arg_2,
 1166                                    Register arg_3,
 1167                                    bool check_exceptions) {
 1168   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1169   assert_different_registers(arg_2, c_rarg3);
 1170   pass_arg3(this, arg_3);
 1171   pass_arg2(this, arg_2);
 1172   pass_arg1(this, arg_1);
 1173   super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 1174 }
 1175 
 1176 void MacroAssembler::call_VM_base(Register oop_result,
 1177                                   Register last_java_sp,
 1178                                   address  entry_point,
 1179                                   int      number_of_arguments,
 1180                                   bool     check_exceptions) {
 1181   Register java_thread = r15_thread;
 1182 
 1183   // determine last_java_sp register
 1184   if (!last_java_sp->is_valid()) {
 1185     last_java_sp = rsp;
 1186   }
 1187   // debugging support
 1188   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
 1189 #ifdef ASSERT
 1190   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
 1191   // r12 is the heapbase.
 1192   if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
 1193 #endif // ASSERT
 1194 
 1195   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
 1196   assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
 1197 
 1198   // push java thread (becomes first argument of C function)
 1199 
 1200   mov(c_rarg0, r15_thread);
 1201 
 1202   // set last Java frame before call
 1203   assert(last_java_sp != rbp, "can't use ebp/rbp");
 1204 
 1205   // Only interpreter should have to set fp
 1206   set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);
 1207 
 1208   // do the call, remove parameters
 1209   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
 1210 
 1211 #ifdef ASSERT
 1212   // Check that thread register is not clobbered.
 1213   guarantee(java_thread != rax, "change this code");
 1214   push(rax);
 1215   { Label L;
 1216     get_thread_slow(rax);
 1217     cmpptr(java_thread, rax);
 1218     jcc(Assembler::equal, L);
 1219     STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
 1220     bind(L);
 1221   }
 1222   pop(rax);
 1223 #endif
 1224 
 1225   // reset last Java frame
 1226   // Only interpreter should have to clear fp
 1227   reset_last_Java_frame(true);
 1228 
 1229    // C++ interp handles this in the interpreter
 1230   check_and_handle_popframe();
 1231   check_and_handle_earlyret();
 1232 
 1233   if (check_exceptions) {
 1234     // check for pending exceptions (java_thread is set upon return)
 1235     cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 1236     // This used to conditionally jump to forward_exception, but it is
 1237     // possible, if we relocate, that the branch will not reach. So we must
 1238     // jump around it so that we can always reach the stub.
 1239 
 1240     Label ok;
 1241     jcc(Assembler::equal, ok);
 1242     jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 1243     bind(ok);
 1244   }
 1245 
 1246   // get oop result if there is one and reset the value in the thread
 1247   if (oop_result->is_valid()) {
 1248     get_vm_result_oop(oop_result);
 1249   }
 1250 }
 1251 
 1252 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
 1253   // Calculating the value for last_Java_sp is somewhat subtle.
 1254   // call_VM does an intermediate call which places a return address on
 1255   // the stack just under the stack pointer as the user finishes with it.
 1256   // This allows us to retrieve last_Java_pc from last_Java_sp[-1].
 1257 
 1258   // We've pushed one address, correct last_Java_sp
 1259   lea(rax, Address(rsp, wordSize));
 1260 
 1261   call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
 1262 }
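// Frame sketch, for exposition only: call_VM emits  call C; jmp E;  with C
// being the out-of-line block that runs this helper and ends in ret(0). The
// intermediate call pushed exactly one return address, so rsp points at it and
// rsp + wordSize is the stack pointer the caller was using; that value becomes
// last_Java_sp, and last_Java_pc is recoverable as last_Java_sp[-1].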
 1263 
 1264 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
 1265 void MacroAssembler::call_VM_leaf0(address entry_point) {
 1266   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 1267 }
 1268 
 1269 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
 1270   call_VM_leaf_base(entry_point, number_of_arguments);
 1271 }
 1272 
 1273 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
 1274   pass_arg0(this, arg_0);
 1275   call_VM_leaf(entry_point, 1);
 1276 }
 1277 
 1278 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1279 
 1280   assert_different_registers(arg_0, c_rarg1);
 1281   pass_arg1(this, arg_1);
 1282   pass_arg0(this, arg_0);
 1283   call_VM_leaf(entry_point, 2);
 1284 }
 1285 
 1286 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1287   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1288   assert_different_registers(arg_1, c_rarg2);
 1289   pass_arg2(this, arg_2);
 1290   pass_arg1(this, arg_1);
 1291   pass_arg0(this, arg_0);
 1292   call_VM_leaf(entry_point, 3);
 1293 }
 1294 
 1295 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1296   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1297   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1298   assert_different_registers(arg_2, c_rarg3);
 1299   pass_arg3(this, arg_3);
 1300   pass_arg2(this, arg_2);
 1301   pass_arg1(this, arg_1);
 1302   pass_arg0(this, arg_0);
 1303   call_VM_leaf(entry_point, 4);
 1304 }
 1305 
 1306 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
 1307   pass_arg0(this, arg_0);
 1308   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 1309 }
 1310 
 1311 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
 1312   assert_different_registers(arg_0, c_rarg1);
 1313   pass_arg1(this, arg_1);
 1314   pass_arg0(this, arg_0);
 1315   MacroAssembler::call_VM_leaf_base(entry_point, 2);
 1316 }
 1317 
 1318 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
 1319   assert_different_registers(arg_0, c_rarg1, c_rarg2);
 1320   assert_different_registers(arg_1, c_rarg2);
 1321   pass_arg2(this, arg_2);
 1322   pass_arg1(this, arg_1);
 1323   pass_arg0(this, arg_0);
 1324   MacroAssembler::call_VM_leaf_base(entry_point, 3);
 1325 }
 1326 
 1327 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
 1328   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
 1329   assert_different_registers(arg_1, c_rarg2, c_rarg3);
 1330   assert_different_registers(arg_2, c_rarg3);
 1331   pass_arg3(this, arg_3);
 1332   pass_arg2(this, arg_2);
 1333   pass_arg1(this, arg_1);
 1334   pass_arg0(this, arg_0);
 1335   MacroAssembler::call_VM_leaf_base(entry_point, 4);
 1336 }
 1337 
 1338 void MacroAssembler::get_vm_result_oop(Register oop_result) {
 1339   movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
 1340   movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
 1341   verify_oop_msg(oop_result, "broken oop in call_VM_base");
 1342 }
 1343 
 1344 void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
 1345   movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
 1346   movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
 1347 }
 1348 
 1349 void MacroAssembler::check_and_handle_earlyret() {
 1350 }
 1351 
 1352 void MacroAssembler::check_and_handle_popframe() {
 1353 }
 1354 
 1355 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
 1356   assert(rscratch != noreg || always_reachable(src1), "missing");
 1357 
 1358   if (reachable(src1)) {
 1359     cmpl(as_Address(src1), imm);
 1360   } else {
 1361     lea(rscratch, src1);
 1362     cmpl(Address(rscratch, 0), imm);
 1363   }
 1364 }
 1365 
 1366 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
 1367   assert(!src2.is_lval(), "use cmpptr");
 1368   assert(rscratch != noreg || always_reachable(src2), "missing");
 1369 
 1370   if (reachable(src2)) {
 1371     cmpl(src1, as_Address(src2));
 1372   } else {
 1373     lea(rscratch, src2);
 1374     cmpl(src1, Address(rscratch, 0));
 1375   }
 1376 }
 1377 
 1378 void MacroAssembler::cmp32(Register src1, int32_t imm) {
 1379   Assembler::cmpl(src1, imm);
 1380 }
 1381 
 1382 void MacroAssembler::cmp32(Register src1, Address src2) {
 1383   Assembler::cmpl(src1, src2);
 1384 }
 1385 
 1386 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
 1387   ucomisd(opr1, opr2);
 1388 
 1389   Label L;
 1390   if (unordered_is_less) {
 1391     movl(dst, -1);
 1392     jcc(Assembler::parity, L);
 1393     jcc(Assembler::below , L);
 1394     movl(dst, 0);
 1395     jcc(Assembler::equal , L);
 1396     increment(dst);
 1397   } else { // unordered is greater
 1398     movl(dst, 1);
 1399     jcc(Assembler::parity, L);
 1400     jcc(Assembler::above , L);
 1401     movl(dst, 0);
 1402     jcc(Assembler::equal , L);
 1403     decrementl(dst);
 1404   }
 1405   bind(L);
 1406 }
 1407 
 1408 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
 1409   ucomiss(opr1, opr2);
 1410 
 1411   Label L;
 1412   if (unordered_is_less) {
 1413     movl(dst, -1);
 1414     jcc(Assembler::parity, L);
 1415     jcc(Assembler::below , L);
 1416     movl(dst, 0);
 1417     jcc(Assembler::equal , L);
 1418     increment(dst);
 1419   } else { // unordered is greater
 1420     movl(dst, 1);
 1421     jcc(Assembler::parity, L);
 1422     jcc(Assembler::above , L);
 1423     movl(dst, 0);
 1424     jcc(Assembler::equal , L);
 1425     decrementl(dst);
 1426   }
 1427   bind(L);
 1428 }
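// Flag-decoding sketch for the two compare-to-int helpers above, for
// exposition only. ucomisd/ucomiss set ZF/PF/CF and the branches map that to
// a Java-style -1/0/+1:
//   opr1 <  opr2   ->  CF=1            ->  dst = -1
//   opr1 == opr2   ->  ZF=1, CF=0      ->  dst =  0
//   opr1 >  opr2   ->  ZF=CF=PF=0      ->  dst = +1
//   unordered(NaN) ->  ZF=PF=CF=1      ->  dst = -1 or +1 per unordered_is_less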
 1429 
 1430 
 1431 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
 1432   assert(rscratch != noreg || always_reachable(src1), "missing");
 1433 
 1434   if (reachable(src1)) {
 1435     cmpb(as_Address(src1), imm);
 1436   } else {
 1437     lea(rscratch, src1);
 1438     cmpb(Address(rscratch, 0), imm);
 1439   }
 1440 }
 1441 
 1442 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
 1443   assert(rscratch != noreg || always_reachable(src2), "missing");
 1444 
 1445   if (src2.is_lval()) {
 1446     movptr(rscratch, src2);
 1447     Assembler::cmpq(src1, rscratch);
 1448   } else if (reachable(src2)) {
 1449     cmpq(src1, as_Address(src2));
 1450   } else {
 1451     lea(rscratch, src2);
 1452     Assembler::cmpq(src1, Address(rscratch, 0));
 1453   }
 1454 }
 1455 
 1456 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
 1457   assert(src2.is_lval(), "not a mem-mem compare");
 1458   // moves src2's literal address
 1459   movptr(rscratch, src2);
 1460   Assembler::cmpq(src1, rscratch);
 1461 }
 1462 
 1463 void MacroAssembler::cmpoop(Register src1, Register src2) {
 1464   cmpptr(src1, src2);
 1465 }
 1466 
 1467 void MacroAssembler::cmpoop(Register src1, Address src2) {
 1468   cmpptr(src1, src2);
 1469 }
 1470 
 1471 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
 1472   movoop(rscratch, src2);
 1473   cmpptr(src1, rscratch);
 1474 }
 1475 
 1476 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
 1477   assert(rscratch != noreg || always_reachable(adr), "missing");
 1478 
 1479   if (reachable(adr)) {
 1480     lock();
 1481     cmpxchgptr(reg, as_Address(adr));
 1482   } else {
 1483     lea(rscratch, adr);
 1484     lock();
 1485     cmpxchgptr(reg, Address(rscratch, 0));
 1486   }
 1487 }
 1488 
 1489 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
 1490   cmpxchgq(reg, adr);
 1491 }
 1492 
 1493 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1494   assert(rscratch != noreg || always_reachable(src), "missing");
 1495 
 1496   if (reachable(src)) {
 1497     Assembler::comisd(dst, as_Address(src));
 1498   } else {
 1499     lea(rscratch, src);
 1500     Assembler::comisd(dst, Address(rscratch, 0));
 1501   }
 1502 }
 1503 
 1504 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1505   assert(rscratch != noreg || always_reachable(src), "missing");
 1506 
 1507   if (reachable(src)) {
 1508     Assembler::comiss(dst, as_Address(src));
 1509   } else {
 1510     lea(rscratch, src);
 1511     Assembler::comiss(dst, Address(rscratch, 0));
 1512   }
 1513 }
 1514 
 1515 
 1516 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
 1517   assert(rscratch != noreg || always_reachable(counter_addr), "missing");
 1518 
 1519   Condition negated_cond = negate_condition(cond);
 1520   Label L;
 1521   jcc(negated_cond, L);
 1522   pushf(); // Preserve flags
 1523   atomic_incl(counter_addr, rscratch);
 1524   popf();
 1525   bind(L);
 1526 }
 1527 
 1528 int MacroAssembler::corrected_idivl(Register reg) {
 1529   // Full implementation of Java idiv and irem; checks for
 1530   // special case as described in JVM spec., p.243 & p.271.
 1531   // The function returns the (pc) offset of the idivl
 1532   // instruction - may be needed for implicit exceptions.
 1533   //
 1534   //         normal case                           special case
 1535   //
 1536   // input : rax: dividend                          min_int
 1537   //         reg: divisor   (may not be rax/rdx)    -1
 1538   //
 1539   // output: rax: quotient  (= rax idiv reg)        min_int
 1540   //         rdx: remainder (= rax irem reg)        0
 1541   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
 1542   const int min_int = 0x80000000;
 1543   Label normal_case, special_case;
 1544 
 1545   // check for special case
 1546   cmpl(rax, min_int);
 1547   jcc(Assembler::notEqual, normal_case);
 1548   xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
 1549   cmpl(reg, -1);
 1550   jcc(Assembler::equal, special_case);
 1551 
 1552   // handle normal case
 1553   bind(normal_case);
 1554   cdql();
 1555   int idivl_offset = offset();
 1556   idivl(reg);
 1557 
 1558   // normal and special case exit
 1559   bind(special_case);
 1560 
 1561   return idivl_offset;
 1562 }
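// Worked example of the special case handled by corrected_idivl above: for
// Integer.MIN_VALUE / -1 the hardware idiv would fault (#DE) because the
// mathematically correct quotient, 2^31, is not representable in a signed
// 32-bit register.  The JVM instead requires quotient == min_int and
// remainder == 0, which is exactly what falls through when idivl is
// skipped: rax still holds min_int and rdx was zeroed before the divisor
// check.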
 1563 
 1564 
 1565 
 1566 void MacroAssembler::decrementl(Register reg, int value) {
 1567   if (value == min_jint) {subl(reg, value) ; return; }
 1568   if (value <  0) { incrementl(reg, -value); return; }
 1569   if (value == 0) {                        ; return; }
 1570   if (value == 1 && UseIncDec) { decl(reg) ; return; }
 1571   /* else */      { subl(reg, value)       ; return; }
 1572 }
 1573 
 1574 void MacroAssembler::decrementl(Address dst, int value) {
 1575   if (value == min_jint) {subl(dst, value) ; return; }
 1576   if (value <  0) { incrementl(dst, -value); return; }
 1577   if (value == 0) {                        ; return; }
 1578   if (value == 1 && UseIncDec) { decl(dst) ; return; }
 1579   /* else */      { subl(dst, value)       ; return; }
 1580 }
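// Note on the min_jint checks above (and in the incrementl counterparts
// further down): negating min_jint overflows a 32-bit int, so the usual
// "negative value => forward to the opposite helper" rewrite cannot be
// used and a plain subl/addl of the immediate is emitted instead.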
 1581 
 1582 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
 1583   assert(shift_value > 0, "illegal shift value");
 1584   Label _is_positive;
 1585   testl (reg, reg);
 1586   jcc (Assembler::positive, _is_positive);
 1587   int offset = (1 << shift_value) - 1 ;
 1588 
 1589   if (offset == 1) {
 1590     incrementl(reg);
 1591   } else {
 1592     addl(reg, offset);
 1593   }
 1594 
 1595   bind (_is_positive);
 1596   sarl(reg, shift_value);
 1597 }
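// Quick sanity check of the bias-before-shift idiom used above for signed
// division by 2^shift_value: an arithmetic shift alone rounds toward
// negative infinity (e.g. -7 >> 2 == -2), while Java division truncates
// toward zero (-7 / 4 == -1).  Adding (2^shift - 1), here 3, to negative
// dividends first yields (-7 + 3) >> 2 == -1 as required.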
 1598 
 1599 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1600   assert(rscratch != noreg || always_reachable(src), "missing");
 1601 
 1602   if (reachable(src)) {
 1603     Assembler::divsd(dst, as_Address(src));
 1604   } else {
 1605     lea(rscratch, src);
 1606     Assembler::divsd(dst, Address(rscratch, 0));
 1607   }
 1608 }
 1609 
 1610 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1611   assert(rscratch != noreg || always_reachable(src), "missing");
 1612 
 1613   if (reachable(src)) {
 1614     Assembler::divss(dst, as_Address(src));
 1615   } else {
 1616     lea(rscratch, src);
 1617     Assembler::divss(dst, Address(rscratch, 0));
 1618   }
 1619 }
 1620 
 1621 void MacroAssembler::enter() {
 1622   push(rbp);
 1623   mov(rbp, rsp);
 1624 }
 1625 
 1626 void MacroAssembler::post_call_nop() {
 1627   if (!Continuations::enabled()) {
 1628     return;
 1629   }
 1630   InstructionMark im(this);
 1631   relocate(post_call_nop_Relocation::spec());
 1632   InlineSkippedInstructionsCounter skipCounter(this);
 1633   emit_int8((uint8_t)0x0f);
 1634   emit_int8((uint8_t)0x1f);
 1635   emit_int8((uint8_t)0x84);
 1636   emit_int8((uint8_t)0x00);
 1637   emit_int32(0x00);
 1638 }
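// The bytes emitted above form the canonical 8-byte x86 NOP,
// 0F 1F 84 00 <disp32>, i.e. nopl 0x0(%rax,%rax,1).  The post_call_nop
// relocation marks it so the runtime can locate it at call return addresses
// when continuations are enabled, and the trailing 32-bit displacement
// (initially zero) gives the runtime a patchable data slot at a fixed
// offset from the return address.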
 1639 
 1640 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1641   assert(rscratch != noreg || always_reachable(src), "missing");
 1642   if (reachable(src)) {
 1643     Assembler::mulpd(dst, as_Address(src));
 1644   } else {
 1645     lea(rscratch, src);
 1646     Assembler::mulpd(dst, Address(rscratch, 0));
 1647   }
 1648 }
 1649 
 1650 // dst = c = a * b + c
 1651 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 1652   Assembler::vfmadd231sd(c, a, b);
 1653   if (dst != c) {
 1654     movdbl(dst, c);
 1655   }
 1656 }
 1657 
 1658 // dst = c = a * b + c
 1659 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
 1660   Assembler::vfmadd231ss(c, a, b);
 1661   if (dst != c) {
 1662     movflt(dst, c);
 1663   }
 1664 }
 1665 
 1666 // dst = c = a * b + c
 1667 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 1668   Assembler::vfmadd231pd(c, a, b, vector_len);
 1669   if (dst != c) {
 1670     vmovdqu(dst, c);
 1671   }
 1672 }
 1673 
 1674 // dst = c = a * b + c
 1675 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
 1676   Assembler::vfmadd231ps(c, a, b, vector_len);
 1677   if (dst != c) {
 1678     vmovdqu(dst, c);
 1679   }
 1680 }
 1681 
 1682 // dst = c = a * b + c
 1683 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 1684   Assembler::vfmadd231pd(c, a, b, vector_len);
 1685   if (dst != c) {
 1686     vmovdqu(dst, c);
 1687   }
 1688 }
 1689 
 1690 // dst = c = a * b + c
 1691 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
 1692   Assembler::vfmadd231ps(c, a, b, vector_len);
 1693   if (dst != c) {
 1694     vmovdqu(dst, c);
 1695   }
 1696 }
 1697 
 1698 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
 1699   assert(rscratch != noreg || always_reachable(dst), "missing");
 1700 
 1701   if (reachable(dst)) {
 1702     incrementl(as_Address(dst));
 1703   } else {
 1704     lea(rscratch, dst);
 1705     incrementl(Address(rscratch, 0));
 1706   }
 1707 }
 1708 
 1709 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
 1710   incrementl(as_Address(dst, rscratch));
 1711 }
 1712 
 1713 void MacroAssembler::incrementl(Register reg, int value) {
 1714   if (value == min_jint) {addl(reg, value) ; return; }
 1715   if (value <  0) { decrementl(reg, -value); return; }
 1716   if (value == 0) {                        ; return; }
 1717   if (value == 1 && UseIncDec) { incl(reg) ; return; }
 1718   /* else */      { addl(reg, value)       ; return; }
 1719 }
 1720 
 1721 void MacroAssembler::incrementl(Address dst, int value) {
 1722   if (value == min_jint) {addl(dst, value) ; return; }
 1723   if (value <  0) { decrementl(dst, -value); return; }
 1724   if (value == 0) {                        ; return; }
 1725   if (value == 1 && UseIncDec) { incl(dst) ; return; }
 1726   /* else */      { addl(dst, value)       ; return; }
 1727 }
 1728 
 1729 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
 1730   assert(rscratch != noreg || always_reachable(dst), "missing");
 1731   assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
 1732   if (reachable(dst)) {
 1733     jmp_literal(dst.target(), dst.rspec());
 1734   } else {
 1735     lea(rscratch, dst);
 1736     jmp(rscratch);
 1737   }
 1738 }
 1739 
 1740 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
 1741   assert(rscratch != noreg || always_reachable(dst), "missing");
 1742   assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
 1743   if (reachable(dst)) {
 1744     InstructionMark im(this);
 1745     relocate(dst.reloc());
 1746     const int short_size = 2;
 1747     const int long_size = 6;
 1748     int offs = (intptr_t)dst.target() - ((intptr_t)pc());
 1749     if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
 1750       // 0111 tttn #8-bit disp
 1751       emit_int8(0x70 | cc);
 1752       emit_int8((offs - short_size) & 0xFF);
 1753     } else {
 1754       // 0000 1111 1000 tttn #32-bit disp
 1755       emit_int8(0x0F);
 1756       emit_int8((unsigned char)(0x80 | cc));
 1757       emit_int32(offs - long_size);
 1758     }
 1759   } else {
 1760 #ifdef ASSERT
 1761     warning("reversing conditional branch");
 1762 #endif /* ASSERT */
 1763     Label skip;
 1764     jccb(reverse[cc], skip);
 1765     lea(rscratch, dst);
 1766     Assembler::jmp(rscratch);
 1767     bind(skip);
 1768   }
 1769 }
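// Note on the unreachable branch above: x86-64 has no conditional jump that
// can reach an arbitrary 64-bit target, so the condition is inverted and a
// short jccb skips over an unconditional indirect jmp through rscratch,
// which can reach anywhere.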
 1770 
 1771 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) {
 1772   ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
 1773   assert(rscratch != noreg || always_reachable(mxcsr_std), "missing");
 1774 
 1775   stmxcsr(mxcsr_save);
 1776   movl(tmp, mxcsr_save);
 1777   if (EnableX86ECoreOpts) {
 1778     // The mxcsr_std has status bits set for performance on ECore
 1779     orl(tmp, 0x003f);
 1780   } else {
 1781     // Mask out status bits (only check control and mask bits)
 1782     andl(tmp, 0xFFC0);
 1783   }
 1784   cmp32(tmp, mxcsr_std, rscratch);
 1785 }
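// Either way, the intent of cmp32_mxcsr_std is to compare only the MXCSR
// control/mask bits against the VM's standard value: the low six bits
// (0x003f) are the sticky exception-status flags, which are either masked
// out here or assumed to be pre-set in the reference word on E-core builds.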
 1786 
 1787 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
 1788   assert(rscratch != noreg || always_reachable(src), "missing");
 1789 
 1790   if (reachable(src)) {
 1791     Assembler::ldmxcsr(as_Address(src));
 1792   } else {
 1793     lea(rscratch, src);
 1794     Assembler::ldmxcsr(Address(rscratch, 0));
 1795   }
 1796 }
 1797 
 1798 int MacroAssembler::load_signed_byte(Register dst, Address src) {
 1799   int off = offset();
 1800   movsbl(dst, src); // movsxb
 1801   return off;
 1802 }
 1803 
 1804 // Note: load_signed_short used to be called load_signed_word.
 1805 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
 1806 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
 1807 // The term "word" in HotSpot means a 32- or 64-bit machine word.
 1808 int MacroAssembler::load_signed_short(Register dst, Address src) {
 1809   // Note: it would seem safe to sign-extend 16 => 64 bits here, but the
 1810   // 64-bit port has always used a 32-bit load (movswl), which implies
 1811   // that callers only rely on the low 32 bits.
 1812   int off = offset();
 1813   movswl(dst, src); // movsxw
 1814   return off;
 1815 }
 1816 
 1817 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
 1818   // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
 1819   // and "3.9 Partial Register Penalties", p. 22.
 1820   int off = offset();
 1821   movzbl(dst, src); // movzxb
 1822   return off;
 1823 }
 1824 
 1825 // Note: load_unsigned_short used to be called load_unsigned_word.
 1826 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
 1827   // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
 1828   // and "3.9 Partial Register Penalties", p. 22.
 1829   int off = offset();
 1830   movzwl(dst, src); // movzxw
 1831   return off;
 1832 }
 1833 
 1834 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
 1835   switch (size_in_bytes) {
 1836   case  8:  movq(dst, src); break;
 1837   case  4:  movl(dst, src); break;
 1838   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
 1839   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
 1840   default:  ShouldNotReachHere();
 1841   }
 1842 }
 1843 
 1844 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
 1845   switch (size_in_bytes) {
 1846   case  8:  movq(dst, src); break;
 1847   case  4:  movl(dst, src); break;
 1848   case  2:  movw(dst, src); break;
 1849   case  1:  movb(dst, src); break;
 1850   default:  ShouldNotReachHere();
 1851   }
 1852 }
 1853 
 1854 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
 1855   assert(rscratch != noreg || always_reachable(dst), "missing");
 1856 
 1857   if (reachable(dst)) {
 1858     movl(as_Address(dst), src);
 1859   } else {
 1860     lea(rscratch, dst);
 1861     movl(Address(rscratch, 0), src);
 1862   }
 1863 }
 1864 
 1865 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
 1866   if (reachable(src)) {
 1867     movl(dst, as_Address(src));
 1868   } else {
 1869     lea(dst, src);
 1870     movl(dst, Address(dst, 0));
 1871   }
 1872 }
 1873 
 1874 // C++ bool manipulation
 1875 
 1876 void MacroAssembler::movbool(Register dst, Address src) {
 1877   if(sizeof(bool) == 1)
 1878     movb(dst, src);
 1879   else if(sizeof(bool) == 2)
 1880     movw(dst, src);
 1881   else if(sizeof(bool) == 4)
 1882     movl(dst, src);
 1883   else
 1884     // unsupported
 1885     ShouldNotReachHere();
 1886 }
 1887 
 1888 void MacroAssembler::movbool(Address dst, bool boolconst) {
 1889   if(sizeof(bool) == 1)
 1890     movb(dst, (int) boolconst);
 1891   else if(sizeof(bool) == 2)
 1892     movw(dst, (int) boolconst);
 1893   else if(sizeof(bool) == 4)
 1894     movl(dst, (int) boolconst);
 1895   else
 1896     // unsupported
 1897     ShouldNotReachHere();
 1898 }
 1899 
 1900 void MacroAssembler::movbool(Address dst, Register src) {
 1901   if(sizeof(bool) == 1)
 1902     movb(dst, src);
 1903   else if(sizeof(bool) == 2)
 1904     movw(dst, src);
 1905   else if(sizeof(bool) == 4)
 1906     movl(dst, src);
 1907   else
 1908     // unsupported
 1909     ShouldNotReachHere();
 1910 }
 1911 
 1912 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1913   assert(rscratch != noreg || always_reachable(src), "missing");
 1914 
 1915   if (reachable(src)) {
 1916     movdl(dst, as_Address(src));
 1917   } else {
 1918     lea(rscratch, src);
 1919     movdl(dst, Address(rscratch, 0));
 1920   }
 1921 }
 1922 
 1923 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1924   assert(rscratch != noreg || always_reachable(src), "missing");
 1925 
 1926   if (reachable(src)) {
 1927     movq(dst, as_Address(src));
 1928   } else {
 1929     lea(rscratch, src);
 1930     movq(dst, Address(rscratch, 0));
 1931   }
 1932 }
 1933 
 1934 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1935   assert(rscratch != noreg || always_reachable(src), "missing");
 1936 
 1937   if (reachable(src)) {
 1938     if (UseXmmLoadAndClearUpper) {
 1939       movsd (dst, as_Address(src));
 1940     } else {
 1941       movlpd(dst, as_Address(src));
 1942     }
 1943   } else {
 1944     lea(rscratch, src);
 1945     if (UseXmmLoadAndClearUpper) {
 1946       movsd (dst, Address(rscratch, 0));
 1947     } else {
 1948       movlpd(dst, Address(rscratch, 0));
 1949     }
 1950   }
 1951 }
 1952 
 1953 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
 1954   assert(rscratch != noreg || always_reachable(src), "missing");
 1955 
 1956   if (reachable(src)) {
 1957     movss(dst, as_Address(src));
 1958   } else {
 1959     lea(rscratch, src);
 1960     movss(dst, Address(rscratch, 0));
 1961   }
 1962 }
 1963 
 1964 void MacroAssembler::movptr(Register dst, Register src) {
 1965   movq(dst, src);
 1966 }
 1967 
 1968 void MacroAssembler::movptr(Register dst, Address src) {
 1969   movq(dst, src);
 1970 }
 1971 
 1972 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
 1973 void MacroAssembler::movptr(Register dst, intptr_t src) {
 1974   if (is_uimm32(src)) {
 1975     movl(dst, checked_cast<uint32_t>(src));
 1976   } else if (is_simm32(src)) {
 1977     movq(dst, checked_cast<int32_t>(src));
 1978   } else {
 1979     mov64(dst, src);
 1980   }
 1981 }
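// Encoding note for the three cases above: a value that fits in an unsigned
// 32-bit immediate can use movl, which implicitly zero-extends into the
// full 64-bit register; a value that fits in a signed 32-bit immediate uses
// the sign-extending movq imm32 form; only the remaining values need the
// full 10-byte mov64 (movabs) encoding.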
 1982 
 1983 void MacroAssembler::movptr(Address dst, Register src) {
 1984   movq(dst, src);
 1985 }
 1986 
 1987 void MacroAssembler::movptr(Address dst, int32_t src) {
 1988   movslq(dst, src);
 1989 }
 1990 
 1991 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
 1992   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 1993   Assembler::movdqu(dst, src);
 1994 }
 1995 
 1996 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
 1997   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 1998   Assembler::movdqu(dst, src);
 1999 }
 2000 
 2001 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
 2002   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2003   Assembler::movdqu(dst, src);
 2004 }
 2005 
 2006 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2007   assert(rscratch != noreg || always_reachable(src), "missing");
 2008 
 2009   if (reachable(src)) {
 2010     movdqu(dst, as_Address(src));
 2011   } else {
 2012     lea(rscratch, src);
 2013     movdqu(dst, Address(rscratch, 0));
 2014   }
 2015 }
 2016 
 2017 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
 2018   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2019   Assembler::vmovdqu(dst, src);
 2020 }
 2021 
 2022 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
 2023   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2024   Assembler::vmovdqu(dst, src);
 2025 }
 2026 
 2027 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
 2028   assert(((dst->encoding() < 16  && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 2029   Assembler::vmovdqu(dst, src);
 2030 }
 2031 
 2032 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2033   assert(rscratch != noreg || always_reachable(src), "missing");
 2034 
 2035   if (reachable(src)) {
 2036     vmovdqu(dst, as_Address(src));
 2037   } else {
 2039     lea(rscratch, src);
 2040     vmovdqu(dst, Address(rscratch, 0));
 2041   }
 2042 }
 2043 
 2044 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2045   assert(rscratch != noreg || always_reachable(src), "missing");
 2046 
 2047   if (vector_len == AVX_512bit) {
 2048     evmovdquq(dst, src, AVX_512bit, rscratch);
 2049   } else if (vector_len == AVX_256bit) {
 2050     vmovdqu(dst, src, rscratch);
 2051   } else {
 2052     movdqu(dst, src, rscratch);
 2053   }
 2054 }
 2055 
 2056 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
 2057   if (vector_len == AVX_512bit) {
 2058     evmovdquq(dst, src, AVX_512bit);
 2059   } else if (vector_len == AVX_256bit) {
 2060     vmovdqu(dst, src);
 2061   } else {
 2062     movdqu(dst, src);
 2063   }
 2064 }
 2065 
 2066 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) {
 2067   if (vector_len == AVX_512bit) {
 2068     evmovdquq(dst, src, AVX_512bit);
 2069   } else if (vector_len == AVX_256bit) {
 2070     vmovdqu(dst, src);
 2071   } else {
 2072     movdqu(dst, src);
 2073   }
 2074 }
 2075 
 2076 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) {
 2077   if (vector_len == AVX_512bit) {
 2078     evmovdquq(dst, src, AVX_512bit);
 2079   } else if (vector_len == AVX_256bit) {
 2080     vmovdqu(dst, src);
 2081   } else {
 2082     movdqu(dst, src);
 2083   }
 2084 }
 2085 
 2086 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2087   assert(rscratch != noreg || always_reachable(src), "missing");
 2088 
 2089   if (reachable(src)) {
 2090     vmovdqa(dst, as_Address(src));
 2091   } else {
 2093     lea(rscratch, src);
 2094     vmovdqa(dst, Address(rscratch, 0));
 2095   }
 2096 }
 2097 
 2098 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2099   assert(rscratch != noreg || always_reachable(src), "missing");
 2100 
 2101   if (vector_len == AVX_512bit) {
 2102     evmovdqaq(dst, src, AVX_512bit, rscratch);
 2103   } else if (vector_len == AVX_256bit) {
 2104     vmovdqa(dst, src, rscratch);
 2105   } else {
 2106     movdqa(dst, src, rscratch);
 2107   }
 2108 }
 2109 
 2110 void MacroAssembler::kmov(KRegister dst, Address src) {
 2111   if (VM_Version::supports_avx512bw()) {
 2112     kmovql(dst, src);
 2113   } else {
 2114     assert(VM_Version::supports_evex(), "");
 2115     kmovwl(dst, src);
 2116   }
 2117 }
 2118 
 2119 void MacroAssembler::kmov(Address dst, KRegister src) {
 2120   if (VM_Version::supports_avx512bw()) {
 2121     kmovql(dst, src);
 2122   } else {
 2123     assert(VM_Version::supports_evex(), "");
 2124     kmovwl(dst, src);
 2125   }
 2126 }
 2127 
 2128 void MacroAssembler::kmov(KRegister dst, KRegister src) {
 2129   if (VM_Version::supports_avx512bw()) {
 2130     kmovql(dst, src);
 2131   } else {
 2132     assert(VM_Version::supports_evex(), "");
 2133     kmovwl(dst, src);
 2134   }
 2135 }
 2136 
 2137 void MacroAssembler::kmov(Register dst, KRegister src) {
 2138   if (VM_Version::supports_avx512bw()) {
 2139     kmovql(dst, src);
 2140   } else {
 2141     assert(VM_Version::supports_evex(), "");
 2142     kmovwl(dst, src);
 2143   }
 2144 }
 2145 
 2146 void MacroAssembler::kmov(KRegister dst, Register src) {
 2147   if (VM_Version::supports_avx512bw()) {
 2148     kmovql(dst, src);
 2149   } else {
 2150     assert(VM_Version::supports_evex(), "");
 2151     kmovwl(dst, src);
 2152   }
 2153 }
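// Width selection for the kmov helpers above, roughly: with AVX-512BW the
// opmask registers are used up to 64 bits wide (one bit per byte lane of a
// zmm register), so kmovq moves the whole mask; without BW only dword/qword
// lanes can be masked, a 16-bit mask suffices, and baseline EVEX support
// only guarantees kmovw.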
 2154 
 2155 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
 2156   assert(rscratch != noreg || always_reachable(src), "missing");
 2157 
 2158   if (reachable(src)) {
 2159     kmovql(dst, as_Address(src));
 2160   } else {
 2161     lea(rscratch, src);
 2162     kmovql(dst, Address(rscratch, 0));
 2163   }
 2164 }
 2165 
 2166 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) {
 2167   assert(rscratch != noreg || always_reachable(src), "missing");
 2168 
 2169   if (reachable(src)) {
 2170     kmovwl(dst, as_Address(src));
 2171   } else {
 2172     lea(rscratch, src);
 2173     kmovwl(dst, Address(rscratch, 0));
 2174   }
 2175 }
 2176 
 2177 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2178                                int vector_len, Register rscratch) {
 2179   assert(rscratch != noreg || always_reachable(src), "missing");
 2180 
 2181   if (reachable(src)) {
 2182     Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
 2183   } else {
 2184     lea(rscratch, src);
 2185     Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
 2186   }
 2187 }
 2188 
 2189 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
 2190                                int vector_len, Register rscratch) {
 2191   assert(rscratch != noreg || always_reachable(src), "missing");
 2192 
 2193   if (reachable(src)) {
 2194     Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
 2195   } else {
 2196     lea(rscratch, src);
 2197     Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
 2198   }
 2199 }
 2200 
 2201 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2202   assert(rscratch != noreg || always_reachable(src), "missing");
 2203 
 2204   if (reachable(src)) {
 2205     Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
 2206   } else {
 2207     lea(rscratch, src);
 2208     Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
 2209   }
 2210 }
 2211 
 2212 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2213   assert(rscratch != noreg || always_reachable(src), "missing");
 2214 
 2215   if (reachable(src)) {
 2216     Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
 2217   } else {
 2218     lea(rscratch, src);
 2219     Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2220   }
 2221 }
 2222 
 2223 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2224   assert(rscratch != noreg || always_reachable(src), "missing");
 2225 
 2226   if (reachable(src)) {
 2227     Assembler::evmovdquq(dst, as_Address(src), vector_len);
 2228   } else {
 2229     lea(rscratch, src);
 2230     Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
 2231   }
 2232 }
 2233 
 2234 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 2235   assert(rscratch != noreg || always_reachable(src), "missing");
 2236 
 2237   if (reachable(src)) {
 2238     Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len);
 2239   } else {
 2240     lea(rscratch, src);
 2241     Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len);
 2242   }
 2243 }
 2244 
 2245 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2246   assert(rscratch != noreg || always_reachable(src), "missing");
 2247 
 2248   if (reachable(src)) {
 2249     Assembler::evmovdqaq(dst, as_Address(src), vector_len);
 2250   } else {
 2251     lea(rscratch, src);
 2252     Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len);
 2253   }
 2254 }
 2255 
 2256 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2257   assert(rscratch != noreg || always_reachable(src), "missing");
 2258 
 2259   if (reachable(src)) {
 2260     Assembler::movapd(dst, as_Address(src));
 2261   } else {
 2262     lea(rscratch, src);
 2263     Assembler::movapd(dst, Address(rscratch, 0));
 2264   }
 2265 }
 2266 
 2267 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2268   assert(rscratch != noreg || always_reachable(src), "missing");
 2269 
 2270   if (reachable(src)) {
 2271     Assembler::movdqa(dst, as_Address(src));
 2272   } else {
 2273     lea(rscratch, src);
 2274     Assembler::movdqa(dst, Address(rscratch, 0));
 2275   }
 2276 }
 2277 
 2278 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2279   assert(rscratch != noreg || always_reachable(src), "missing");
 2280 
 2281   if (reachable(src)) {
 2282     Assembler::movsd(dst, as_Address(src));
 2283   } else {
 2284     lea(rscratch, src);
 2285     Assembler::movsd(dst, Address(rscratch, 0));
 2286   }
 2287 }
 2288 
 2289 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2290   assert(rscratch != noreg || always_reachable(src), "missing");
 2291 
 2292   if (reachable(src)) {
 2293     Assembler::movss(dst, as_Address(src));
 2294   } else {
 2295     lea(rscratch, src);
 2296     Assembler::movss(dst, Address(rscratch, 0));
 2297   }
 2298 }
 2299 
 2300 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2301   assert(rscratch != noreg || always_reachable(src), "missing");
 2302 
 2303   if (reachable(src)) {
 2304     Assembler::movddup(dst, as_Address(src));
 2305   } else {
 2306     lea(rscratch, src);
 2307     Assembler::movddup(dst, Address(rscratch, 0));
 2308   }
 2309 }
 2310 
 2311 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2312   assert(rscratch != noreg || always_reachable(src), "missing");
 2313 
 2314   if (reachable(src)) {
 2315     Assembler::vmovddup(dst, as_Address(src), vector_len);
 2316   } else {
 2317     lea(rscratch, src);
 2318     Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
 2319   }
 2320 }
 2321 
 2322 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2323   assert(rscratch != noreg || always_reachable(src), "missing");
 2324 
 2325   if (reachable(src)) {
 2326     Assembler::mulsd(dst, as_Address(src));
 2327   } else {
 2328     lea(rscratch, src);
 2329     Assembler::mulsd(dst, Address(rscratch, 0));
 2330   }
 2331 }
 2332 
 2333 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2334   assert(rscratch != noreg || always_reachable(src), "missing");
 2335 
 2336   if (reachable(src)) {
 2337     Assembler::mulss(dst, as_Address(src));
 2338   } else {
 2339     lea(rscratch, src);
 2340     Assembler::mulss(dst, Address(rscratch, 0));
 2341   }
 2342 }
 2343 
 2344 void MacroAssembler::null_check(Register reg, int offset) {
 2345   if (needs_explicit_null_check(offset)) {
 2346     // provoke OS null exception if reg is null by
 2347     // accessing M[reg] w/o changing any (non-CC) registers
 2348     // NOTE: cmpl is plenty here to provoke a segv
 2349     cmpptr(rax, Address(reg, 0));
 2350     // Note: should probably use testl(rax, Address(reg, 0));
 2351     //       may be shorter code (however, this version of
 2352     //       testl needs to be implemented first)
 2353   } else {
 2354     // nothing to do, (later) access of M[reg + offset]
 2355     // will provoke OS null exception if reg is null
 2356   }
 2357 }
 2358 
 2359 void MacroAssembler::os_breakpoint() {
 2360   // instead of directly emitting a breakpoint, call os::breakpoint() for better debuggability
 2361   // (e.g., MSVC can't call ps() otherwise)
 2362   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 2363 }
 2364 
 2365 void MacroAssembler::unimplemented(const char* what) {
 2366   const char* buf = nullptr;
 2367   {
 2368     ResourceMark rm;
 2369     stringStream ss;
 2370     ss.print("unimplemented: %s", what);
 2371     buf = code_string(ss.as_string());
 2372   }
 2373   stop(buf);
 2374 }
 2375 
 2376 #define XSTATE_BV 0x200
 2377 
 2378 void MacroAssembler::pop_CPU_state() {
 2379   pop_FPU_state();
 2380   pop_IU_state();
 2381 }
 2382 
 2383 void MacroAssembler::pop_FPU_state() {
 2384   fxrstor(Address(rsp, 0));
 2385   addptr(rsp, FPUStateSizeInWords * wordSize);
 2386 }
 2387 
 2388 void MacroAssembler::pop_IU_state() {
 2389   popa();
 2390   addq(rsp, 8);
 2391   popf();
 2392 }
 2393 
 2394 // Save Integer and Float state
 2395 // Warning: Stack must be 16 byte aligned (64bit)
 2396 void MacroAssembler::push_CPU_state() {
 2397   push_IU_state();
 2398   push_FPU_state();
 2399 }
 2400 
 2401 void MacroAssembler::push_FPU_state() {
 2402   subptr(rsp, FPUStateSizeInWords * wordSize);
 2403   fxsave(Address(rsp, 0));
 2404 }
 2405 
 2406 void MacroAssembler::push_IU_state() {
 2407   // Push flags first because pusha kills them
 2408   pushf();
 2409   // Make sure rsp stays 16-byte aligned
 2410   subq(rsp, 8);
 2411   pusha();
 2412 }
 2413 
 2414 void MacroAssembler::push_cont_fastpath() {
 2415   if (!Continuations::enabled()) return;
 2416 
 2417   Label L_done;
 2418   cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
 2419   jccb(Assembler::belowEqual, L_done);
 2420   movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
 2421   bind(L_done);
 2422 }
 2423 
 2424 void MacroAssembler::pop_cont_fastpath() {
 2425   if (!Continuations::enabled()) return;
 2426 
 2427   Label L_done;
 2428   cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
 2429   jccb(Assembler::below, L_done);
 2430   movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
 2431   bind(L_done);
 2432 }
 2433 
 2434 #ifdef ASSERT
 2435 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
 2436   Label no_cont;
 2437   movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
 2438   testl(cont, cont);
 2439   jcc(Assembler::zero, no_cont);
 2440   stop(name);
 2441   bind(no_cont);
 2442 }
 2443 #endif
 2444 
 2445 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
 2446   // we must set sp to zero to clear frame
 2447   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
 2448   // must clear fp, so that compiled frames are not confused; it is
 2449   // possible that we need it only for debugging
 2450   if (clear_fp) {
 2451     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
 2452   }
 2453   // Always clear the pc because it could have been set by make_walkable()
 2454   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 2455   vzeroupper();
 2456 }
 2457 
 2458 void MacroAssembler::round_to(Register reg, int modulus) {
 2459   addptr(reg, modulus - 1);
 2460   andptr(reg, -modulus);
 2461 }
 2462 
 2463 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
 2464   if (at_return) {
 2465     // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
 2466     // we may safely use rsp instead to perform the stack watermark check.
 2467     cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
 2468     jcc(Assembler::above, slow_path);
 2469     return;
 2470   }
 2471   testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
 2472   jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
 2473 }
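// Rough summary of the two polling modes above: on method return the
// per-thread polling word is treated as a stack-watermark address, and a
// frame returning with its stack pointer (rsp in compiled code, rbp
// otherwise) above that watermark takes the slow path; at all other poll
// sites only the low poll bit of the word is tested, which is set while a
// safepoint or handshake is pending.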
 2474 
 2475 // Calls to C land
 2476 //
 2477 // When entering C land, the rbp & rsp of the last Java frame have to be recorded
 2478 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
 2479 // has to be reset to 0. This is required to allow proper stack traversal.
 2480 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 2481                                          Register last_java_fp,
 2482                                          address  last_java_pc,
 2483                                          Register rscratch) {
 2484   vzeroupper();
 2485   // determine last_java_sp register
 2486   if (!last_java_sp->is_valid()) {
 2487     last_java_sp = rsp;
 2488   }
 2489   // last_java_fp is optional
 2490   if (last_java_fp->is_valid()) {
 2491     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
 2492   }
 2493   // last_java_pc is optional
 2494   if (last_java_pc != nullptr) {
 2495     Address java_pc(r15_thread,
 2496                     JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
 2497     lea(java_pc, InternalAddress(last_java_pc), rscratch);
 2498   }
 2499   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
 2500 }
 2501 
 2502 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 2503                                          Register last_java_fp,
 2504                                          Label &L,
 2505                                          Register scratch) {
 2506   lea(scratch, L);
 2507   movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
 2508   set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
 2509 }
 2510 
 2511 void MacroAssembler::shlptr(Register dst, int imm8) {
 2512   shlq(dst, imm8);
 2513 }
 2514 
 2515 void MacroAssembler::shrptr(Register dst, int imm8) {
 2516   shrq(dst, imm8);
 2517 }
 2518 
 2519 void MacroAssembler::sign_extend_byte(Register reg) {
 2520   movsbl(reg, reg); // movsxb
 2521 }
 2522 
 2523 void MacroAssembler::sign_extend_short(Register reg) {
 2524   movswl(reg, reg); // movsxw
 2525 }
 2526 
 2527 void MacroAssembler::testl(Address dst, int32_t imm32) {
 2528   if (imm32 >= 0 && is8bit(imm32)) {
 2529     testb(dst, imm32);
 2530   } else {
 2531     Assembler::testl(dst, imm32);
 2532   }
 2533 }
 2534 
 2535 void MacroAssembler::testl(Register dst, int32_t imm32) {
 2536   if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
 2537     testb(dst, imm32);
 2538   } else {
 2539     Assembler::testl(dst, imm32);
 2540   }
 2541 }
 2542 
 2543 void MacroAssembler::testl(Register dst, AddressLiteral src) {
 2544   assert(always_reachable(src), "Address should be reachable");
 2545   testl(dst, as_Address(src));
 2546 }
 2547 
 2548 void MacroAssembler::testq(Address dst, int32_t imm32) {
 2549   if (imm32 >= 0) {
 2550     testl(dst, imm32);
 2551   } else {
 2552     Assembler::testq(dst, imm32);
 2553   }
 2554 }
 2555 
 2556 void MacroAssembler::testq(Register dst, int32_t imm32) {
 2557   if (imm32 >= 0) {
 2558     testl(dst, imm32);
 2559   } else {
 2560     Assembler::testq(dst, imm32);
 2561   }
 2562 }
 2563 
 2564 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
 2565   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2566   Assembler::pcmpeqb(dst, src);
 2567 }
 2568 
 2569 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
 2570   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2571   Assembler::pcmpeqw(dst, src);
 2572 }
 2573 
 2574 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
 2575   assert((dst->encoding() < 16),"XMM register should be 0-15");
 2576   Assembler::pcmpestri(dst, src, imm8);
 2577 }
 2578 
 2579 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
 2580   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 2581   Assembler::pcmpestri(dst, src, imm8);
 2582 }
 2583 
 2584 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
 2585   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2586   Assembler::pmovzxbw(dst, src);
 2587 }
 2588 
 2589 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
 2590   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2591   Assembler::pmovzxbw(dst, src);
 2592 }
 2593 
 2594 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
 2595   assert((src->encoding() < 16),"XMM register should be 0-15");
 2596   Assembler::pmovmskb(dst, src);
 2597 }
 2598 
 2599 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
 2600   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 2601   Assembler::ptest(dst, src);
 2602 }
 2603 
 2604 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2605   assert(rscratch != noreg || always_reachable(src), "missing");
 2606 
 2607   if (reachable(src)) {
 2608     Assembler::sqrtss(dst, as_Address(src));
 2609   } else {
 2610     lea(rscratch, src);
 2611     Assembler::sqrtss(dst, Address(rscratch, 0));
 2612   }
 2613 }
 2614 
 2615 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2616   assert(rscratch != noreg || always_reachable(src), "missing");
 2617 
 2618   if (reachable(src)) {
 2619     Assembler::subsd(dst, as_Address(src));
 2620   } else {
 2621     lea(rscratch, src);
 2622     Assembler::subsd(dst, Address(rscratch, 0));
 2623   }
 2624 }
 2625 
 2626 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
 2627   assert(rscratch != noreg || always_reachable(src), "missing");
 2628 
 2629   if (reachable(src)) {
 2630     Assembler::roundsd(dst, as_Address(src), rmode);
 2631   } else {
 2632     lea(rscratch, src);
 2633     Assembler::roundsd(dst, Address(rscratch, 0), rmode);
 2634   }
 2635 }
 2636 
 2637 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2638   assert(rscratch != noreg || always_reachable(src), "missing");
 2639 
 2640   if (reachable(src)) {
 2641     Assembler::subss(dst, as_Address(src));
 2642   } else {
 2643     lea(rscratch, src);
 2644     Assembler::subss(dst, Address(rscratch, 0));
 2645   }
 2646 }
 2647 
 2648 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2649   assert(rscratch != noreg || always_reachable(src), "missing");
 2650 
 2651   if (reachable(src)) {
 2652     Assembler::ucomisd(dst, as_Address(src));
 2653   } else {
 2654     lea(rscratch, src);
 2655     Assembler::ucomisd(dst, Address(rscratch, 0));
 2656   }
 2657 }
 2658 
 2659 void MacroAssembler::vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2660   assert(rscratch != noreg || always_reachable(src), "missing");
 2661 
 2662   if (reachable(src)) {
 2663     Assembler::vucomxsd(dst, as_Address(src));
 2664   } else {
 2665     lea(rscratch, src);
 2666     Assembler::vucomxsd(dst, Address(rscratch, 0));
 2667   }
 2668 }
 2669 
 2670 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2671   assert(rscratch != noreg || always_reachable(src), "missing");
 2672 
 2673   if (reachable(src)) {
 2674     Assembler::ucomiss(dst, as_Address(src));
 2675   } else {
 2676     lea(rscratch, src);
 2677     Assembler::ucomiss(dst, Address(rscratch, 0));
 2678   }
 2679 }
 2680 
 2681 void MacroAssembler::vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2682   assert(rscratch != noreg || always_reachable(src), "missing");
 2683 
 2684   if (reachable(src)) {
 2685     Assembler::vucomxss(dst, as_Address(src));
 2686   } else {
 2687     lea(rscratch, src);
 2688     Assembler::vucomxss(dst, Address(rscratch, 0));
 2689   }
 2690 }
 2691 
 2692 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2693   assert(rscratch != noreg || always_reachable(src), "missing");
 2694 
 2695   // Used in sign-bit flipping with aligned address.
 2696   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 2697 
 2698   if (UseAVX > 2 &&
 2699       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2700       (dst->encoding() >= 16)) {
 2701     vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
 2702   } else if (reachable(src)) {
 2703     Assembler::xorpd(dst, as_Address(src));
 2704   } else {
 2705     lea(rscratch, src);
 2706     Assembler::xorpd(dst, Address(rscratch, 0));
 2707   }
 2708 }
 2709 
 2710 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
 2711   if (UseAVX > 2 &&
 2712       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2713       ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
 2714     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 2715   } else {
 2716     Assembler::xorpd(dst, src);
 2717   }
 2718 }
 2719 
 2720 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
 2721   if (UseAVX > 2 &&
 2722       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2723       ((dst->encoding() >= 16) || (src->encoding() >= 16))) {
 2724     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
 2725   } else {
 2726     Assembler::xorps(dst, src);
 2727   }
 2728 }
 2729 
 2730 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2731   assert(rscratch != noreg || always_reachable(src), "missing");
 2732 
 2733   // Used in sign-bit flipping with aligned address.
 2734   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 2735 
 2736   if (UseAVX > 2 &&
 2737       (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
 2738       (dst->encoding() >= 16)) {
 2739     vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch);
 2740   } else if (reachable(src)) {
 2741     Assembler::xorps(dst, as_Address(src));
 2742   } else {
 2743     lea(rscratch, src);
 2744     Assembler::xorps(dst, Address(rscratch, 0));
 2745   }
 2746 }
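// Note on the asserts and the AVX-512 branches in the two helpers above:
// legacy-SSE xorps/xorpd fault on unaligned 16-byte memory operands, hence
// the 16-byte alignment requirement when running without AVX (VEX-encoded
// forms tolerate unaligned addresses).  Encoding these opcodes against
// xmm16-31 requires AVX-512DQ (+VL for the 128-bit form), so when those
// features are missing the code falls back to a full-width EVEX vpxor.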
 2747 
 2748 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
 2749   assert(rscratch != noreg || always_reachable(src), "missing");
 2750 
 2751   // Used in sign-bit flipping with aligned address.
 2752   bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
 2753   assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
 2754   if (reachable(src)) {
 2755     Assembler::pshufb(dst, as_Address(src));
 2756   } else {
 2757     lea(rscratch, src);
 2758     Assembler::pshufb(dst, Address(rscratch, 0));
 2759   }
 2760 }
 2761 
 2762 // AVX 3-operands instructions
 2763 
 2764 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 2765   assert(rscratch != noreg || always_reachable(src), "missing");
 2766 
 2767   if (reachable(src)) {
 2768     vaddsd(dst, nds, as_Address(src));
 2769   } else {
 2770     lea(rscratch, src);
 2771     vaddsd(dst, nds, Address(rscratch, 0));
 2772   }
 2773 }
 2774 
 2775 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 2776   assert(rscratch != noreg || always_reachable(src), "missing");
 2777 
 2778   if (reachable(src)) {
 2779     vaddss(dst, nds, as_Address(src));
 2780   } else {
 2781     lea(rscratch, src);
 2782     vaddss(dst, nds, Address(rscratch, 0));
 2783   }
 2784 }
 2785 
 2786 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2787   assert(UseAVX > 0, "requires some form of AVX");
 2788   assert(rscratch != noreg || always_reachable(src), "missing");
 2789 
 2790   if (reachable(src)) {
 2791     Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
 2792   } else {
 2793     lea(rscratch, src);
 2794     Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
 2795   }
 2796 }
 2797 
 2798 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2799   assert(UseAVX > 0, "requires some form of AVX");
 2800   assert(rscratch != noreg || always_reachable(src), "missing");
 2801 
 2802   if (reachable(src)) {
 2803     Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
 2804   } else {
 2805     lea(rscratch, src);
 2806     Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
 2807   }
 2808 }
 2809 
 2810 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 2811   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 2812   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 2813 
 2814   vandps(dst, nds, negate_field, vector_len, rscratch);
 2815 }
 2816 
 2817 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
 2818   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 2819   assert(rscratch != noreg || always_reachable(negate_field), "missing");
 2820 
 2821   vandpd(dst, nds, negate_field, vector_len, rscratch);
 2822 }
 2823 
 2824 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2825   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2826   Assembler::vpaddb(dst, nds, src, vector_len);
 2827 }
 2828 
 2829 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2830   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2831   Assembler::vpaddb(dst, nds, src, vector_len);
 2832 }
 2833 
 2834 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2835   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2836   Assembler::vpaddw(dst, nds, src, vector_len);
 2837 }
 2838 
 2839 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2840   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2841   Assembler::vpaddw(dst, nds, src, vector_len);
 2842 }
 2843 
 2844 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2845   assert(rscratch != noreg || always_reachable(src), "missing");
 2846 
 2847   if (reachable(src)) {
 2848     Assembler::vpand(dst, nds, as_Address(src), vector_len);
 2849   } else {
 2850     lea(rscratch, src);
 2851     Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
 2852   }
 2853 }
 2854 
 2855 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2856   assert(rscratch != noreg || always_reachable(src), "missing");
 2857 
 2858   if (reachable(src)) {
 2859     Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
 2860   } else {
 2861     lea(rscratch, src);
 2862     Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
 2863   }
 2864 }
 2865 
 2866 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2867   assert(rscratch != noreg || always_reachable(src), "missing");
 2868 
 2869   if (reachable(src)) {
 2870     Assembler::vbroadcasti128(dst, as_Address(src), vector_len);
 2871   } else {
 2872     lea(rscratch, src);
 2873     Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len);
 2874   }
 2875 }
 2876 
 2877 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2878   assert(rscratch != noreg || always_reachable(src), "missing");
 2879 
 2880   if (reachable(src)) {
 2881     Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
 2882   } else {
 2883     lea(rscratch, src);
 2884     Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
 2885   }
 2886 }
 2887 
 2888 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2889   assert(rscratch != noreg || always_reachable(src), "missing");
 2890 
 2891   if (reachable(src)) {
 2892     Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
 2893   } else {
 2894     lea(rscratch, src);
 2895     Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
 2896   }
 2897 }
 2898 
 2899 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
 2900   assert(rscratch != noreg || always_reachable(src), "missing");
 2901 
 2902   if (reachable(src)) {
 2903     Assembler::vbroadcastss(dst, as_Address(src), vector_len);
 2904   } else {
 2905     lea(rscratch, src);
 2906     Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
 2907   }
 2908 }
 2909 
 2910 // Vector float blend
 2911 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
 2912 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
 2913   // WARN: Allow dst == (src1|src2), mask == scratch
 2914   bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
 2915                          !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
 2916   bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
 2917   bool dst_available = dst != mask && (dst != src1 || dst != src2);
 2918   if (blend_emulation && scratch_available && dst_available) {
 2919     if (compute_mask) {
 2920       vpsrad(scratch, mask, 32, vector_len);
 2921       mask = scratch;
 2922     }
 2923     if (dst == src1) {
 2924       vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
 2925       vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
 2926     } else {
 2927       vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
 2928       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
 2929     }
 2930     vpor(dst, dst, scratch, vector_len);
 2931   } else {
 2932     Assembler::vblendvps(dst, src1, src2, mask, vector_len);
 2933   }
 2934 }
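// Note on the emulated blend above: (v)psrad with a shift count larger than
// 31 fills every dword lane with its sign bit (per the Intel SDM), so the
// vpsrad by 32 turns the sign of each mask lane into all-ones or all-zeros.
// The subsequent and/andn/or sequence then selects src2 where the mask is
// set and src1 where it is clear, which matches vblendvps semantics on
// CPUs where the emulation is faster than the native instruction.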
 2935 
 2936 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
 2937 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
 2938   // WARN: Allow dst == (src1|src2), mask == scratch
 2939   bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1 &&
 2940                          !(VM_Version::is_intel_darkmont() && (dst == src1)); // partially fixed on Darkmont
 2941   bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
 2942   bool dst_available = dst != mask && (dst != src1 || dst != src2);
 2943   if (blend_emulation && scratch_available && dst_available) {
 2944     if (compute_mask) {
 2945       vpxor(scratch, scratch, scratch, vector_len);
 2946       vpcmpgtq(scratch, scratch, mask, vector_len);
 2947       mask = scratch;
 2948     }
 2949     if (dst == src1) {
 2950       vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
 2951       vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
 2952     } else {
 2953       vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
 2954       vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
 2955     }
 2956     vpor(dst, dst, scratch, vector_len);
 2957   } else {
 2958     Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
 2959   }
 2960 }
 2961 
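// The assertions in the wrappers below reflect an x86 encoding restriction: VEX
// encodings can only name xmm0-xmm15, while xmm16-xmm31 require EVEX, which for
// the byte/word forms additionally needs AVX512BW (plus AVX512VL for 128/256-bit
// vectors) - hence the supports_avx512vlbw() escape hatch.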
 2962 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2963   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2964   Assembler::vpcmpeqb(dst, nds, src, vector_len);
 2965 }
 2966 
 2967 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
 2968   assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2969   Assembler::vpcmpeqb(dst, src1, src2, vector_len);
 2970 }
 2971 
 2972 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 2973   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2974   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 2975 }
 2976 
 2977 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 2978   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 2979   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 2980 }
 2981 
 2982 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 2983   assert(rscratch != noreg || always_reachable(src), "missing");
 2984 
 2985   if (reachable(src)) {
 2986     Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
 2987   } else {
 2988     lea(rscratch, src);
 2989     Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
 2990   }
 2991 }
 2992 
 2993 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 2994                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 2995   assert(rscratch != noreg || always_reachable(src), "missing");
 2996 
 2997   if (reachable(src)) {
 2998     Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 2999   } else {
 3000     lea(rscratch, src);
 3001     Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3002   }
 3003 }
 3004 
 3005 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3006                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3007   assert(rscratch != noreg || always_reachable(src), "missing");
 3008 
 3009   if (reachable(src)) {
 3010     Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3011   } else {
 3012     lea(rscratch, src);
 3013     Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3014   }
 3015 }
 3016 
 3017 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3018                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3019   assert(rscratch != noreg || always_reachable(src), "missing");
 3020 
 3021   if (reachable(src)) {
 3022     Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3023   } else {
 3024     lea(rscratch, src);
 3025     Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3026   }
 3027 }
 3028 
 3029 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3030                              int comparison, bool is_signed, int vector_len, Register rscratch) {
 3031   assert(rscratch != noreg || always_reachable(src), "missing");
 3032 
 3033   if (reachable(src)) {
 3034     Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
 3035   } else {
 3036     lea(rscratch, src);
 3037     Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
 3038   }
 3039 }
 3040 
 3041 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
 3042   if (width == Assembler::Q) {
 3043     Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
 3044   } else {
 3045     Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
 3046   }
 3047 }
 3048 
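// Emit a packed compare for the given predicate. Only equality and signed
// greater-than exist as actual instructions; the remaining predicates are
// synthesized by swapping the operands and/or inverting the result with an
// all-ones XOR. The constants below appear to be opcode bytes: 0x29/0x37 for
// PCMPEQQ/PCMPGTQ and 0x74+width/0x64+width for PCMPEQB/W/D and PCMPGTB/W/D.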
 3049 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
 3050   int eq_cond_enc = 0x29;
 3051   int gt_cond_enc = 0x37;
 3052   if (width != Assembler::Q) {
 3053     eq_cond_enc = 0x74 + width;
 3054     gt_cond_enc = 0x64 + width;
 3055   }
 3056   switch (cond) {
 3057   case eq:
 3058     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3059     break;
 3060   case neq:
 3061     vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
 3062     vallones(xtmp, vector_len);
 3063     vpxor(dst, xtmp, dst, vector_len);
 3064     break;
 3065   case le:
 3066     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3067     vallones(xtmp, vector_len);
 3068     vpxor(dst, xtmp, dst, vector_len);
 3069     break;
 3070   case nlt:
 3071     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3072     vallones(xtmp, vector_len);
 3073     vpxor(dst, xtmp, dst, vector_len);
 3074     break;
 3075   case lt:
 3076     vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
 3077     break;
 3078   case nle:
 3079     vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
 3080     break;
 3081   default:
 3082     assert(false, "Should not reach here");
 3083   }
 3084 }
 3085 
 3086 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
 3087   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3088   Assembler::vpmovzxbw(dst, src, vector_len);
 3089 }
 3090 
 3091 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
 3092   assert((src->encoding() < 16),"XMM register should be 0-15");
 3093   Assembler::vpmovmskb(dst, src, vector_len);
 3094 }
 3095 
 3096 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3097   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3098   Assembler::vpmullw(dst, nds, src, vector_len);
 3099 }
 3100 
 3101 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3102   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3103   Assembler::vpmullw(dst, nds, src, vector_len);
 3104 }
 3105 
 3106 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3107   assert((UseAVX > 0), "AVX support is needed");
 3108   assert(rscratch != noreg || always_reachable(src), "missing");
 3109 
 3110   if (reachable(src)) {
 3111     Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
 3112   } else {
 3113     lea(rscratch, src);
 3114     Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
 3115   }
 3116 }
 3117 
 3118 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3119   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3120   Assembler::vpsubb(dst, nds, src, vector_len);
 3121 }
 3122 
 3123 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3124   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3125   Assembler::vpsubb(dst, nds, src, vector_len);
 3126 }
 3127 
 3128 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
 3129   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3130   Assembler::vpsubw(dst, nds, src, vector_len);
 3131 }
 3132 
 3133 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
 3134   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3135   Assembler::vpsubw(dst, nds, src, vector_len);
 3136 }
 3137 
 3138 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3139   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3140   Assembler::vpsraw(dst, nds, shift, vector_len);
 3141 }
 3142 
 3143 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3144   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3145   Assembler::vpsraw(dst, nds, shift, vector_len);
 3146 }
 3147 
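// A 64-bit arithmetic right shift only exists as an EVEX (AVX-512) instruction.
// Without AVX512VL the 128/256-bit EVEX forms cannot be encoded, so the operation
// is widened to 512 bits; the extra upper lanes are assumed to be ignored by callers.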
 3148 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3149   assert(UseAVX > 2, "requires AVX-512");
 3150   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3151      vector_len = 2;
 3152   }
 3153   Assembler::evpsraq(dst, nds, shift, vector_len);
 3154 }
 3155 
 3156 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3157   assert(UseAVX > 2, "requires AVX-512");
 3158   if (!VM_Version::supports_avx512vl() && vector_len < 2) {
 3159      vector_len = 2;
 3160   }
 3161   Assembler::evpsraq(dst, nds, shift, vector_len);
 3162 }
 3163 
 3164 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3165   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3166   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3167 }
 3168 
 3169 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3170   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3171   Assembler::vpsrlw(dst, nds, shift, vector_len);
 3172 }
 3173 
 3174 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
 3175   assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3176   Assembler::vpsllw(dst, nds, shift, vector_len);
 3177 }
 3178 
 3179 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
 3180   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3181   Assembler::vpsllw(dst, nds, shift, vector_len);
 3182 }
 3183 
 3184 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
 3185   assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15");
 3186   Assembler::vptest(dst, src);
 3187 }
 3188 
 3189 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
 3190   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3191   Assembler::punpcklbw(dst, src);
 3192 }
 3193 
 3194 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
 3195   assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
 3196   Assembler::pshufd(dst, src, mode);
 3197 }
 3198 
 3199 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
 3200   assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
 3201   Assembler::pshuflw(dst, src, mode);
 3202 }
 3203 
 3204 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3205   assert(rscratch != noreg || always_reachable(src), "missing");
 3206 
 3207   if (reachable(src)) {
 3208     vandpd(dst, nds, as_Address(src), vector_len);
 3209   } else {
 3210     lea(rscratch, src);
 3211     vandpd(dst, nds, Address(rscratch, 0), vector_len);
 3212   }
 3213 }
 3214 
 3215 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3216   assert(rscratch != noreg || always_reachable(src), "missing");
 3217 
 3218   if (reachable(src)) {
 3219     vandps(dst, nds, as_Address(src), vector_len);
 3220   } else {
 3221     lea(rscratch, src);
 3222     vandps(dst, nds, Address(rscratch, 0), vector_len);
 3223   }
 3224 }
 3225 
 3226 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
 3227                             bool merge, int vector_len, Register rscratch) {
 3228   assert(rscratch != noreg || always_reachable(src), "missing");
 3229 
 3230   if (reachable(src)) {
 3231     Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
 3232   } else {
 3233     lea(rscratch, src);
 3234     Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 3235   }
 3236 }
 3237 
 3238 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3239   assert(rscratch != noreg || always_reachable(src), "missing");
 3240 
 3241   if (reachable(src)) {
 3242     vdivsd(dst, nds, as_Address(src));
 3243   } else {
 3244     lea(rscratch, src);
 3245     vdivsd(dst, nds, Address(rscratch, 0));
 3246   }
 3247 }
 3248 
 3249 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3250   assert(rscratch != noreg || always_reachable(src), "missing");
 3251 
 3252   if (reachable(src)) {
 3253     vdivss(dst, nds, as_Address(src));
 3254   } else {
 3255     lea(rscratch, src);
 3256     vdivss(dst, nds, Address(rscratch, 0));
 3257   }
 3258 }
 3259 
 3260 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3261   assert(rscratch != noreg || always_reachable(src), "missing");
 3262 
 3263   if (reachable(src)) {
 3264     vmulsd(dst, nds, as_Address(src));
 3265   } else {
 3266     lea(rscratch, src);
 3267     vmulsd(dst, nds, Address(rscratch, 0));
 3268   }
 3269 }
 3270 
 3271 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3272   assert(rscratch != noreg || always_reachable(src), "missing");
 3273 
 3274   if (reachable(src)) {
 3275     vmulss(dst, nds, as_Address(src));
 3276   } else {
 3277     lea(rscratch, src);
 3278     vmulss(dst, nds, Address(rscratch, 0));
 3279   }
 3280 }
 3281 
 3282 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3283   assert(rscratch != noreg || always_reachable(src), "missing");
 3284 
 3285   if (reachable(src)) {
 3286     vsubsd(dst, nds, as_Address(src));
 3287   } else {
 3288     lea(rscratch, src);
 3289     vsubsd(dst, nds, Address(rscratch, 0));
 3290   }
 3291 }
 3292 
 3293 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3294   assert(rscratch != noreg || always_reachable(src), "missing");
 3295 
 3296   if (reachable(src)) {
 3297     vsubss(dst, nds, as_Address(src));
 3298   } else {
 3299     lea(rscratch, src);
 3300     vsubss(dst, nds, Address(rscratch, 0));
 3301   }
 3302 }
 3303 
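// Scalar negation is implemented as an XOR with a sign-bit mask loaded from 'src'
// (callers are expected to pass a sign-flip constant); no arithmetic instruction
// or rounding is involved.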
 3304 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3305   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3306   assert(rscratch != noreg || always_reachable(src), "missing");
 3307 
 3308   vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3309 }
 3310 
 3311 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
 3312   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
 3313   assert(rscratch != noreg || always_reachable(src), "missing");
 3314 
 3315   vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
 3316 }
 3317 
 3318 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3319   assert(rscratch != noreg || always_reachable(src), "missing");
 3320 
 3321   if (reachable(src)) {
 3322     vxorpd(dst, nds, as_Address(src), vector_len);
 3323   } else {
 3324     lea(rscratch, src);
 3325     vxorpd(dst, nds, Address(rscratch, 0), vector_len);
 3326   }
 3327 }
 3328 
 3329 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3330   assert(rscratch != noreg || always_reachable(src), "missing");
 3331 
 3332   if (reachable(src)) {
 3333     vxorps(dst, nds, as_Address(src), vector_len);
 3334   } else {
 3335     lea(rscratch, src);
 3336     vxorps(dst, nds, Address(rscratch, 0), vector_len);
 3337   }
 3338 }
 3339 
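// AVX1 has 256-bit floating-point logic ops but no 256-bit integer ops, so with
// UseAVX <= 1 a 256-bit integer XOR falls back to vxorpd, which is bitwise equivalent.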
 3340 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3341   assert(rscratch != noreg || always_reachable(src), "missing");
 3342 
 3343   if (UseAVX > 1 || (vector_len < 1)) {
 3344     if (reachable(src)) {
 3345       Assembler::vpxor(dst, nds, as_Address(src), vector_len);
 3346     } else {
 3347       lea(rscratch, src);
 3348       Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
 3349     }
 3350   } else {
 3351     MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
 3352   }
 3353 }
 3354 
 3355 void MacroAssembler::vpermd(XMMRegister dst,  XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 3356   assert(rscratch != noreg || always_reachable(src), "missing");
 3357 
 3358   if (reachable(src)) {
 3359     Assembler::vpermd(dst, nds, as_Address(src), vector_len);
 3360   } else {
 3361     lea(rscratch, src);
 3362     Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
 3363   }
 3364 }
 3365 
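// JNI handles (jobjects) carry a type tag in their low bits (JNIHandles::tag_mask):
// local handles are untagged, while global and weak-global handles are tagged and
// must have the tag subtracted from the handle before the oop can be loaded.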
 3366 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
 3367   const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
 3368   STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
 3369   // The inverted mask is sign-extended
 3370   andptr(possibly_non_local, inverted_mask);
 3371 }
 3372 
 3373 void MacroAssembler::resolve_jobject(Register value,
 3374                                      Register tmp) {
 3375   Register thread = r15_thread;
 3376   assert_different_registers(value, thread, tmp);
 3377   Label done, tagged, weak_tagged;
 3378   testptr(value, value);
 3379   jcc(Assembler::zero, done);           // Use null as-is.
 3380   testptr(value, JNIHandles::tag_mask); // Test for tag.
 3381   jcc(Assembler::notZero, tagged);
 3382 
 3383   // Resolve local handle
 3384   access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp);
 3385   verify_oop(value);
 3386   jmp(done);
 3387 
 3388   bind(tagged);
 3389   testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
 3390   jcc(Assembler::notZero, weak_tagged);
 3391 
 3392   // Resolve global handle
 3393   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
 3394   verify_oop(value);
 3395   jmp(done);
 3396 
 3397   bind(weak_tagged);
 3398   // Resolve jweak.
 3399   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 3400                  value, Address(value, -JNIHandles::TypeTag::weak_global), tmp);
 3401   verify_oop(value);
 3402 
 3403   bind(done);
 3404 }
 3405 
 3406 void MacroAssembler::resolve_global_jobject(Register value,
 3407                                             Register tmp) {
 3408   Register thread = r15_thread;
 3409   assert_different_registers(value, thread, tmp);
 3410   Label done;
 3411 
 3412   testptr(value, value);
 3413   jcc(Assembler::zero, done);           // Use null as-is.
 3414 
 3415 #ifdef ASSERT
 3416   {
 3417     Label valid_global_tag;
 3418     testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
 3419     jcc(Assembler::notZero, valid_global_tag);
 3420     stop("non global jobject using resolve_global_jobject");
 3421     bind(valid_global_tag);
 3422   }
 3423 #endif
 3424 
 3425   // Resolve global handle
 3426   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp);
 3427   verify_oop(value);
 3428 
 3429   bind(done);
 3430 }
 3431 
 3432 void MacroAssembler::subptr(Register dst, int32_t imm32) {
 3433   subq(dst, imm32);
 3434 }
 3435 
 3436 // Force generation of a 4-byte immediate value even if it fits into 8 bits
 3437 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
 3438   subq_imm32(dst, imm32);
 3439 }
 3440 
 3441 void MacroAssembler::subptr(Register dst, Register src) {
 3442   subq(dst, src);
 3443 }
 3444 
 3445 // C++ bool manipulation
 3446 void MacroAssembler::testbool(Register dst) {
 3447   if (sizeof(bool) == 1)
 3448     testb(dst, 0xff);
 3449   else if (sizeof(bool) == 2) {
 3450     // testw implementation needed for two byte bools
 3451     ShouldNotReachHere();
 3452   } else if (sizeof(bool) == 4)
 3453     testl(dst, dst);
 3454   else
 3455     // unsupported
 3456     ShouldNotReachHere();
 3457 }
 3458 
 3459 void MacroAssembler::testptr(Register dst, Register src) {
 3460   testq(dst, src);
 3461 }
 3462 
 3463 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
 3464 void MacroAssembler::tlab_allocate(Register obj,
 3465                                    Register var_size_in_bytes,
 3466                                    int con_size_in_bytes,
 3467                                    Register t1,
 3468                                    Register t2,
 3469                                    Label& slow_case) {
 3470   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 3471   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 3472 }
 3473 
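// Caller-saved (call-clobbered) GP registers for the current ABI: rax, rcx, rdx and
// r8-r11 everywhere, plus rsi/rdi on System V (they are callee-saved on Windows),
// plus the APX extended GPRs (r16 and up) when UseAPX is enabled.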
 3474 RegSet MacroAssembler::call_clobbered_gp_registers() {
 3475   RegSet regs;
 3476   regs += RegSet::of(rax, rcx, rdx);
 3477 #ifndef _WINDOWS
 3478   regs += RegSet::of(rsi, rdi);
 3479 #endif
 3480   regs += RegSet::range(r8, r11);
 3481   if (UseAPX) {
 3482     regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
 3483   }
 3484   return regs;
 3485 }
 3486 
 3487 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
 3488   int num_xmm_registers = XMMRegister::available_xmm_registers();
 3489 #if defined(_WINDOWS)
 3490   XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
 3491   if (num_xmm_registers > 16) {
 3492      result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
 3493   }
 3494   return result;
 3495 #else
 3496   return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
 3497 #endif
 3498 }
 3499 
 3500 // C1 only ever uses the first double/float of the XMM register.
 3501 static int xmm_save_size() { return sizeof(double); }
 3502 
 3503 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 3504   masm->movdbl(Address(rsp, offset), reg);
 3505 }
 3506 
 3507 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
 3508   masm->movdbl(reg, Address(rsp, offset));
 3509 }
 3510 
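// Stack layout used by push/pop_call_clobbered_registers_except: the GP registers
// are spilled at the bottom of the reserved area (offset 0, padded up to stack
// alignment) and the XMM registers, when saved, sit directly above at gp_area_size.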
 3511 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
 3512                                   bool save_fpu, int& gp_area_size, int& xmm_area_size) {
 3513 
 3514   gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
 3515                          StackAlignmentInBytes);
 3516   xmm_area_size = save_fpu ? xmm_registers.size() * xmm_save_size() : 0;
 3517 
 3518   return gp_area_size + xmm_area_size;
 3519 }
 3520 
 3521 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
 3522   block_comment("push_call_clobbered_registers start");
 3523   // Regular registers
 3524   RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;
 3525 
 3526   int gp_area_size;
 3527   int xmm_area_size;
 3528   int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
 3529                                                gp_area_size, xmm_area_size);
 3530   subptr(rsp, total_save_size);
 3531 
 3532   push_set(gp_registers_to_push, 0);
 3533 
 3534   if (save_fpu) {
 3535     push_set(call_clobbered_xmm_registers(), gp_area_size);
 3536   }
 3537 
 3538   block_comment("push_call_clobbered_registers end");
 3539 }
 3540 
 3541 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
 3542   block_comment("pop_call_clobbered_registers start");
 3543 
 3544   RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;
 3545 
 3546   int gp_area_size;
 3547   int xmm_area_size;
 3548   int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
 3549                                                gp_area_size, xmm_area_size);
 3550 
 3551   if (restore_fpu) {
 3552     pop_set(call_clobbered_xmm_registers(), gp_area_size);
 3553   }
 3554 
 3555   pop_set(gp_registers_to_pop, 0);
 3556 
 3557   addptr(rsp, total_save_size);
 3558 
 3559   vzeroupper();
 3560 
 3561   block_comment("pop_call_clobbered_registers end");
 3562 }
 3563 
 3564 void MacroAssembler::push_set(XMMRegSet set, int offset) {
 3565   assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
 3566   int spill_offset = offset;
 3567 
 3568   for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
 3569     save_xmm_register(this, spill_offset, *it);
 3570     spill_offset += xmm_save_size();
 3571   }
 3572 }
 3573 
 3574 void MacroAssembler::pop_set(XMMRegSet set, int offset) {
 3575   int restore_size = set.size() * xmm_save_size();
 3576   assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");
 3577 
 3578   int restore_offset = offset + restore_size - xmm_save_size();
 3579 
 3580   for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
 3581     restore_xmm_register(this, restore_offset, *it);
 3582     restore_offset -= xmm_save_size();
 3583   }
 3584 }
 3585 
 3586 void MacroAssembler::push_set(RegSet set, int offset) {
 3587   int spill_offset;
 3588   if (offset == -1) {
 3589     int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3590     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 3591     subptr(rsp, aligned_size);
 3592     spill_offset = 0;
 3593   } else {
 3594     spill_offset = offset;
 3595   }
 3596 
 3597   for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
 3598     movptr(Address(rsp, spill_offset), *it);
 3599     spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3600   }
 3601 }
 3602 
 3603 void MacroAssembler::pop_set(RegSet set, int offset) {
 3604 
 3605   int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 3606   int restore_size = set.size() * gp_reg_size;
 3607   int aligned_size = align_up(restore_size, StackAlignmentInBytes);
 3608 
 3609   int restore_offset;
 3610   if (offset == -1) {
 3611     restore_offset = restore_size - gp_reg_size;
 3612   } else {
 3613     restore_offset = offset + restore_size - gp_reg_size;
 3614   }
 3615   for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
 3616     movptr(*it, Address(rsp, restore_offset));
 3617     restore_offset -= gp_reg_size;
 3618   }
 3619 
 3620   if (offset == -1) {
 3621     addptr(rsp, aligned_size);
 3622   }
 3623 }
 3624 
 3625 // Preserves the contents of address; destroys the contents of length_in_bytes and temp.
 3626 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
 3627   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
 3628   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
 3629   Label done;
 3630 
 3631   testptr(length_in_bytes, length_in_bytes);
 3632   jcc(Assembler::zero, done);
 3633 
 3634   // convert the byte count into a word count and clear one word per loop iteration
 3635   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 3636 #ifdef ASSERT
 3637   {
 3638     Label L;
 3639     testptr(length_in_bytes, BytesPerWord - 1);
 3640     jcc(Assembler::zero, L);
 3641     stop("length must be a multiple of BytesPerWord");
 3642     bind(L);
 3643   }
 3644 #endif
 3645   Register index = length_in_bytes;
 3646   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
 3647   if (UseIncDec) {
 3648     shrptr(index, 3);  // divide by 8 (BytesPerWord)
 3649   } else {
 3650     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
 3651     shrptr(index, 1);
 3652   }
 3653 
 3654   // initialize remaining object fields: index now holds the number of words to clear
 3655   {
 3656     Label loop;
 3657     bind(loop);
 3658     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
 3659     decrement(index);
 3660     jcc(Assembler::notZero, loop);
 3661   }
 3662 
 3663   bind(done);
 3664 }
 3665 
 3666 // Look up the method for a megamorphic invokeinterface call.
 3667 // The target method is determined by <intf_klass, itable_index>.
 3668 // The receiver klass is in recv_klass.
 3669 // On success, the result will be in method_result, and execution falls through.
 3670 // On failure, execution transfers to the given label.
 3671 void MacroAssembler::lookup_interface_method(Register recv_klass,
 3672                                              Register intf_klass,
 3673                                              RegisterOrConstant itable_index,
 3674                                              Register method_result,
 3675                                              Register scan_temp,
 3676                                              Label& L_no_such_interface,
 3677                                              bool return_method) {
 3678   assert_different_registers(recv_klass, intf_klass, scan_temp);
 3679   assert_different_registers(method_result, intf_klass, scan_temp);
 3680   assert(recv_klass != method_result || !return_method,
 3681          "recv_klass can be destroyed when method isn't needed");
 3682 
 3683   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
 3684          "caller must use same register for non-constant itable index as for method");
 3685 
 3686   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
 3687   int vtable_base = in_bytes(Klass::vtable_start_offset());
 3688   int itentry_off = in_bytes(itableMethodEntry::method_offset());
 3689   int scan_step   = itableOffsetEntry::size() * wordSize;
 3690   int vte_size    = vtableEntry::size_in_bytes();
 3691   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 3692   assert(vte_size == wordSize, "else adjust times_vte_scale");
 3693 
 3694   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 3695 
 3696   // Could store the aligned, prescaled offset in the klass.
 3697   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
 3698 
 3699   if (return_method) {
 3700     // Adjust recv_klass by scaled itable_index, so we can free itable_index.
 3701     assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 3702     lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
 3703   }
 3704 
 3705   // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
 3706   //   if (scan->interface() == intf) {
 3707   //     result = (klass + scan->offset() + itable_index);
 3708   //   }
 3709   // }
 3710   Label search, found_method;
 3711 
 3712   for (int peel = 1; peel >= 0; peel--) {
 3713     movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
 3714     cmpptr(intf_klass, method_result);
 3715 
 3716     if (peel) {
 3717       jccb(Assembler::equal, found_method);
 3718     } else {
 3719       jccb(Assembler::notEqual, search);
 3720       // (invert the test to fall through to found_method...)
 3721     }
 3722 
 3723     if (!peel)  break;
 3724 
 3725     bind(search);
 3726 
 3727     // Check that the previous entry is non-null.  A null entry means that
 3728     // the receiver class doesn't implement the interface, and wasn't the
 3729     // same as when the caller was compiled.
 3730     testptr(method_result, method_result);
 3731     jcc(Assembler::zero, L_no_such_interface);
 3732     addptr(scan_temp, scan_step);
 3733   }
 3734 
 3735   bind(found_method);
 3736 
 3737   if (return_method) {
 3738     // Got a hit.
 3739     movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
 3740     movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
 3741   }
 3742 }
 3743 
 3744 // Look up the method for a megamorphic invokeinterface call in a single pass over itable:
 3745 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
 3746 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
 3747 // The target method is determined by <holder_klass, itable_index>.
 3748 // The receiver klass is in recv_klass.
 3749 // On success, the result will be in method_result, and execution falls through.
 3750 // On failure, execution transfers to the given label.
 3751 void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
 3752                                                   Register holder_klass,
 3753                                                   Register resolved_klass,
 3754                                                   Register method_result,
 3755                                                   Register scan_temp,
 3756                                                   Register temp_reg2,
 3757                                                   Register receiver,
 3758                                                   int itable_index,
 3759                                                   Label& L_no_such_interface) {
 3760   assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
 3761   Register temp_itbl_klass = method_result;
 3762   Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register when no second temp is supplied
 3763 
 3764   int vtable_base = in_bytes(Klass::vtable_start_offset());
 3765   int itentry_off = in_bytes(itableMethodEntry::method_offset());
 3766   int scan_step = itableOffsetEntry::size() * wordSize;
 3767   int vte_size = vtableEntry::size_in_bytes();
 3768   int ioffset = in_bytes(itableOffsetEntry::interface_offset());
 3769   int ooffset = in_bytes(itableOffsetEntry::offset_offset());
 3770   Address::ScaleFactor times_vte_scale = Address::times_ptr;
 3771   assert(vte_size == wordSize, "adjust times_vte_scale");
 3772 
 3773   Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;
 3774 
 3775   // temp_itbl_klass = recv_klass.itable[0]
 3776   // scan_temp = &recv_klass.itable[0] + step
 3777   movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
 3778   movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
 3779   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
 3780   xorptr(temp_reg, temp_reg);
 3781 
 3782   // Initial checks:
 3783   //   - if (holder_klass != resolved_klass), go to "scan for resolved"
 3784   //   - if (itable[0] == 0), no such interface
 3785   //   - if (itable[0] == holder_klass), shortcut to "holder found"
 3786   cmpptr(holder_klass, resolved_klass);
 3787   jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
 3788   testptr(temp_itbl_klass, temp_itbl_klass);
 3789   jccb(Assembler::zero, L_no_such_interface);
 3790   cmpptr(holder_klass, temp_itbl_klass);
 3791   jccb(Assembler::equal, L_holder_found);
 3792 
 3793   // Loop: Look for holder_klass record in itable
 3794   //   do {
 3795   //     tmp = itable[index];
 3796   //     index += step;
 3797   //     if (tmp == holder_klass) {
 3798   //       goto L_holder_found; // Found!
 3799   //     }
 3800   //   } while (tmp != 0);
 3801   //   goto L_no_such_interface // Not found.
 3802   Label L_scan_holder;
 3803   bind(L_scan_holder);
 3804     movptr(temp_itbl_klass, Address(scan_temp, 0));
 3805     addptr(scan_temp, scan_step);
 3806     cmpptr(holder_klass, temp_itbl_klass);
 3807     jccb(Assembler::equal, L_holder_found);
 3808     testptr(temp_itbl_klass, temp_itbl_klass);
 3809     jccb(Assembler::notZero, L_scan_holder);
 3810 
 3811   jmpb(L_no_such_interface);
 3812 
 3813   // Loop: Look for resolved_class record in itable
 3814   //   do {
 3815   //     tmp = itable[index];
 3816   //     index += step;
 3817   //     if (tmp == holder_klass) {
 3818   //        // Also check if we have met a holder klass
 3819   //        holder_tmp = itable[index-step-ioffset];
 3820   //     }
 3821   //     if (tmp == resolved_klass) {
 3822   //        goto L_resolved_found;  // Found!
 3823   //     }
 3824   //   } while (tmp != 0);
 3825   //   goto L_no_such_interface // Not found.
 3826   //
 3827   Label L_loop_scan_resolved;
 3828   bind(L_loop_scan_resolved);
 3829     movptr(temp_itbl_klass, Address(scan_temp, 0));
 3830     addptr(scan_temp, scan_step);
 3831     bind(L_loop_scan_resolved_entry);
 3832     cmpptr(holder_klass, temp_itbl_klass);
 3833     cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
 3834     cmpptr(resolved_klass, temp_itbl_klass);
 3835     jccb(Assembler::equal, L_resolved_found);
 3836     testptr(temp_itbl_klass, temp_itbl_klass);
 3837     jccb(Assembler::notZero, L_loop_scan_resolved);
 3838 
 3839   jmpb(L_no_such_interface);
 3840 
 3841   Label L_ready;
 3842 
 3843   // See if we already have a holder klass. If not, go and scan for it.
 3844   bind(L_resolved_found);
 3845   testptr(temp_reg, temp_reg);
 3846   jccb(Assembler::zero, L_scan_holder);
 3847   jmpb(L_ready);
 3848 
 3849   bind(L_holder_found);
 3850   movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
 3851 
 3852   // Finally, temp_reg contains holder_klass vtable offset
 3853   bind(L_ready);
 3854   assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 3855   if (temp_reg2 == noreg) { // recv_klass was reused as a temp above and is clobbered; reload the klass from the receiver
 3856     load_klass(scan_temp, receiver, noreg);
 3857     movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
 3858   } else {
 3859     movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
 3860   }
 3861 }
 3862 
 3863 
 3864 // virtual method calling
 3865 void MacroAssembler::lookup_virtual_method(Register recv_klass,
 3866                                            RegisterOrConstant vtable_index,
 3867                                            Register method_result) {
 3868   const ByteSize base = Klass::vtable_start_offset();
 3869   assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
 3870   Address vtable_entry_addr(recv_klass,
 3871                             vtable_index, Address::times_ptr,
 3872                             base + vtableEntry::method_offset());
 3873   movptr(method_result, vtable_entry_addr);
 3874 }
 3875 
 3876 
 3877 void MacroAssembler::check_klass_subtype(Register sub_klass,
 3878                            Register super_klass,
 3879                            Register temp_reg,
 3880                            Label& L_success) {
 3881   Label L_failure;
 3882   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, nullptr);
 3883   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
 3884   bind(L_failure);
 3885 }
 3886 
 3887 
 3888 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
 3889                                                    Register super_klass,
 3890                                                    Register temp_reg,
 3891                                                    Label* L_success,
 3892                                                    Label* L_failure,
 3893                                                    Label* L_slow_path,
 3894                                         RegisterOrConstant super_check_offset) {
 3895   assert_different_registers(sub_klass, super_klass, temp_reg);
 3896   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
 3897   if (super_check_offset.is_register()) {
 3898     assert_different_registers(sub_klass, super_klass,
 3899                                super_check_offset.as_register());
 3900   } else if (must_load_sco) {
 3901     assert(temp_reg != noreg, "supply either a temp or a register offset");
 3902   }
 3903 
 3904   Label L_fallthrough;
 3905   int label_nulls = 0;
 3906   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 3907   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 3908   if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
 3909   assert(label_nulls <= 1, "at most one null in the batch");
 3910 
 3911   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 3912   int sco_offset = in_bytes(Klass::super_check_offset_offset());
 3913   Address super_check_offset_addr(super_klass, sco_offset);
 3914 
 3915   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
 3916   // range of a jccb.  If this routine grows larger, reconsider at
 3917   // least some of these.
 3918 #define local_jcc(assembler_cond, label)                                \
 3919   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
 3920   else                             jcc( assembler_cond, label) /*omit semi*/
 3921 
 3922   // Hacked jmp, which may only be used just before L_fallthrough.
 3923 #define final_jmp(label)                                                \
 3924   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
 3925   else                            jmp(label)                /*omit semi*/
 3926 
 3927   // If the pointers are equal, we are done (e.g., String[] elements).
 3928   // This self-check enables sharing of secondary supertype arrays among
 3929   // non-primary types such as array-of-interface.  Otherwise, each such
 3930   // type would need its own customized SSA.
 3931   // We move this check to the front of the fast path because many
 3932   // type checks are in fact trivially successful in this manner,
 3933   // so we get a nicely predicted branch right at the start of the check.
 3934   cmpptr(sub_klass, super_klass);
 3935   local_jcc(Assembler::equal, *L_success);
 3936 
 3937   // Check the supertype display:
 3938   if (must_load_sco) {
 3939     // Positive movl does right thing on LP64.
 3940     movl(temp_reg, super_check_offset_addr);
 3941     super_check_offset = RegisterOrConstant(temp_reg);
 3942   }
 3943   Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
 3944   cmpptr(super_klass, super_check_addr); // load displayed supertype
 3945 
 3946   // This check has worked decisively for primary supers.
 3947   // Secondary supers are sought in the super_cache ('super_cache_addr').
 3948   // (Secondary supers are interfaces and very deeply nested subtypes.)
 3949   // This works in the same check above because of a tricky aliasing
 3950   // between the super_cache and the primary super display elements.
 3951   // (The 'super_check_addr' can address either, as the case requires.)
 3952   // Note that the cache is updated below if it does not help us find
 3953   // what we need immediately.
 3954   // So if it was a primary super, we can just fail immediately.
 3955   // Otherwise, it's the slow path for us (no success at this point).
 3956 
 3957   if (super_check_offset.is_register()) {
 3958     local_jcc(Assembler::equal, *L_success);
 3959     cmpl(super_check_offset.as_register(), sc_offset);
 3960     if (L_failure == &L_fallthrough) {
 3961       local_jcc(Assembler::equal, *L_slow_path);
 3962     } else {
 3963       local_jcc(Assembler::notEqual, *L_failure);
 3964       final_jmp(*L_slow_path);
 3965     }
 3966   } else if (super_check_offset.as_constant() == sc_offset) {
 3967     // Need a slow path; fast failure is impossible.
 3968     if (L_slow_path == &L_fallthrough) {
 3969       local_jcc(Assembler::equal, *L_success);
 3970     } else {
 3971       local_jcc(Assembler::notEqual, *L_slow_path);
 3972       final_jmp(*L_success);
 3973     }
 3974   } else {
 3975     // No slow path; it's a fast decision.
 3976     if (L_failure == &L_fallthrough) {
 3977       local_jcc(Assembler::equal, *L_success);
 3978     } else {
 3979       local_jcc(Assembler::notEqual, *L_failure);
 3980       final_jmp(*L_success);
 3981     }
 3982   }
 3983 
 3984   bind(L_fallthrough);
 3985 
 3986 #undef local_jcc
 3987 #undef final_jmp
 3988 }
 3989 
 3990 
 3991 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
 3992                                                           Register super_klass,
 3993                                                           Register temp_reg,
 3994                                                           Register temp2_reg,
 3995                                                           Label* L_success,
 3996                                                           Label* L_failure,
 3997                                                           bool set_cond_codes) {
 3998   assert_different_registers(sub_klass, super_klass, temp_reg);
 3999   if (temp2_reg != noreg)
 4000     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
 4001 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
 4002 
 4003   Label L_fallthrough;
 4004   int label_nulls = 0;
 4005   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4006   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4007   assert(label_nulls <= 1, "at most one null in the batch");
 4008 
 4009   // a couple of useful fields in sub_klass:
 4010   int ss_offset = in_bytes(Klass::secondary_supers_offset());
 4011   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 4012   Address secondary_supers_addr(sub_klass, ss_offset);
 4013   Address super_cache_addr(     sub_klass, sc_offset);
 4014 
 4015   // Do a linear scan of the secondary super-klass chain.
 4016   // This code is rarely used, so simplicity is a virtue here.
 4017   // The repne_scan instruction uses fixed registers, which we must spill.
 4018   // Don't worry too much about pre-existing connections with the input regs.
 4019 
 4020   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
 4021   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
 4022 
 4023   // Get super_klass value into rax (even if it was in rdi or rcx).
 4024   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
 4025   if (super_klass != rax) {
 4026     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
 4027     mov(rax, super_klass);
 4028   }
 4029   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
 4030   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
 4031 
 4032 #ifndef PRODUCT
 4033   uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
 4034   ExternalAddress pst_counter_addr((address) pst_counter);
 4035   lea(rcx, pst_counter_addr);
 4036   incrementl(Address(rcx, 0));
 4037 #endif //PRODUCT
 4038 
 4039   // We will consult the secondary-super array.
 4040   movptr(rdi, secondary_supers_addr);
 4041   // Load the array length.  (Positive movl does right thing on LP64.)
 4042   movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
 4043   // Skip to start of data.
 4044   addptr(rdi, Array<Klass*>::base_offset_in_bytes());
 4045 
 4046   // Scan RCX words at [RDI] for an occurrence of RAX.
 4047   // Set NZ/Z based on last compare.
 4048   // If RCX == 0, 'repne' performs no iterations and leaves the flags unchanged
 4049   // (only the repeated scas instruction updates them), so set Z = 0 (not equal)
 4050   // beforehand to indicate that the class was not found.
 4051 
 4052   testptr(rax, rax); // Set Z = 0
 4053   repne_scan();
 4054 
 4055   // Unspill the temp. registers:
 4056   if (pushed_rdi)  pop(rdi);
 4057   if (pushed_rcx)  pop(rcx);
 4058   if (pushed_rax)  pop(rax);
 4059 
 4060   if (set_cond_codes) {
 4061     // Special hack for the AD files:  rdi is guaranteed non-zero.
 4062     assert(!pushed_rdi, "rdi must be left non-null");
 4063     // Also, the condition codes are properly set Z/NZ on succeed/failure.
 4064   }
 4065 
 4066   if (L_failure == &L_fallthrough)
 4067         jccb(Assembler::notEqual, *L_failure);
 4068   else  jcc(Assembler::notEqual, *L_failure);
 4069 
 4070   // Success.  Cache the super we found and proceed in triumph.
 4071   movptr(super_cache_addr, super_klass);
 4072 
 4073   if (L_success != &L_fallthrough) {
 4074     jmp(*L_success);
 4075   }
 4076 
 4077 #undef IS_A_TEMP
 4078 
 4079   bind(L_fallthrough);
 4080 }
 4081 
 4082 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4083                                                    Register super_klass,
 4084                                                    Register temp_reg,
 4085                                                    Register temp2_reg,
 4086                                                    Label* L_success,
 4087                                                    Label* L_failure,
 4088                                                    bool set_cond_codes) {
 4089   assert(set_cond_codes == false, "must be false on 64-bit x86");
 4090   check_klass_subtype_slow_path
 4091     (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg,
 4092      L_success, L_failure);
 4093 }
 4094 
 4095 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 4096                                                    Register super_klass,
 4097                                                    Register temp_reg,
 4098                                                    Register temp2_reg,
 4099                                                    Register temp3_reg,
 4100                                                    Register temp4_reg,
 4101                                                    Label* L_success,
 4102                                                    Label* L_failure) {
 4103   if (UseSecondarySupersTable) {
 4104     check_klass_subtype_slow_path_table
 4105       (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg,
 4106        L_success, L_failure);
 4107   } else {
 4108     check_klass_subtype_slow_path_linear
 4109       (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false);
 4110   }
 4111 }
 4112 
 4113 Register MacroAssembler::allocate_if_noreg(Register r,
 4114                                   RegSetIterator<Register> &available_regs,
 4115                                   RegSet &regs_to_push) {
 4116   if (!r->is_valid()) {
 4117     r = *available_regs++;
 4118     regs_to_push += r;
 4119   }
 4120   return r;
 4121 }
 4122 
 4123 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
 4124                                                          Register super_klass,
 4125                                                          Register temp_reg,
 4126                                                          Register temp2_reg,
 4127                                                          Register temp3_reg,
 4128                                                          Register result_reg,
 4129                                                          Label* L_success,
 4130                                                          Label* L_failure) {
 4131   // NB! Callers may assume that, when temp2_reg is a valid register,
 4132   // this code sets it to a nonzero value.
 4133   bool temp2_reg_was_valid = temp2_reg->is_valid();
 4134 
 4135   RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
 4136 
 4137   Label L_fallthrough;
 4138   int label_nulls = 0;
 4139   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4140   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4141   assert(label_nulls <= 1, "at most one null in the batch");
 4142 
 4143   BLOCK_COMMENT("check_klass_subtype_slow_path_table");
 4144 
 4145   RegSetIterator<Register> available_regs
 4146     = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin();
 4147 
 4148   RegSet pushed_regs;
 4149 
 4150   temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
 4151   temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
 4152   temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
 4153   result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
 4154   Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs);
 4155 
 4156   assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg);
 4157 
 4158   {
 4159 
 4160     int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
 4161     int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
 4162     subptr(rsp, aligned_size);
 4163     push_set(pushed_regs, 0);
 4164 
 4165     lookup_secondary_supers_table_var(sub_klass,
 4166                                       super_klass,
 4167                                       temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg);
 4168     cmpq(result_reg, 0);
 4169 
 4170     // Unspill the temp. registers:
 4171     pop_set(pushed_regs, 0);
 4172     // Increment SP but do not clobber flags.
 4173     lea(rsp, Address(rsp, aligned_size));
 4174   }
 4175 
 4176   if (temp2_reg_was_valid) {
 4177     movq(temp2_reg, 1);
 4178   }
 4179 
 4180   jcc(Assembler::notEqual, *L_failure);
 4181 
 4182   if (L_success != &L_fallthrough) {
 4183     jmp(*L_success);
 4184   }
 4185 
 4186   bind(L_fallthrough);
 4187 }
 4188 
 4189 // Count set bits (population count). Uses POPCNT when available and otherwise
 4190 // falls back to a software loop (POPCNT was introduced with SSE4.2 in 2008).
 4191 void MacroAssembler::population_count(Register dst, Register src,
 4192                                       Register scratch1, Register scratch2) {
 4193   assert_different_registers(src, scratch1, scratch2);
 4194   if (UsePopCountInstruction) {
 4195     Assembler::popcntq(dst, src);
 4196   } else {
 4197     assert_different_registers(src, scratch1, scratch2);
 4198     assert_different_registers(dst, scratch1, scratch2);
 4199     Label loop, done;
 4200 
 4201     mov(scratch1, src);
 4202     // dst = 0;
 4203     // while(scratch1 != 0) {
 4204     //   dst++;
 4205     //   scratch1 &= (scratch1 - 1);
 4206     // }
 4207     xorl(dst, dst);
 4208     testq(scratch1, scratch1);
 4209     jccb(Assembler::equal, done);
 4210     {
 4211       bind(loop);
 4212       incq(dst);
 4213       movq(scratch2, scratch1);
 4214       decq(scratch2);
 4215       andq(scratch1, scratch2);
 4216       jccb(Assembler::notEqual, loop);
 4217     }
 4218     bind(done);
 4219   }
 4220 #ifdef ASSERT
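        // Poison the scratch registers so debug builds catch any caller relying on their contents.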
 4221   mov64(scratch1, 0xCafeBabeDeadBeef);
 4222   movq(scratch2, scratch1);
 4223 #endif
 4224 }
 4225 
 4226 // Ensure that the inline code and the stub are using the same registers.
 4227 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
 4228 do {                                                                 \
 4229   assert(r_super_klass  == rax, "mismatch");                         \
 4230   assert(r_array_base   == rbx, "mismatch");                         \
 4231   assert(r_array_length == rcx, "mismatch");                         \
 4232   assert(r_array_index  == rdx, "mismatch");                         \
 4233   assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
 4234   assert(r_bitmap       == r11 || r_bitmap    == noreg, "mismatch"); \
 4235   assert(result         == rdi || result      == noreg, "mismatch"); \
 4236 } while(0)
 4237 
 4238 // Versions of salq and rorq that don't need count to be in rcx
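      // When the count register is not rcx, it is temporarily exchanged with rcx around the
      // operation, so both registers keep their original values.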
 4239 
 4240 void MacroAssembler::salq(Register dest, Register count) {
 4241   if (count == rcx) {
 4242     Assembler::salq(dest);
 4243   } else {
 4244     assert_different_registers(rcx, dest);
 4245     xchgq(rcx, count);
 4246     Assembler::salq(dest);
 4247     xchgq(rcx, count);
 4248   }
 4249 }
 4250 
 4251 void MacroAssembler::rorq(Register dest, Register count) {
 4252   if (count == rcx) {
 4253     Assembler::rorq(dest);
 4254   } else {
 4255     assert_different_registers(rcx, dest);
 4256     xchgq(rcx, count);
 4257     Assembler::rorq(dest);
 4258     xchgq(rcx, count);
 4259   }
 4260 }
 4261 
 4264 // At runtime, return 0 in result if r_super_klass is a superclass of
 4265 // r_sub_klass, otherwise return nonzero. Use this if you know the
 4266 // super_klass_slot of the class you're looking for. This is always
 4267 // the case for instanceof and checkcast.
 4268 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
 4269                                                          Register r_super_klass,
 4270                                                          Register temp1,
 4271                                                          Register temp2,
 4272                                                          Register temp3,
 4273                                                          Register temp4,
 4274                                                          Register result,
 4275                                                          u1 super_klass_slot) {
 4276   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
 4277 
 4278   Label L_fallthrough, L_success, L_failure;
 4279 
 4280   BLOCK_COMMENT("lookup_secondary_supers_table {");
 4281 
 4282   const Register
 4283     r_array_index  = temp1,
 4284     r_array_length = temp2,
 4285     r_array_base   = temp3,
 4286     r_bitmap       = temp4;
 4287 
 4288   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
 4289 
 4290   xorq(result, result); // = 0
 4291 
 4292   movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
 4293   movq(r_array_index, r_bitmap);
 4294 
 4295   // First check the bitmap to see if super_klass might be present. If
 4296   // the bit is zero, we are certain that super_klass is not one of
 4297   // the secondary supers.
 4298   u1 bit = super_klass_slot;
 4299   {
 4300     // NB: If the count in an x86 shift instruction is 0, the flags are
 4301     // not affected, so we do a testq instead.
 4302     int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
 4303     if (shift_count != 0) {
 4304       salq(r_array_index, shift_count);
 4305     } else {
 4306       testq(r_array_index, r_array_index);
 4307     }
 4308   }
 4309   // We test the MSB of r_array_index, i.e. its sign bit
 4310   jcc(Assembler::positive, L_failure);
 4311 
 4312   // Get the first array index that can contain super_klass into r_array_index.
 4313   if (bit != 0) {
 4314     population_count(r_array_index, r_array_index, temp2, temp3);
 4315   } else {
 4316     movl(r_array_index, 1);
 4317   }
 4318   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
 4319 
 4320   // We will consult the secondary-super array.
 4321   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4322 
 4323   // We're asserting that the first word in an Array<Klass*> is the
 4324   // length, and the second word is the first word of the data. If
 4325   // that ever changes, r_array_base will have to be adjusted here.
 4326   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
 4327   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
 4328 
 4329   cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4330   jccb(Assembler::equal, L_success);
 4331 
 4332   // Is there another entry to check? Consult the bitmap.
 4333   btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
 4334   jccb(Assembler::carryClear, L_failure);
 4335 
 4336   // Linear probe. Rotate the bitmap so that the next bit to test is
 4337   // in Bit 1.
 4338   if (bit != 0) {
 4339     rorq(r_bitmap, bit);
 4340   }
 4341 
 4342   // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
 4343   // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
 4344   // Kills: r_array_length.
 4345   // Returns: result.
 4346   call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
 4347   // Result (0/1) is in rdi
 4348   jmpb(L_fallthrough);
 4349 
 4350   bind(L_failure);
 4351   incq(result); // 0 => 1
 4352 
 4353   bind(L_success);
 4354   // result = 0;
 4355 
 4356   bind(L_fallthrough);
 4357   BLOCK_COMMENT("} lookup_secondary_supers_table");
 4358 
 4359   if (VerifySecondarySupers) {
 4360     verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
 4361                                   temp1, temp2, temp3);
 4362   }
 4363 }
 4364 
 4365 // At runtime, return 0 in result if r_super_klass is a superclass of
 4366 // r_sub_klass, otherwise return nonzero. Use this version of
 4367 // lookup_secondary_supers_table() if you don't know ahead of time
 4368 // which superclass will be searched for. Used by interpreter and
 4369 // runtime stubs. It is larger and has somewhat greater latency than
 4370 // the version above, which takes a constant super_klass_slot.
 4371 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
 4372                                                        Register r_super_klass,
 4373                                                        Register temp1,
 4374                                                        Register temp2,
 4375                                                        Register temp3,
 4376                                                        Register temp4,
 4377                                                        Register result) {
 4378   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
 4379   assert_different_registers(r_sub_klass, r_super_klass, rcx);
 4380   RegSet temps = RegSet::of(temp1, temp2, temp3, temp4);
 4381 
 4382   Label L_fallthrough, L_success, L_failure;
 4383 
 4384   BLOCK_COMMENT("lookup_secondary_supers_table {");
 4385 
 4386   RegSetIterator<Register> available_regs = (temps - rcx).begin();
 4387 
 4388   // FIXME. Once we are sure that all paths reaching this point really
 4389   // do pass rcx as one of our temps we can get rid of the following
 4390   // workaround.
 4391   assert(temps.contains(rcx), "fix this code");
 4392 
 4393   // We prefer to have our shift count in rcx. If rcx is one of our
 4394   // temps, use it for slot. If not, pick any of our temps.
 4395   Register slot;
 4396   if (!temps.contains(rcx)) {
 4397     slot = *available_regs++;
 4398   } else {
 4399     slot = rcx;
 4400   }
 4401 
 4402   const Register r_array_index = *available_regs++;
 4403   const Register r_bitmap      = *available_regs++;
 4404 
 4405   // The logic above guarantees this property, but we state it here.
 4406   assert_different_registers(r_array_index, r_bitmap, rcx);
 4407 
 4408   movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
 4409   movq(r_array_index, r_bitmap);
 4410 
 4411   // First check the bitmap to see if super_klass might be present. If
 4412   // the bit is zero, we are certain that super_klass is not one of
 4413   // the secondary supers.
 4414   movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
 4415   xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
 4416   salq(r_array_index, slot);
 4417 
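        // The variable shift count may be zero, in which case salq leaves the flags untouched,
        // so test explicitly before consulting the sign flag below.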
 4418   testq(r_array_index, r_array_index);
 4419   // We test the MSB of r_array_index, i.e. its sign bit
 4420   jcc(Assembler::positive, L_failure);
 4421 
 4422   const Register r_array_base = *available_regs++;
 4423 
 4424   // Get the first array index that can contain super_klass into r_array_index.
 4425   // Note: Clobbers r_array_base and slot.
 4426   population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
 4427 
 4428   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
 4429 
 4430   // We will consult the secondary-super array.
 4431   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4432 
 4433   // We're asserting that the first word in an Array<Klass*> is the
 4434   // length, and the second word is the first word of the data. If
 4435   // that ever changes, r_array_base will have to be adjusted here.
 4436   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
 4437   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
 4438 
 4439   cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4440   jccb(Assembler::equal, L_success);
 4441 
 4442   // Restore slot to its true value
 4443   movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
 4444 
 4445   // Linear probe. Rotate the bitmap so that the next bit to test is
 4446   // in Bit 1.
 4447   rorq(r_bitmap, slot);
 4448 
 4449   // Is there another entry to check? Consult the bitmap.
 4450   btq(r_bitmap, 1);
 4451   jccb(Assembler::carryClear, L_failure);
 4452 
 4453   // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
 4454   // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
 4455   // Kills: r_array_length.
 4456   // Returns: result.
 4457   lookup_secondary_supers_table_slow_path(r_super_klass,
 4458                                           r_array_base,
 4459                                           r_array_index,
 4460                                           r_bitmap,
 4461                                           /*temp1*/result,
 4462                                           /*temp2*/slot,
 4463                                           &L_success,
 4464                                           nullptr);
 4465 
 4466   bind(L_failure);
 4467   movq(result, 1);
 4468   jmpb(L_fallthrough);
 4469 
 4470   bind(L_success);
 4471   xorq(result, result); // = 0
 4472 
 4473   bind(L_fallthrough);
 4474   BLOCK_COMMENT("} lookup_secondary_supers_table");
 4475 
 4476   if (VerifySecondarySupers) {
 4477     verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
 4478                                   temp1, temp2, temp3);
 4479   }
 4480 }
 4481 
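      // Scan the pointer-sized slots at addr[count .. limit-1] for value, branching to L_success
      // on the first match and to L_failure (or falling through) once the scan is exhausted.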
 4482 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
 4483                                  Label* L_success, Label* L_failure) {
 4484   Label L_loop, L_fallthrough;
 4485   {
 4486     int label_nulls = 0;
 4487     if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
 4488     if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
 4489     assert(label_nulls <= 1, "at most one null in the batch");
 4490   }
 4491   bind(L_loop);
 4492   cmpq(value, Address(addr, count, Address::times_8));
 4493   jcc(Assembler::equal, *L_success);
 4494   addl(count, 1);
 4495   cmpl(count, limit);
 4496   jcc(Assembler::less, L_loop);
 4497 
 4498   if (&L_fallthrough != L_failure) {
 4499     jmp(*L_failure);
 4500   }
 4501   bind(L_fallthrough);
 4502 }
 4503 
 4504 // Called by code generated by check_klass_subtype_slow_path
 4505 // above. This is called when there is a collision in the hashed
 4506 // lookup in the secondary supers array.
 4507 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
 4508                                                              Register r_array_base,
 4509                                                              Register r_array_index,
 4510                                                              Register r_bitmap,
 4511                                                              Register temp1,
 4512                                                              Register temp2,
 4513                                                              Label* L_success,
 4514                                                              Label* L_failure) {
 4515   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);
 4516 
 4517   const Register
 4518     r_array_length = temp1,
 4519     r_sub_klass    = noreg,
 4520     result         = noreg;
 4521 
 4522   Label L_fallthrough;
 4523   int label_nulls = 0;
 4524   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
 4525   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
 4526   assert(label_nulls <= 1, "at most one null in the batch");
 4527 
 4528   // Load the array length.
 4529   movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
 4530   // And adjust the array base to point to the data.
 4531   // NB! Effectively increments current slot index by 1.
 4532   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
 4533   addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
 4534 
 4535   // Linear probe
 4536   Label L_huge;
 4537 
 4538   // The bitmap is full to bursting.
 4539   // Implicit invariant: BITMAP_FULL implies (length > 0)
 4540   cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2);
 4541   jcc(Assembler::greater, L_huge);
 4542 
 4543   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
 4544   // current slot (at secondary_supers[r_array_index]) has not yet
 4545   // been inspected, and r_array_index may be out of bounds if we
 4546   // wrapped around the end of the array.
 4547 
 4548   { // This is conventional linear probing, but instead of terminating
 4549     // when a null entry is found in the table, we maintain a bitmap
 4550     // in which a 0 indicates missing entries.
 4551     // The check above guarantees there are 0s in the bitmap, so the loop
 4552     // eventually terminates.
 4553 
 4554     xorl(temp2, temp2); // = 0;
 4555 
 4556     Label L_again;
 4557     bind(L_again);
 4558 
 4559     // Check for array wraparound.
 4560     cmpl(r_array_index, r_array_length);
 4561     cmovl(Assembler::greaterEqual, r_array_index, temp2);
 4562 
 4563     cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
 4564     jcc(Assembler::equal, *L_success);
 4565 
 4566     // If the next bit in bitmap is zero, we're done.
 4567     btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
 4568     jcc(Assembler::carryClear, *L_failure);
 4569 
 4570     rorq(r_bitmap, 1); // Bits 1/2 => 0/1
 4571     addl(r_array_index, 1);
 4572 
 4573     jmp(L_again);
 4574   }
 4575 
 4576   { // Degenerate case: more than 64 secondary supers.
 4577     // FIXME: We could do something smarter here, maybe a vectorized
 4578     // comparison or a binary search, but is that worth any added
 4579     // complexity?
 4580     bind(L_huge);
 4581     xorl(r_array_index, r_array_index); // = 0
 4582     repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
 4583                 L_success,
 4584                 (&L_fallthrough != L_failure ? L_failure : nullptr));
 4585 
 4586     bind(L_fallthrough);
 4587   }
 4588 }
 4589 
 4590 struct VerifyHelperArguments {
 4591   Klass* _super;
 4592   Klass* _sub;
 4593   intptr_t _linear_result;
 4594   intptr_t _table_result;
 4595 };
 4596 
 4597 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
 4598   Klass::on_secondary_supers_verification_failure(args->_super,
 4599                                                   args->_sub,
 4600                                                   args->_linear_result,
 4601                                                   args->_table_result,
 4602                                                   msg);
 4603 }
 4604 
 4605 // Make sure that the hashed lookup and a linear scan agree.
 4606 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
 4607                                                    Register r_super_klass,
 4608                                                    Register result,
 4609                                                    Register temp1,
 4610                                                    Register temp2,
 4611                                                    Register temp3) {
 4612   const Register
 4613       r_array_index  = temp1,
 4614       r_array_length = temp2,
 4615       r_array_base   = temp3,
 4616       r_bitmap       = noreg;
 4617 
 4618   BLOCK_COMMENT("verify_secondary_supers_table {");
 4619 
 4620   Label L_success, L_failure, L_check, L_done;
 4621 
 4622   movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
 4623   movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
 4624   // And adjust the array base to point to the data.
 4625   addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());
 4626 
 4627   testl(r_array_length, r_array_length); // array_length == 0?
 4628   jcc(Assembler::zero, L_failure);
 4629 
 4630   movl(r_array_index, 0);
 4631   repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
 4632   // fall through to L_failure
 4633 
 4634   const Register linear_result = r_array_index; // reuse temp1
 4635 
 4636   bind(L_failure); // not present
 4637   movl(linear_result, 1);
 4638   jmp(L_check);
 4639 
 4640   bind(L_success); // present
 4641   movl(linear_result, 0);
 4642 
 4643   bind(L_check);
 4644   cmpl(linear_result, result);
 4645   jcc(Assembler::equal, L_done);
 4646 
 4647   { // To avoid calling convention issues, build a record on the stack
 4648     // and pass the pointer to that instead.
 4649     push(result);
 4650     push(linear_result);
 4651     push(r_sub_klass);
 4652     push(r_super_klass);
 4653     movptr(c_rarg1, rsp);
 4654     movptr(c_rarg0, (uintptr_t) "mismatch");
 4655     call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
 4656     should_not_reach_here();
 4657   }
 4658   bind(L_done);
 4659 
 4660   BLOCK_COMMENT("} verify_secondary_supers_table");
 4661 }
 4662 
 4663 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS
 4664 
 4665 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
 4666   assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
 4667 
 4668   Label L_fallthrough;
 4669   if (L_fast_path == nullptr) {
 4670     L_fast_path = &L_fallthrough;
 4671   } else if (L_slow_path == nullptr) {
 4672     L_slow_path = &L_fallthrough;
 4673   }
 4674 
 4675   // Fast path check: class is fully initialized.
 4676   // init_state needs acquire, but x86 is TSO, and so we are already good.
 4677   cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
 4678   jcc(Assembler::equal, *L_fast_path);
 4679 
 4680   // Fast path check: current thread is initializer thread
 4681   cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
 4682   if (L_slow_path == &L_fallthrough) {
 4683     jcc(Assembler::equal, *L_fast_path);
 4684     bind(*L_slow_path);
 4685   } else if (L_fast_path == &L_fallthrough) {
 4686     jcc(Assembler::notEqual, *L_slow_path);
 4687     bind(*L_fast_path);
 4688   } else {
 4689     Unimplemented();
 4690   }
 4691 }
 4692 
 4693 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
 4694   if (VM_Version::supports_cmov()) {
 4695     cmovl(cc, dst, src);
 4696   } else {
 4697     Label L;
 4698     jccb(negate_condition(cc), L);
 4699     movl(dst, src);
 4700     bind(L);
 4701   }
 4702 }
 4703 
 4704 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 4705   if (VM_Version::supports_cmov()) {
 4706     cmovl(cc, dst, src);
 4707   } else {
 4708     Label L;
 4709     jccb(negate_condition(cc), L);
 4710     movl(dst, src);
 4711     bind(L);
 4712   }
 4713 }
 4714 
 4715 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
 4716   if (!VerifyOops) return;
 4717 
 4718   BLOCK_COMMENT("verify_oop {");
 4719   push(rscratch1);
 4720   push(rax);                          // save rax
 4721   push(reg);                          // pass register argument
 4722 
 4723   // Pass register number to verify_oop_subroutine
 4724   const char* b = nullptr;
 4725   {
 4726     ResourceMark rm;
 4727     stringStream ss;
 4728     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
 4729     b = code_string(ss.as_string());
 4730   }
 4731   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4732   pushptr(buffer.addr(), rscratch1);
 4733 
 4734   // call indirectly to solve generation ordering problem
 4735   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 4736   call(rax);
 4737   // Caller pops the arguments (oop, message) and restores rax, r10
 4738   BLOCK_COMMENT("} verify_oop");
 4739 }
 4740 
 4741 void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
 4742   if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
 4743     // Only pcmpeq is treated as a dependency-breaking idiom (i.e. execution can begin
 4744     // without waiting for the previous result in dst); vpcmpeqd is not, so just use vpternlogd.
 4745     vpternlogd(dst, 0xFF, dst, dst, vector_len);
 4746   } else if (VM_Version::supports_avx()) {
 4747     vpcmpeqd(dst, dst, dst, vector_len);
 4748   } else {
 4749     pcmpeqd(dst, dst);
 4750   }
 4751 }
 4752 
 4753 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
 4754                                          int extra_slot_offset) {
 4755   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
 4756   int stackElementSize = Interpreter::stackElementSize;
 4757   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 4758 #ifdef ASSERT
 4759   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
 4760   assert(offset1 - offset == stackElementSize, "correct arithmetic");
 4761 #endif
 4762   Register             scale_reg    = noreg;
 4763   Address::ScaleFactor scale_factor = Address::no_scale;
 4764   if (arg_slot.is_constant()) {
 4765     offset += arg_slot.as_constant() * stackElementSize;
 4766   } else {
 4767     scale_reg    = arg_slot.as_register();
 4768     scale_factor = Address::times(stackElementSize);
 4769   }
 4770   offset += wordSize;           // return PC is on stack
 4771   return Address(rsp, scale_reg, scale_factor, offset);
 4772 }
 4773 
 4774 // Handle the receiver type profile update given the "recv" klass.
 4775 //
 4776 // Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
 4777 // If there are no matching or claimable receiver entries in RD, updates
 4778 // the polymorphic counter.
 4779 //
 4780 // This code is expected to run from either the interpreter or JIT-ed code, without
 4781 // extra synchronization. For safety, receiver cells are claimed atomically, which
 4782 // avoids grossly misrepresenting the profiles under concurrent updates. For speed,
 4783 // counter updates are not atomic.
 4784 //
 4785 void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
 4786   int base_receiver_offset   = in_bytes(ReceiverTypeData::receiver_offset(0));
 4787   int end_receiver_offset    = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
 4788   int poly_count_offset      = in_bytes(CounterData::count_offset());
 4789   int receiver_step          = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
 4790   int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
 4791 
 4792   // Adjust for MDP offsets. Slots are pointer-sized, so is the global offset.
 4793   assert(is_aligned(mdp_offset, BytesPerWord), "sanity");
 4794   base_receiver_offset += mdp_offset;
 4795   end_receiver_offset  += mdp_offset;
 4796   poly_count_offset    += mdp_offset;
 4797 
 4798   // Scale down to optimize encoding. Slots are pointer-sized.
 4799   assert(is_aligned(base_receiver_offset,   BytesPerWord), "sanity");
 4800   assert(is_aligned(end_receiver_offset,    BytesPerWord), "sanity");
 4801   assert(is_aligned(poly_count_offset,      BytesPerWord), "sanity");
 4802   assert(is_aligned(receiver_step,          BytesPerWord), "sanity");
 4803   assert(is_aligned(receiver_to_count_step, BytesPerWord), "sanity");
 4804   base_receiver_offset   >>= LogBytesPerWord;
 4805   end_receiver_offset    >>= LogBytesPerWord;
 4806   poly_count_offset      >>= LogBytesPerWord;
 4807   receiver_step          >>= LogBytesPerWord;
 4808   receiver_to_count_step >>= LogBytesPerWord;
 4809 
 4810 #ifdef ASSERT
 4811   // We are about to walk the MDO slots without asking for offsets.
 4812   // Check that our math hits all the right spots.
 4813   for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
 4814     int real_recv_offset  = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
 4815     int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
 4816     int offset = base_receiver_offset + receiver_step*c;
 4817     int count_offset = offset + receiver_to_count_step;
 4818     assert((offset << LogBytesPerWord) == real_recv_offset, "receiver slot math");
 4819     assert((count_offset << LogBytesPerWord) == real_count_offset, "receiver count math");
 4820   }
 4821   int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
 4822   assert(poly_count_offset << LogBytesPerWord == real_poly_count_offset, "poly counter math");
 4823 #endif
 4824 
 4825   // Corner case: no profile table. Increment poly counter and exit.
 4826   if (ReceiverTypeData::row_limit() == 0) {
 4827     addptr(Address(mdp, poly_count_offset, Address::times_ptr), DataLayout::counter_increment);
 4828     return;
 4829   }
 4830 
 4831   Register offset = rscratch1;
 4832 
 4833   Label L_loop_search_receiver, L_loop_search_empty;
 4834   Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
 4835 
 4836   // The code here recognizes three major cases:
 4837   //   A. Fastest: receiver found in the table
 4838   //   B. Fast: no receiver in the table, and the table is full
 4839   //   C. Slow: no receiver in the table, free slots in the table
 4840   //
 4841   // Case A performance is the most important, as perfectly-behaved code would end up
 4842   // there, especially with larger TypeProfileWidth. Case B performance is important
 4843   // as well: this is where the bulk of code would land for normally megamorphic cases.
 4844   // Case C performance is not essential; its job is to deal with installation races,
 4845   // so we optimize for code density instead. Case C needs to make sure that receiver
 4846   // rows are only claimed once. This makes sure we never overwrite a row for another
 4847   // receiver and never duplicate receivers in the list, keeping the profile type-accurate.
 4848   //
 4849   // It is very tempting to handle these cases in a single loop, and claim the first slot
 4850   // without checking the rest of the table. But, profiling code should tolerate free slots
 4851   // in the table, as class unloading can clear them. After such cleanup, the receiver
 4852   // we need might be _after_ the free slot. Therefore, we need to let at least a full
 4853   // scan complete before trying to install new slots. Splitting the code into several tight
 4854   // loops also helpfully optimizes for cases A and B.
 4855   //
 4856   // This code is effectively:
 4857   //
 4858   // restart:
 4859   //   // Fastest: receiver is already installed
 4860   //   for (i = 0; i < receiver_count(); i++) {
 4861   //     if (receiver(i) == recv) goto found_recv(i);
 4862   //   }
 4863   //
 4864   //   // Fast: no receiver, but profile is full
 4865   //   for (i = 0; i < receiver_count(); i++) {
 4866   //     if (receiver(i) == null) goto found_null(i);
 4867   //   }
 4868   //   goto polymorphic
 4869   //
 4870   //   // Slow: try to install receiver
 4871   // found_null(i):
 4872   //   CAS(&receiver(i), null, recv);
 4873   //   goto restart
 4874   //
 4875   // polymorphic:
 4876   //   count++;
 4877   //   return
 4878   //
 4879   // found_recv(i):
 4880   //   *receiver_count(i)++
 4881   //
 4882 
 4883   bind(L_restart);
 4884 
 4885   // Fastest: receiver is already installed
 4886   movptr(offset, base_receiver_offset);
 4887   bind(L_loop_search_receiver);
 4888     cmpptr(recv, Address(mdp, offset, Address::times_ptr));
 4889     jccb(Assembler::equal, L_found_recv);
 4890   addptr(offset, receiver_step);
 4891   cmpptr(offset, end_receiver_offset);
 4892   jccb(Assembler::notEqual, L_loop_search_receiver);
 4893 
 4894   // Fast: no receiver, but profile is full
 4895   movptr(offset, base_receiver_offset);
 4896   bind(L_loop_search_empty);
 4897     cmpptr(Address(mdp, offset, Address::times_ptr), NULL_WORD);
 4898     jccb(Assembler::equal, L_found_empty);
 4899   addptr(offset, receiver_step);
 4900   cmpptr(offset, end_receiver_offset);
 4901   jccb(Assembler::notEqual, L_loop_search_empty);
 4902   jmpb(L_polymorphic);
 4903 
 4904   // Slow: try to install receiver
 4905   bind(L_found_empty);
 4906 
 4907   // Atomically swing receiver slot: null -> recv.
 4908   //
 4909   // The update code uses CAS, which wants the RAX register specifically, *and* it needs
 4910   // the other important registers untouched, as they form the address. Therefore, if one of
 4911   // the important registers is currently in RAX, we shift it into some other spare register.
 4912   // Whichever register we end up clobbering is saved on the stack first.
 4913 
 4914   Register spare_reg = noreg;
 4915   Register shifted_mdp = mdp;
 4916   Register shifted_recv = recv;
 4917   if (recv == rax || mdp == rax) {
 4918     spare_reg = (recv != rbx && mdp != rbx) ? rbx :
 4919                 (recv != rcx && mdp != rcx) ? rcx :
 4920                 rdx;
 4921     assert_different_registers(mdp, recv, offset, spare_reg);
 4922 
 4923     push(spare_reg);
 4924     if (recv == rax) {
 4925       movptr(spare_reg, recv);
 4926       shifted_recv = spare_reg;
 4927     } else {
 4928       assert(mdp == rax, "Remaining case");
 4929       movptr(spare_reg, mdp);
 4930       shifted_mdp = spare_reg;
 4931     }
 4932   } else {
 4933     push(rax);
 4934   }
 4935 
 4936   // None of the important registers are in RAX after this shuffle.
 4937   assert_different_registers(rax, shifted_mdp, shifted_recv, offset);
 4938 
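        // rax holds the expected value (null); cmpxchg installs recv only if the slot is still empty.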
 4939   xorptr(rax, rax);
 4940   cmpxchgptr(shifted_recv, Address(shifted_mdp, offset, Address::times_ptr));
 4941 
 4942   // Unshift registers.
 4943   if (recv == rax || mdp == rax) {
 4944     movptr(rax, spare_reg);
 4945     pop(spare_reg);
 4946   } else {
 4947     pop(rax);
 4948   }
 4949 
 4950   // CAS success means the slot now has the receiver we want. CAS failure means
 4951   // something had claimed the slot concurrently: it can be the same receiver we want,
 4952   // or something else. Since this is a slow path, we can optimize for code density,
 4953   // and just restart the search from the beginning.
 4954   jmpb(L_restart);
 4955 
 4956   // Counter updates:
 4957 
 4958   // Increment polymorphic counter instead of receiver slot.
 4959   bind(L_polymorphic);
 4960   movptr(offset, poly_count_offset);
 4961   jmpb(L_count_update);
 4962 
 4963   // Found a receiver, convert its slot offset to corresponding count offset.
 4964   bind(L_found_recv);
 4965   addptr(offset, receiver_to_count_step);
 4966 
 4967   bind(L_count_update);
 4968   addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
 4969 }
 4970 
 4971 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
 4972   if (!VerifyOops) return;
 4973 
 4974   push(rscratch1);
 4975   push(rax); // save rax
 4976   // addr may contain rsp so we will have to adjust it based on the push
 4977   // we just did (and on 64 bit we do two pushes)
 4978   // NOTE: the 64-bit code appears to have had a bug here: it did movq(addr, rax), which
 4979   // stores rax into addr -- the reverse of what was intended.
 4980   if (addr.uses(rsp)) {
 4981     lea(rax, addr);
 4982     pushptr(Address(rax, 2 * BytesPerWord));
 4983   } else {
 4984     pushptr(addr);
 4985   }
 4986 
 4987   // Pass register number to verify_oop_subroutine
 4988   const char* b = nullptr;
 4989   {
 4990     ResourceMark rm;
 4991     stringStream ss;
 4992     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
 4993     b = code_string(ss.as_string());
 4994   }
 4995   AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
 4996   pushptr(buffer.addr(), rscratch1);
 4997 
 4998   // call indirectly to solve generation ordering problem
 4999   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
 5000   call(rax);
 5001   // Caller pops the arguments (addr, message) and restores rax, r10.
 5002 }
 5003 
 5004 void MacroAssembler::verify_tlab() {
 5005 #ifdef ASSERT
 5006   if (UseTLAB && VerifyOops) {
 5007     Label next, ok;
 5008     Register t1 = rsi;
 5009 
 5010     push(t1);
 5011 
 5012     movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 5013     cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
 5014     jcc(Assembler::aboveEqual, next);
 5015     STOP("assert(top >= start)");
 5016     should_not_reach_here();
 5017 
 5018     bind(next);
 5019     movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
 5020     cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 5021     jcc(Assembler::aboveEqual, ok);
 5022     STOP("assert(top <= end)");
 5023     should_not_reach_here();
 5024 
 5025     bind(ok);
 5026     pop(t1);
 5027   }
 5028 #endif
 5029 }
 5030 
 5031 class ControlWord {
 5032  public:
 5033   int32_t _value;
 5034 
 5035   int  rounding_control() const        { return  (_value >> 10) & 3      ; }
 5036   int  precision_control() const       { return  (_value >>  8) & 3      ; }
 5037   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5038   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5039   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5040   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5041   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5042   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5043 
 5044   void print() const {
 5045     // rounding control
 5046     const char* rc;
 5047     switch (rounding_control()) {
 5048       case 0: rc = "round near"; break;
 5049       case 1: rc = "round down"; break;
 5050       case 2: rc = "round up  "; break;
 5051       case 3: rc = "chop      "; break;
 5052       default:
 5053         rc = nullptr; // silence compiler warnings
 5054         fatal("Unknown rounding control: %d", rounding_control());
 5055     };
 5056     // precision control
 5057     const char* pc;
 5058     switch (precision_control()) {
 5059       case 0: pc = "24 bits "; break;
 5060       case 1: pc = "reserved"; break;
 5061       case 2: pc = "53 bits "; break;
 5062       case 3: pc = "64 bits "; break;
 5063       default:
 5064         pc = nullptr; // silence compiler warnings
 5065         fatal("Unknown precision control: %d", precision_control());
 5066     };
 5067     // flags
 5068     char f[9];
 5069     f[0] = ' ';
 5070     f[1] = ' ';
 5071     f[2] = (precision   ()) ? 'P' : 'p';
 5072     f[3] = (underflow   ()) ? 'U' : 'u';
 5073     f[4] = (overflow    ()) ? 'O' : 'o';
 5074     f[5] = (zero_divide ()) ? 'Z' : 'z';
 5075     f[6] = (denormalized()) ? 'D' : 'd';
 5076     f[7] = (invalid     ()) ? 'I' : 'i';
 5077     f[8] = '\x0';
 5078     // output
 5079     printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
 5080   }
 5081 
 5082 };
 5083 
 5084 class StatusWord {
 5085  public:
 5086   int32_t _value;
 5087 
 5088   bool busy() const                    { return ((_value >> 15) & 1) != 0; }
 5089   bool C3() const                      { return ((_value >> 14) & 1) != 0; }
 5090   bool C2() const                      { return ((_value >> 10) & 1) != 0; }
 5091   bool C1() const                      { return ((_value >>  9) & 1) != 0; }
 5092   bool C0() const                      { return ((_value >>  8) & 1) != 0; }
 5093   int  top() const                     { return  (_value >> 11) & 7      ; }
 5094   bool error_status() const            { return ((_value >>  7) & 1) != 0; }
 5095   bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
 5096   bool precision() const               { return ((_value >>  5) & 1) != 0; }
 5097   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
 5098   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
 5099   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
 5100   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
 5101   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
 5102 
 5103   void print() const {
 5104     // condition codes
 5105     char c[5];
 5106     c[0] = (C3()) ? '3' : '-';
 5107     c[1] = (C2()) ? '2' : '-';
 5108     c[2] = (C1()) ? '1' : '-';
 5109     c[3] = (C0()) ? '0' : '-';
 5110     c[4] = '\x0';
 5111     // flags
 5112     char f[9];
 5113     f[0] = (error_status()) ? 'E' : '-';
 5114     f[1] = (stack_fault ()) ? 'S' : '-';
 5115     f[2] = (precision   ()) ? 'P' : '-';
 5116     f[3] = (underflow   ()) ? 'U' : '-';
 5117     f[4] = (overflow    ()) ? 'O' : '-';
 5118     f[5] = (zero_divide ()) ? 'Z' : '-';
 5119     f[6] = (denormalized()) ? 'D' : '-';
 5120     f[7] = (invalid     ()) ? 'I' : '-';
 5121     f[8] = '\x0';
 5122     // output
 5123     printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
 5124   }
 5125 
 5126 };
 5127 
 5128 class TagWord {
 5129  public:
 5130   int32_t _value;
 5131 
 5132   int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
 5133 
 5134   void print() const {
 5135     printf("%04x", _value & 0xFFFF);
 5136   }
 5137 
 5138 };
 5139 
 5140 class FPU_Register {
 5141  public:
 5142   int32_t _m0;
 5143   int32_t _m1;
 5144   int16_t _ex;
 5145 
 5146   bool is_indefinite() const           {
 5147     return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
 5148   }
 5149 
 5150   void print() const {
 5151     char  sign = (_ex < 0) ? '-' : '+';
 5152     const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
 5153     printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
 5154   }
 5155 
 5156 };
 5157 
 5158 class FPU_State {
 5159  public:
 5160   enum {
 5161     register_size       = 10,
 5162     number_of_registers =  8,
 5163     register_mask       =  7
 5164   };
 5165 
 5166   ControlWord  _control_word;
 5167   StatusWord   _status_word;
 5168   TagWord      _tag_word;
 5169   int32_t      _error_offset;
 5170   int32_t      _error_selector;
 5171   int32_t      _data_offset;
 5172   int32_t      _data_selector;
 5173   int8_t       _register[register_size * number_of_registers];
 5174 
 5175   int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
 5176   FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
 5177 
 5178   const char* tag_as_string(int tag) const {
 5179     switch (tag) {
 5180       case 0: return "valid";
 5181       case 1: return "zero";
 5182       case 2: return "special";
 5183       case 3: return "empty";
 5184     }
 5185     ShouldNotReachHere();
 5186     return nullptr;
 5187   }
 5188 
 5189   void print() const {
 5190     // print computation registers
 5191     { int t = _status_word.top();
 5192       for (int i = 0; i < number_of_registers; i++) {
 5193         int j = (i - t) & register_mask;
 5194         printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
 5195         st(j)->print();
 5196         printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
 5197       }
 5198     }
 5199     printf("\n");
 5200     // print control registers
 5201     printf("ctrl = "); _control_word.print(); printf("\n");
 5202     printf("stat = "); _status_word .print(); printf("\n");
 5203     printf("tags = "); _tag_word    .print(); printf("\n");
 5204   }
 5205 
 5206 };
 5207 
 5208 class Flag_Register {
 5209  public:
 5210   int32_t _value;
 5211 
 5212   bool overflow() const                { return ((_value >> 11) & 1) != 0; }
 5213   bool direction() const               { return ((_value >> 10) & 1) != 0; }
 5214   bool sign() const                    { return ((_value >>  7) & 1) != 0; }
 5215   bool zero() const                    { return ((_value >>  6) & 1) != 0; }
 5216   bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
 5217   bool parity() const                  { return ((_value >>  2) & 1) != 0; }
 5218   bool carry() const                   { return ((_value >>  0) & 1) != 0; }
 5219 
 5220   void print() const {
 5221     // flags
 5222     char f[8];
 5223     f[0] = (overflow       ()) ? 'O' : '-';
 5224     f[1] = (direction      ()) ? 'D' : '-';
 5225     f[2] = (sign           ()) ? 'S' : '-';
 5226     f[3] = (zero           ()) ? 'Z' : '-';
 5227     f[4] = (auxiliary_carry()) ? 'A' : '-';
 5228     f[5] = (parity         ()) ? 'P' : '-';
 5229     f[6] = (carry          ()) ? 'C' : '-';
 5230     f[7] = '\x0';
 5231     // output
 5232     printf("%08x  flags = %s", _value, f);
 5233   }
 5234 
 5235 };
 5236 
 5237 class IU_Register {
 5238  public:
 5239   int32_t _value;
 5240 
 5241   void print() const {
 5242     printf("%08x  %11d", _value, _value);
 5243   }
 5244 
 5245 };
 5246 
 5247 class IU_State {
 5248  public:
 5249   Flag_Register _eflags;
 5250   IU_Register   _rdi;
 5251   IU_Register   _rsi;
 5252   IU_Register   _rbp;
 5253   IU_Register   _rsp;
 5254   IU_Register   _rbx;
 5255   IU_Register   _rdx;
 5256   IU_Register   _rcx;
 5257   IU_Register   _rax;
 5258 
 5259   void print() const {
 5260     // computation registers
 5261     printf("rax  = "); _rax.print(); printf("\n");
 5262     printf("rbx  = "); _rbx.print(); printf("\n");
 5263     printf("rcx  = "); _rcx.print(); printf("\n");
 5264     printf("rdx  = "); _rdx.print(); printf("\n");
 5265     printf("rdi  = "); _rdi.print(); printf("\n");
 5266     printf("rsi  = "); _rsi.print(); printf("\n");
 5267     printf("rbp  = "); _rbp.print(); printf("\n");
 5268     printf("rsp  = "); _rsp.print(); printf("\n");
 5269     printf("\n");
 5270     // control registers
 5271     printf("flgs = "); _eflags.print(); printf("\n");
 5272   }
 5273 };
 5274 
 5275 
 5276 class CPU_State {
 5277  public:
 5278   FPU_State _fpu_state;
 5279   IU_State  _iu_state;
 5280 
 5281   void print() const {
 5282     printf("--------------------------------------------------\n");
 5283     _iu_state .print();
 5284     printf("\n");
 5285     _fpu_state.print();
 5286     printf("--------------------------------------------------\n");
 5287   }
 5288 
 5289 };
 5290 
 5291 
 5292 static void _print_CPU_state(CPU_State* state) {
 5293   state->print();
 5294 }
 5295 
 5296 
 5297 void MacroAssembler::print_CPU_state() {
 5298   push_CPU_state();
 5299   push(rsp);                // pass CPU state
 5300   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
 5301   addptr(rsp, wordSize);       // discard argument
 5302   pop_CPU_state();
 5303 }
 5304 
 5305 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
 5306   // Either restore the MXCSR register after returning from the JNI Call
 5307   // or verify that it wasn't changed (with -Xcheck:jni flag).
 5308   if (VM_Version::supports_sse()) {
 5309     if (RestoreMXCSROnJNICalls) {
 5310       ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
 5311     } else if (CheckJNICalls) {
 5312       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
 5313     }
 5314   }
 5315   // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
 5316   vzeroupper();
 5317 }
 5318 
 5319 // ((OopHandle)result).resolve();
 5320 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
 5321   assert_different_registers(result, tmp);
 5322 
 5323   // Only 64 bit platforms support GCs that require a tmp register
 5324   // Only IN_HEAP loads require a thread_tmp register
 5325   // OopHandle::resolve is an indirection like jobject.
 5326   access_load_at(T_OBJECT, IN_NATIVE,
 5327                  result, Address(result, 0), tmp);
 5328 }
 5329 
 5330 // ((WeakHandle)result).resolve();
 5331 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
 5332   assert_different_registers(rresult, rtmp);
 5333   Label resolved;
 5334 
 5335   // A null weak handle resolves to null.
 5336   cmpptr(rresult, 0);
 5337   jcc(Assembler::equal, resolved);
 5338 
 5339   // Only 64 bit platforms support GCs that require a tmp register
 5340   // Only IN_HEAP loads require a thread_tmp register
 5341   // WeakHandle::resolve is an indirection like jweak.
 5342   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
 5343                  rresult, Address(rresult, 0), rtmp);
 5344   bind(resolved);
 5345 }
 5346 
 5347 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
 5348   // get mirror
 5349   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 5350   load_method_holder(mirror, method);
 5351   movptr(mirror, Address(mirror, mirror_offset));
 5352   resolve_oop_handle(mirror, tmp);
 5353 }
 5354 
 5355 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
 5356   load_method_holder(rresult, rmethod);
 5357   movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
 5358 }
 5359 
 5360 void MacroAssembler::load_method_holder(Register holder, Register method) {
 5361   movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
 5362   movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
 5363   movptr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
 5364 }
 5365 
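      // With compact object headers the narrow Klass* lives in the upper bits of the mark word;
      // extract it by loading the mark word and shifting.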
 5366 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
 5367   assert(UseCompactObjectHeaders, "expect compact object headers");
 5368   movq(dst, Address(src, oopDesc::mark_offset_in_bytes()));
 5369   shrq(dst, markWord::klass_shift);
 5370 }
 5371 
 5372 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
 5373   assert_different_registers(src, tmp);
 5374   assert_different_registers(dst, tmp);
 5375 
 5376   if (UseCompactObjectHeaders) {
 5377     load_narrow_klass_compact(dst, src);
 5378     decode_klass_not_null(dst, tmp);
 5379   } else if (UseCompressedClassPointers) {
 5380     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5381     decode_klass_not_null(dst, tmp);
 5382   } else {
 5383     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 5384   }
 5385 }
 5386 
 5387 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
 5388   assert(!UseCompactObjectHeaders, "not with compact headers");
 5389   assert_different_registers(src, tmp);
 5390   assert_different_registers(dst, tmp);
 5391   if (UseCompressedClassPointers) {
 5392     encode_klass_not_null(src, tmp);
 5393     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5394   } else {
 5395     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 5396   }
 5397 }
 5398 
 5399 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
 5400   if (UseCompactObjectHeaders) {
 5401     assert(tmp != noreg, "need tmp");
 5402     assert_different_registers(klass, obj, tmp);
 5403     load_narrow_klass_compact(tmp, obj);
 5404     cmpl(klass, tmp);
 5405   } else if (UseCompressedClassPointers) {
 5406     cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
 5407   } else {
 5408     cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
 5409   }
 5410 }
 5411 
 5412 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
 5413   if (UseCompactObjectHeaders) {
 5414     assert(tmp2 != noreg, "need tmp2");
 5415     assert_different_registers(obj1, obj2, tmp1, tmp2);
 5416     load_narrow_klass_compact(tmp1, obj1);
 5417     load_narrow_klass_compact(tmp2, obj2);
 5418     cmpl(tmp1, tmp2);
 5419   } else if (UseCompressedClassPointers) {
 5420     movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
 5421     cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
 5422   } else {
 5423     movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
 5424     cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
 5425   }
 5426 }
 5427 
 5428 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 5429                                     Register tmp1) {
 5430   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5431   decorators = AccessInternal::decorator_fixup(decorators, type);
 5432   bool as_raw = (decorators & AS_RAW) != 0;
 5433   if (as_raw) {
 5434     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1);
 5435   } else {
 5436     bs->load_at(this, decorators, type, dst, src, tmp1);
 5437   }
 5438 }
 5439 
 5440 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 5441                                      Register tmp1, Register tmp2, Register tmp3) {
 5442   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 5443   decorators = AccessInternal::decorator_fixup(decorators, type);
 5444   bool as_raw = (decorators & AS_RAW) != 0;
 5445   if (as_raw) {
 5446     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5447   } else {
 5448     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
 5449   }
 5450 }
 5451 
 5452 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5453   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1);
 5454 }
 5455 
 5456 // Doesn't do verification, generates fixed size code
 5457 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) {
 5458   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1);
 5459 }
 5460 
 5461 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
 5462                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
 5463   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
 5464 }
 5465 
 5466 // Used for storing nulls.
 5467 void MacroAssembler::store_heap_oop_null(Address dst) {
 5468   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
 5469 }
 5470 
 5471 void MacroAssembler::store_klass_gap(Register dst, Register src) {
 5472   assert(!UseCompactObjectHeaders, "Don't use with compact headers");
 5473   if (UseCompressedClassPointers) {
 5474     // Store to klass gap in destination
 5475     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
 5476   }
 5477 }
 5478 
 5479 #ifdef ASSERT
 5480 void MacroAssembler::verify_heapbase(const char* msg) {
 5481   assert (UseCompressedOops, "should be compressed");
 5482   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5483   if (CheckCompressedOops) {
 5484     Label ok;
 5485     ExternalAddress src2(CompressedOops::base_addr());
 5486     const bool is_src2_reachable = reachable(src2);
 5487     if (!is_src2_reachable) {
 5488       push(rscratch1);  // cmpptr trashes rscratch1
 5489     }
 5490     cmpptr(r12_heapbase, src2, rscratch1);
 5491     jcc(Assembler::equal, ok);
 5492     STOP(msg);
 5493     bind(ok);
 5494     if (!is_src2_reachable) {
 5495       pop(rscratch1);
 5496     }
 5497   }
 5498 }
 5499 #endif
 5500 
 5501 // Algorithm must match oop.inline.hpp encode_heap_oop.
 5502 void MacroAssembler::encode_heap_oop(Register r) {
 5503 #ifdef ASSERT
 5504   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
 5505 #endif
 5506   verify_oop_msg(r, "broken oop in encode_heap_oop");
 5507   if (CompressedOops::base() == nullptr) {
 5508     if (CompressedOops::shift() != 0) {
 5509       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5510       shrq(r, LogMinObjAlignmentInBytes);
 5511     }
 5512     return;
 5513   }
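        // A null oop must encode to zero: substitute the heap base for null so the subtraction below yields 0.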
 5514   testq(r, r);
 5515   cmovq(Assembler::equal, r, r12_heapbase);
 5516   subq(r, r12_heapbase);
 5517   shrq(r, LogMinObjAlignmentInBytes);
 5518 }
 5519 
 5520 void MacroAssembler::encode_heap_oop_not_null(Register r) {
 5521 #ifdef ASSERT
 5522   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
 5523   if (CheckCompressedOops) {
 5524     Label ok;
 5525     testq(r, r);
 5526     jcc(Assembler::notEqual, ok);
 5527     STOP("null oop passed to encode_heap_oop_not_null");
 5528     bind(ok);
 5529   }
 5530 #endif
 5531   verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
 5532   if (CompressedOops::base() != nullptr) {
 5533     subq(r, r12_heapbase);
 5534   }
 5535   if (CompressedOops::shift() != 0) {
 5536     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5537     shrq(r, LogMinObjAlignmentInBytes);
 5538   }
 5539 }
 5540 
 5541 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
 5542 #ifdef ASSERT
 5543   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
 5544   if (CheckCompressedOops) {
 5545     Label ok;
 5546     testq(src, src);
 5547     jcc(Assembler::notEqual, ok);
 5548     STOP("null oop passed to encode_heap_oop_not_null2");
 5549     bind(ok);
 5550   }
 5551 #endif
 5552   verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
 5553   if (dst != src) {
 5554     movq(dst, src);
 5555   }
 5556   if (CompressedOops::base() != nullptr) {
 5557     subq(dst, r12_heapbase);
 5558   }
 5559   if (CompressedOops::shift() != 0) {
 5560     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5561     shrq(dst, LogMinObjAlignmentInBytes);
 5562   }
 5563 }
 5564 
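      // Conceptually the inverse of encode_heap_oop (sketch; oop.inline.hpp is authoritative):
      //   oop = (narrow == 0) ? nullptr : CompressedOops::base() + ((uintptr_t)narrow << shift)
      // The jccb below uses the flags set by shlq, so a zero narrow oop skips the base add.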
 5565 void  MacroAssembler::decode_heap_oop(Register r) {
 5566 #ifdef ASSERT
 5567   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
 5568 #endif
 5569   if (CompressedOops::base() == nullptr) {
 5570     if (CompressedOops::shift() != 0) {
 5571       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5572       shlq(r, LogMinObjAlignmentInBytes);
 5573     }
 5574   } else {
 5575     Label done;
 5576     shlq(r, LogMinObjAlignmentInBytes);
 5577     jccb(Assembler::equal, done);
 5578     addq(r, r12_heapbase);
 5579     bind(done);
 5580   }
 5581   verify_oop_msg(r, "broken oop in decode_heap_oop");
 5582 }
 5583 
 5584 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
 5585   // Note: it will change flags
 5586   assert (UseCompressedOops, "should only be used for compressed headers");
 5587   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5588   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5589   // vtableStubs also counts instructions in pd_code_size_limit.
 5590   // Also do not verify_oop as this is called by verify_oop.
 5591   if (CompressedOops::shift() != 0) {
 5592     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5593     shlq(r, LogMinObjAlignmentInBytes);
 5594     if (CompressedOops::base() != nullptr) {
 5595       addq(r, r12_heapbase);
 5596     }
 5597   } else {
 5598     assert (CompressedOops::base() == nullptr, "sanity");
 5599   }
 5600 }
 5601 
 5602 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
 5603   // Note: it will change flags
 5604   assert (UseCompressedOops, "should only be used for compressed headers");
 5605   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5606   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5607   // vtableStubs also counts instructions in pd_code_size_limit.
 5608   // Also do not verify_oop as this is called by verify_oop.
 5609   if (CompressedOops::shift() != 0) {
 5610     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
 5611     if (LogMinObjAlignmentInBytes == Address::times_8) {
 5612       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
 5613     } else {
 5614       if (dst != src) {
 5615         movq(dst, src);
 5616       }
 5617       shlq(dst, LogMinObjAlignmentInBytes);
 5618       if (CompressedOops::base() != nullptr) {
 5619         addq(dst, r12_heapbase);
 5620       }
 5621     }
 5622   } else {
 5623     assert (CompressedOops::base() == nullptr, "sanity");
 5624     if (dst != src) {
 5625       movq(dst, src);
 5626     }
 5627   }
 5628 }
 5629 
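      // Sketch of the encoding: narrowKlass = (Klass* - CompressedKlassPointers::base()) >> shift.
      // When dumping for the AOT code cache the base is loaded through a relocatable
      // ExternalAddress instead of an immediate (see the AOTCodeCache::is_on_for_dump()
      // branches below), presumably so cached code can be bound to a different base at load time.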
 5630 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
 5631   BLOCK_COMMENT("encode_klass_not_null {");
 5632   assert_different_registers(r, tmp);
 5633   if (CompressedKlassPointers::base() != nullptr) {
 5634     if (AOTCodeCache::is_on_for_dump()) {
 5635       movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
 5636     } else {
 5637       movptr(tmp, (intptr_t)CompressedKlassPointers::base());
 5638     }
 5639     subq(r, tmp);
 5640   }
 5641   if (CompressedKlassPointers::shift() != 0) {
 5642     shrq(r, CompressedKlassPointers::shift());
 5643   }
 5644   BLOCK_COMMENT("} encode_klass_not_null");
 5645 }
 5646 
 5647 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
 5648   BLOCK_COMMENT("encode_and_move_klass_not_null {");
 5649   assert_different_registers(src, dst);
 5650   if (CompressedKlassPointers::base() != nullptr) {
 5651     movptr(dst, -(intptr_t)CompressedKlassPointers::base());
 5652     addq(dst, src);
 5653   } else {
 5654     movptr(dst, src);
 5655   }
 5656   if (CompressedKlassPointers::shift() != 0) {
 5657     shrq(dst, CompressedKlassPointers::shift());
 5658   }
 5659   BLOCK_COMMENT("} encode_and_move_klass_not_null");
 5660 }
 5661 
 5662 void  MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
 5663   BLOCK_COMMENT("decode_klass_not_null {");
 5664   assert_different_registers(r, tmp);
 5665   // Note: it will change flags
 5666   assert(UseCompressedClassPointers, "should only be used for compressed headers");
 5667   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5668   // vtableStubs also counts instructions in pd_code_size_limit.
 5669   // Also do not verify_oop as this is called by verify_oop.
 5670   if (CompressedKlassPointers::shift() != 0) {
 5671     shlq(r, CompressedKlassPointers::shift());
 5672   }
 5673   if (CompressedKlassPointers::base() != nullptr) {
 5674     if (AOTCodeCache::is_on_for_dump()) {
 5675       movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr()));
 5676     } else {
 5677       movptr(tmp, (intptr_t)CompressedKlassPointers::base());
 5678     }
 5679     addq(r, tmp);
 5680   }
 5681   BLOCK_COMMENT("} decode_klass_not_null");
 5682 }
 5683 
 5684 void  MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
 5685   BLOCK_COMMENT("decode_and_move_klass_not_null {");
 5686   assert_different_registers(src, dst);
 5687   // Note: it will change flags
 5688   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5689   // Cannot assert, unverified entry point counts instructions (see .ad file)
 5690   // vtableStubs also counts instructions in pd_code_size_limit.
 5691   // Also do not verify_oop as this is called by verify_oop.
 5692 
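        // Decoding computes klass = base + ((uint64_t)narrow << shift). When the shift fits a
        // leaq scale (<= 3) it is folded into the lea below; for larger shifts the code uses the
        // equivalent form klass = ((base >> shift) + narrow) << shift, which relies on the
        // encoding base being aligned to (1 << shift).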
 5693   if (CompressedKlassPointers::base() == nullptr &&
 5694       CompressedKlassPointers::shift() == 0) {
 5695     // The best case scenario is that there is no base or shift. Then it is already
 5696     // a pointer that needs nothing but a register rename.
 5697     movl(dst, src);
 5698   } else {
 5699     if (CompressedKlassPointers::shift() <= Address::times_8) {
 5700       if (CompressedKlassPointers::base() != nullptr) {
 5701         movptr(dst, (intptr_t)CompressedKlassPointers::base());
 5702       } else {
 5703         xorq(dst, dst);
 5704       }
 5705       if (CompressedKlassPointers::shift() != 0) {
 5706         assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?");
 5707         leaq(dst, Address(dst, src, Address::times_8, 0));
 5708       } else {
 5709         addq(dst, src);
 5710       }
 5711     } else {
 5712       if (CompressedKlassPointers::base() != nullptr) {
 5713         const intptr_t base_right_shifted =
 5714             (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
 5715         movptr(dst, base_right_shifted);
 5716       } else {
 5717         xorq(dst, dst);
 5718       }
 5719       addq(dst, src);
 5720       shlq(dst, CompressedKlassPointers::shift());
 5721     }
 5722   }
 5723   BLOCK_COMMENT("} decode_and_move_klass_not_null");
 5724 }
 5725 
 5726 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
 5727   assert (UseCompressedOops, "should only be used for compressed headers");
 5728   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5729   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5730   int oop_index = oop_recorder()->find_index(obj);
 5731   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5732   mov_narrow_oop(dst, oop_index, rspec);
 5733 }
 5734 
 5735 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
 5736   assert (UseCompressedOops, "should only be used for compressed headers");
 5737   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5738   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5739   int oop_index = oop_recorder()->find_index(obj);
 5740   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5741   mov_narrow_oop(dst, oop_index, rspec);
 5742 }
 5743 
 5744 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
 5745   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5746   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5747   int klass_index = oop_recorder()->find_index(k);
 5748   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5749   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5750 }
 5751 
 5752 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
 5753   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5754   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5755   int klass_index = oop_recorder()->find_index(k);
 5756   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5757   mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5758 }
 5759 
 5760 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
 5761   assert (UseCompressedOops, "should only be used for compressed headers");
 5762   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5763   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5764   int oop_index = oop_recorder()->find_index(obj);
 5765   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5766   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5767 }
 5768 
 5769 void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
 5770   assert (UseCompressedOops, "should only be used for compressed headers");
 5771   assert (Universe::heap() != nullptr, "java heap should be initialized");
 5772   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5773   int oop_index = oop_recorder()->find_index(obj);
 5774   RelocationHolder rspec = oop_Relocation::spec(oop_index);
 5775   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
 5776 }
 5777 
 5778 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
 5779   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5780   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5781   int klass_index = oop_recorder()->find_index(k);
 5782   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5783   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5784 }
 5785 
 5786 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
 5787   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 5788   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
 5789   int klass_index = oop_recorder()->find_index(k);
 5790   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
 5791   Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 5792 }
 5793 
 5794 void MacroAssembler::reinit_heapbase() {
 5795   if (UseCompressedOops) {
 5796     if (Universe::heap() != nullptr) {
 5797       if (CompressedOops::base() == nullptr) {
 5798         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
 5799       } else {
 5800         mov64(r12_heapbase, (int64_t)CompressedOops::base());
 5801       }
 5802     } else {
 5803       movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
 5804     }
 5805   }
 5806 }
 5807 
 5808 #if COMPILER2_OR_JVMCI
 5809 
 5810 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
 5811 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5812   // cnt - number of qwords (8-byte words).
 5813   // base - start address, qword aligned.
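        // Strategy (sketch): jump into the size check, zero 64 bytes per iteration while at
        // least 8 qwords remain, then clear the 0-7 qword tail with a masked 64-byte store
        // (AVX-512 path) or with 32-byte and 8-byte stores.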
 5814   Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
 5815   bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
 5816   if (use64byteVector) {
 5817     vpxor(xtmp, xtmp, xtmp, AVX_512bit);
 5818   } else if (MaxVectorSize >= 32) {
 5819     vpxor(xtmp, xtmp, xtmp, AVX_256bit);
 5820   } else {
 5821     pxor(xtmp, xtmp);
 5822   }
 5823   jmp(L_zero_64_bytes);
 5824 
 5825   BIND(L_loop);
 5826   if (MaxVectorSize >= 32) {
 5827     fill64(base, 0, xtmp, use64byteVector);
 5828   } else {
 5829     movdqu(Address(base,  0), xtmp);
 5830     movdqu(Address(base, 16), xtmp);
 5831     movdqu(Address(base, 32), xtmp);
 5832     movdqu(Address(base, 48), xtmp);
 5833   }
 5834   addptr(base, 64);
 5835 
 5836   BIND(L_zero_64_bytes);
 5837   subptr(cnt, 8);
 5838   jccb(Assembler::greaterEqual, L_loop);
 5839 
 5840   // Copy trailing 64 bytes
 5841   if (use64byteVector) {
 5842     addptr(cnt, 8);
 5843     jccb(Assembler::equal, L_end);
 5844     fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
 5845     jmp(L_end);
 5846   } else {
 5847     addptr(cnt, 4);
 5848     jccb(Assembler::less, L_tail);
 5849     if (MaxVectorSize >= 32) {
 5850       vmovdqu(Address(base, 0), xtmp);
 5851     } else {
 5852       movdqu(Address(base,  0), xtmp);
 5853       movdqu(Address(base, 16), xtmp);
 5854     }
 5855   }
 5856   addptr(base, 32);
 5857   subptr(cnt, 4);
 5858 
 5859   BIND(L_tail);
 5860   addptr(cnt, 4);
 5861   jccb(Assembler::lessEqual, L_end);
 5862   if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
 5863     fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
 5864   } else {
 5865     decrement(cnt);
 5866 
 5867     BIND(L_sloop);
 5868     movq(Address(base, 0), xtmp);
 5869     addptr(base, 8);
 5870     decrement(cnt);
 5871     jccb(Assembler::greaterEqual, L_sloop);
 5872   }
 5873   BIND(L_end);
 5874 }
 5875 
 5876 // Clearing constant sized memory using YMM/ZMM registers.
 5877 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
 5878   assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
 5879   bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
 5880 
 5881   int vector64_count = (cnt & (~0x7)) >> 3;
 5882   cnt = cnt & 0x7;
 5883   const int fill64_per_loop = 4;
 5884   const int max_unrolled_fill64 = 8;
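        // Example: cnt == 19 qwords gives vector64_count == 2 (two full 64-byte blocks) and
        // leaves cnt == 3 qwords for the masked tail store in the switch below.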
 5885 
 5886   // 64 byte initialization loop.
 5887   vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
 5888   int start64 = 0;
 5889   if (vector64_count > max_unrolled_fill64) {
 5890     Label LOOP;
 5891     Register index = rtmp;
 5892 
 5893     start64 = vector64_count - (vector64_count % fill64_per_loop);
 5894 
 5895     movl(index, 0);
 5896     BIND(LOOP);
 5897     for (int i = 0; i < fill64_per_loop; i++) {
 5898       fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
 5899     }
 5900     addl(index, fill64_per_loop * 64);
 5901     cmpl(index, start64 * 64);
 5902     jccb(Assembler::less, LOOP);
 5903   }
 5904   for (int i = start64; i < vector64_count; i++) {
 5905     fill64(base, i * 64, xtmp, use64byteVector);
 5906   }
 5907 
 5908   // Clear remaining 64 byte tail.
 5909   int disp = vector64_count * 64;
 5910   if (cnt) {
 5911     switch (cnt) {
 5912       case 1:
 5913         movq(Address(base, disp), xtmp);
 5914         break;
 5915       case 2:
 5916         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
 5917         break;
 5918       case 3:
 5919         movl(rtmp, 0x7);
 5920         kmovwl(mask, rtmp);
 5921         evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
 5922         break;
 5923       case 4:
 5924         evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5925         break;
 5926       case 5:
 5927         if (use64byteVector) {
 5928           movl(rtmp, 0x1F);
 5929           kmovwl(mask, rtmp);
 5930           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5931         } else {
 5932           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5933           movq(Address(base, disp + 32), xtmp);
 5934         }
 5935         break;
 5936       case 6:
 5937         if (use64byteVector) {
 5938           movl(rtmp, 0x3F);
 5939           kmovwl(mask, rtmp);
 5940           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5941         } else {
 5942           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5943           evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
 5944         }
 5945         break;
 5946       case 7:
 5947         if (use64byteVector) {
 5948           movl(rtmp, 0x7F);
 5949           kmovwl(mask, rtmp);
 5950           evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
 5951         } else {
 5952           evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
 5953           movl(rtmp, 0x7);
 5954           kmovwl(mask, rtmp);
 5955           evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
 5956         }
 5957         break;
 5958       default:
 5959         fatal("Unexpected length : %d\n",cnt);
 5960         break;
 5961     }
 5962   }
 5963 }
 5964 
 5965 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
 5966                                bool is_large, KRegister mask) {
 5967   // cnt      - number of qwords (8-byte words).
 5968   // base     - start address, qword aligned.
 5969   // is_large - if optimizers know cnt is larger than InitArrayShortSize
 5970   assert(base==rdi, "base register must be rdi for rep stos");
 5971   assert(tmp==rax,   "tmp register must be rax for rep stos");
 5972   assert(cnt==rcx,   "cnt register must be rcx for rep stos");
 5973   assert(InitArrayShortSize % BytesPerLong == 0,
 5974     "InitArrayShortSize should be the multiple of BytesPerLong");
 5975 
 5976   Label DONE;
 5977   if (!is_large || !UseXMMForObjInit) {
 5978     xorptr(tmp, tmp);
 5979   }
 5980 
 5981   if (!is_large) {
 5982     Label LOOP, LONG;
 5983     cmpptr(cnt, InitArrayShortSize/BytesPerLong);
 5984     jccb(Assembler::greater, LONG);
 5985 
 5986     decrement(cnt);
 5987     jccb(Assembler::negative, DONE); // Zero length
 5988 
 5989     // Use individual pointer-sized stores for small counts:
 5990     BIND(LOOP);
 5991     movptr(Address(base, cnt, Address::times_ptr), tmp);
 5992     decrement(cnt);
 5993     jccb(Assembler::greaterEqual, LOOP);
 5994     jmpb(DONE);
 5995 
 5996     BIND(LONG);
 5997   }
 5998 
 5999   // Use longer rep-prefixed ops for non-small counts:
 6000   if (UseFastStosb) {
 6001     shlptr(cnt, 3); // convert to number of bytes
 6002     rep_stosb();
 6003   } else if (UseXMMForObjInit) {
 6004     xmm_clear_mem(base, cnt, tmp, xtmp, mask);
 6005   } else {
 6006     rep_stos();
 6007   }
 6008 
 6009   BIND(DONE);
 6010 }
 6011 
 6012 #endif //COMPILER2_OR_JVMCI
 6013 
 6014 
 6015 void MacroAssembler::generate_fill(BasicType t, bool aligned,
 6016                                    Register to, Register value, Register count,
 6017                                    Register rtmp, XMMRegister xtmp) {
 6018   ShortBranchVerifier sbv(this);
 6019   assert_different_registers(to, value, count, rtmp);
 6020   Label L_exit;
 6021   Label L_fill_2_bytes, L_fill_4_bytes;
 6022 
 6023 #if defined(COMPILER2)
 6024   if (MaxVectorSize >= 32 &&
 6025      VM_Version::supports_avx512vlbw() &&
 6026      VM_Version::supports_bmi2()) {
 6027     generate_fill_avx3(t, to, value, count, rtmp, xtmp);
 6028     return;
 6029   }
 6030 #endif
 6031 
 6032   int shift = -1;
 6033   switch (t) {
 6034     case T_BYTE:
 6035       shift = 2;
 6036       break;
 6037     case T_SHORT:
 6038       shift = 1;
 6039       break;
 6040     case T_INT:
 6041       shift = 0;
 6042       break;
 6043     default: ShouldNotReachHere();
 6044   }
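        // 'shift' is log2 of the number of elements per 4-byte word, so (n << shift) elements
        // always span n * 4 bytes; e.g. 8 << shift elements is 32 bytes for each type above.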
 6045 
 6046   if (t == T_BYTE) {
 6047     andl(value, 0xff);
 6048     movl(rtmp, value);
 6049     shll(rtmp, 8);
 6050     orl(value, rtmp);
 6051   }
 6052   if (t == T_SHORT) {
 6053     andl(value, 0xffff);
 6054   }
 6055   if (t == T_BYTE || t == T_SHORT) {
 6056     movl(rtmp, value);
 6057     shll(rtmp, 16);
 6058     orl(value, rtmp);
 6059   }
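        // 'value' now holds the fill pattern replicated across 32 bits (e.g. a byte fill of
        // 0xAB becomes 0xABABABAB, a short fill of 0xABCD becomes 0xABCDABCD); T_INT values
        // are used as-is.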
 6060 
 6061   cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
 6062   jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
 6063   if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 6064     Label L_skip_align2;
 6065     // align source address at 4 bytes address boundary
 6066     if (t == T_BYTE) {
 6067       Label L_skip_align1;
 6068       // One byte misalignment happens only for byte arrays
 6069       testptr(to, 1);
 6070       jccb(Assembler::zero, L_skip_align1);
 6071       movb(Address(to, 0), value);
 6072       increment(to);
 6073       decrement(count);
 6074       BIND(L_skip_align1);
 6075     }
 6076     // Two bytes misalignment happens only for byte and short (char) arrays
 6077     testptr(to, 2);
 6078     jccb(Assembler::zero, L_skip_align2);
 6079     movw(Address(to, 0), value);
 6080     addptr(to, 2);
 6081     subptr(count, 1<<(shift-1));
 6082     BIND(L_skip_align2);
 6083   }
 6084   {
 6085     Label L_fill_32_bytes;
 6086     if (!UseUnalignedLoadStores) {
 6087       // align to 8 bytes, we know we are 4 byte aligned to start
 6088       testptr(to, 4);
 6089       jccb(Assembler::zero, L_fill_32_bytes);
 6090       movl(Address(to, 0), value);
 6091       addptr(to, 4);
 6092       subptr(count, 1<<shift);
 6093     }
 6094     BIND(L_fill_32_bytes);
 6095     {
 6096       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
 6097       movdl(xtmp, value);
 6098       if (UseAVX >= 2 && UseUnalignedLoadStores) {
 6099         Label L_check_fill_32_bytes;
 6100         if (UseAVX > 2) {
 6101           // Fill 64-byte chunks
 6102           Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
 6103 
 6104           // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
 6105           cmpptr(count, VM_Version::avx3_threshold());
 6106           jccb(Assembler::below, L_check_fill_64_bytes_avx2);
 6107 
 6108           vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
 6109 
 6110           subptr(count, 16 << shift);
 6111           jcc(Assembler::less, L_check_fill_32_bytes);
 6112           align(16);
 6113 
 6114           BIND(L_fill_64_bytes_loop_avx3);
 6115           evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
 6116           addptr(to, 64);
 6117           subptr(count, 16 << shift);
 6118           jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
 6119           jmpb(L_check_fill_32_bytes);
 6120 
 6121           BIND(L_check_fill_64_bytes_avx2);
 6122         }
 6123         // Fill 64-byte chunks
 6124         vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
 6125 
 6126         subptr(count, 16 << shift);
 6127         jcc(Assembler::less, L_check_fill_32_bytes);
 6128 
 6129         // align data for 64-byte chunks
 6130         Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
 6131         if (EnableX86ECoreOpts) {
 6132             // align 'big' arrays to cache lines to minimize split_stores
 6133             cmpptr(count, 96 << shift);
 6134             jcc(Assembler::below, L_fill_64_bytes_loop);
 6135 
 6136             // Find the bytes needed for alignment
 6137             movptr(rtmp, to);
 6138             andptr(rtmp, 0x1c);
 6139             jcc(Assembler::zero, L_fill_64_bytes_loop);
 6140             negptr(rtmp);           // compute 32 - (to & 0x1c), the number of bytes
 6141             addptr(rtmp, 32);       // needed to reach the next 32-byte boundary
 6142             shrptr(rtmp, 2 - shift);// get number of elements from bytes
 6143             subptr(count, rtmp);    // adjust count by number of elements
 6144 
 6145             align(16);
 6146             BIND(L_align_64_bytes_loop);
 6147             movdl(Address(to, 0), xtmp);
 6148             addptr(to, 4);
 6149             subptr(rtmp, 1 << shift);
 6150             jcc(Assembler::greater, L_align_64_bytes_loop);
 6151         }
 6152 
 6153         align(16);
 6154         BIND(L_fill_64_bytes_loop);
 6155         vmovdqu(Address(to, 0), xtmp);
 6156         vmovdqu(Address(to, 32), xtmp);
 6157         addptr(to, 64);
 6158         subptr(count, 16 << shift);
 6159         jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
 6160 
 6161         align(16);
 6162         BIND(L_check_fill_32_bytes);
 6163         addptr(count, 8 << shift);
 6164         jccb(Assembler::less, L_check_fill_8_bytes);
 6165         vmovdqu(Address(to, 0), xtmp);
 6166         addptr(to, 32);
 6167         subptr(count, 8 << shift);
 6168 
 6169         BIND(L_check_fill_8_bytes);
 6170         // clean upper bits of YMM registers
 6171         movdl(xtmp, value);
 6172         pshufd(xtmp, xtmp, 0);
 6173       } else {
 6174         // Fill 32-byte chunks
 6175         pshufd(xtmp, xtmp, 0);
 6176 
 6177         subptr(count, 8 << shift);
 6178         jcc(Assembler::less, L_check_fill_8_bytes);
 6179         align(16);
 6180 
 6181         BIND(L_fill_32_bytes_loop);
 6182 
 6183         if (UseUnalignedLoadStores) {
 6184           movdqu(Address(to, 0), xtmp);
 6185           movdqu(Address(to, 16), xtmp);
 6186         } else {
 6187           movq(Address(to, 0), xtmp);
 6188           movq(Address(to, 8), xtmp);
 6189           movq(Address(to, 16), xtmp);
 6190           movq(Address(to, 24), xtmp);
 6191         }
 6192 
 6193         addptr(to, 32);
 6194         subptr(count, 8 << shift);
 6195         jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
 6196 
 6197         BIND(L_check_fill_8_bytes);
 6198       }
 6199       addptr(count, 8 << shift);
 6200       jccb(Assembler::zero, L_exit);
 6201       jmpb(L_fill_8_bytes);
 6202 
 6203       //
 6204       // length is too short, just fill qwords
 6205       //
 6206       align(16);
 6207       BIND(L_fill_8_bytes_loop);
 6208       movq(Address(to, 0), xtmp);
 6209       addptr(to, 8);
 6210       BIND(L_fill_8_bytes);
 6211       subptr(count, 1 << (shift + 1));
 6212       jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
 6213     }
 6214   }
 6215 
 6216   Label L_fill_4_bytes_loop;
 6217   testl(count, 1 << shift);
 6218   jccb(Assembler::zero, L_fill_2_bytes);
 6219 
 6220   align(16);
 6221   BIND(L_fill_4_bytes_loop);
 6222   movl(Address(to, 0), value);
 6223   addptr(to, 4);
 6224 
 6225   BIND(L_fill_4_bytes);
 6226   subptr(count, 1 << shift);
 6227   jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
 6228 
 6229   if (t == T_BYTE || t == T_SHORT) {
 6230     Label L_fill_byte;
 6231     BIND(L_fill_2_bytes);
 6232     // fill trailing 2 bytes
 6233     testl(count, 1<<(shift-1));
 6234     jccb(Assembler::zero, L_fill_byte);
 6235     movw(Address(to, 0), value);
 6236     if (t == T_BYTE) {
 6237       addptr(to, 2);
 6238       BIND(L_fill_byte);
 6239       // fill trailing byte
 6240       testl(count, 1);
 6241       jccb(Assembler::zero, L_exit);
 6242       movb(Address(to, 0), value);
 6243     } else {
 6244       BIND(L_fill_byte);
 6245     }
 6246   } else {
 6247     BIND(L_fill_2_bytes);
 6248   }
 6249   BIND(L_exit);
 6250 }
 6251 
 6252 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
 6253   switch(type) {
 6254     case T_BYTE:
 6255     case T_BOOLEAN:
 6256       evpbroadcastb(dst, src, vector_len);
 6257       break;
 6258     case T_SHORT:
 6259     case T_CHAR:
 6260       evpbroadcastw(dst, src, vector_len);
 6261       break;
 6262     case T_INT:
 6263     case T_FLOAT:
 6264       evpbroadcastd(dst, src, vector_len);
 6265       break;
 6266     case T_LONG:
 6267     case T_DOUBLE:
 6268       evpbroadcastq(dst, src, vector_len);
 6269       break;
 6270     default:
 6271       fatal("Unhandled type : %s", type2name(type));
 6272       break;
 6273   }
 6274 }
 6275 
 6276 // Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
 6277 //
 6278 // @IntrinsicCandidate
 6279 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
 6280 //         char[] sa, int sp, byte[] da, int dp, int len) {
 6281 //     int i = 0;
 6282 //     for (; i < len; i++) {
 6283 //         char c = sa[sp++];
 6284 //         if (c > '\u00FF')
 6285 //             break;
 6286 //         da[dp++] = (byte) c;
 6287 //     }
 6288 //     return i;
 6289 // }
 6290 //
 6291 // @IntrinsicCandidate
 6292 // int java.lang.StringCoding.encodeISOArray0(
 6293 //         byte[] sa, int sp, byte[] da, int dp, int len) {
 6294 //   int i = 0;
 6295 //   for (; i < len; i++) {
 6296 //     char c = StringUTF16.getChar(sa, sp++);
 6297 //     if (c > '\u00FF')
 6298 //       break;
 6299 //     da[dp++] = (byte) c;
 6300 //   }
 6301 //   return i;
 6302 // }
 6303 //
 6304 // @IntrinsicCandidate
 6305 // int java.lang.StringCoding.encodeAsciiArray0(
 6306 //         char[] sa, int sp, byte[] da, int dp, int len) {
 6307 //   int i = 0;
 6308 //   for (; i < len; i++) {
 6309 //     char c = sa[sp++];
 6310 //     if (c >= '\u0080')
 6311 //       break;
 6312 //     da[dp++] = (byte) c;
 6313 //   }
 6314 //   return i;
 6315 // }
 6316 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
 6317   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 6318   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 6319   Register tmp5, Register result, bool ascii) {
 6320 
 6321   // rsi: src
 6322   // rdi: dst
 6323   // rdx: len
 6324   // rcx: tmp5
 6325   // rax: result
 6326   ShortBranchVerifier sbv(this);
 6327   assert_different_registers(src, dst, len, tmp5, result);
 6328   Label L_done, L_copy_1_char, L_copy_1_char_exit;
 6329 
 6330   int mask = ascii ? 0xff80ff80 : 0xff00ff00;
 6331   int short_mask = ascii ? 0xff80 : 0xff00;
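        // A char with any bit of 'mask' set cannot be encoded: chars >= 0x80 for ASCII,
        // chars >= 0x100 for ISO-8859-1. 'mask' covers two packed chars per 32-bit lane,
        // 'short_mask' a single char.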
 6332 
 6333   // set result
 6334   xorl(result, result);
 6335   // check for zero length
 6336   testl(len, len);
 6337   jcc(Assembler::zero, L_done);
 6338 
 6339   movl(result, len);
 6340 
 6341   // Setup pointers
 6342   lea(src, Address(src, len, Address::times_2)); // char[]
 6343   lea(dst, Address(dst, len, Address::times_1)); // byte[]
 6344   negptr(len);
 6345 
 6346   if (UseSSE42Intrinsics || UseAVX >= 2) {
 6347     Label L_copy_8_chars, L_copy_8_chars_exit;
 6348     Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
 6349 
 6350     if (UseAVX >= 2) {
 6351       Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
 6352       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6353       movdl(tmp1Reg, tmp5);
 6354       vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
 6355       jmp(L_chars_32_check);
 6356 
 6357       bind(L_copy_32_chars);
 6358       vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
 6359       vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
 6360       vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6361       vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6362       jccb(Assembler::notZero, L_copy_32_chars_exit);
 6363       vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
 6364       vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
 6365       vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
 6366 
 6367       bind(L_chars_32_check);
 6368       addptr(len, 32);
 6369       jcc(Assembler::lessEqual, L_copy_32_chars);
 6370 
 6371       bind(L_copy_32_chars_exit);
 6372       subptr(len, 16);
 6373       jccb(Assembler::greater, L_copy_16_chars_exit);
 6374 
 6375     } else if (UseSSE42Intrinsics) {
 6376       movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
 6377       movdl(tmp1Reg, tmp5);
 6378       pshufd(tmp1Reg, tmp1Reg, 0);
 6379       jmpb(L_chars_16_check);
 6380     }
 6381 
 6382     bind(L_copy_16_chars);
 6383     if (UseAVX >= 2) {
 6384       vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
 6385       vptest(tmp2Reg, tmp1Reg);
 6386       jcc(Assembler::notZero, L_copy_16_chars_exit);
 6387       vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
 6388       vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
 6389     } else {
 6390       if (UseAVX > 0) {
 6391         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6392         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6393         vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
 6394       } else {
 6395         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
 6396         por(tmp2Reg, tmp3Reg);
 6397         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
 6398         por(tmp2Reg, tmp4Reg);
 6399       }
 6400       ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
 6401       jccb(Assembler::notZero, L_copy_16_chars_exit);
 6402       packuswb(tmp3Reg, tmp4Reg);
 6403     }
 6404     movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
 6405 
 6406     bind(L_chars_16_check);
 6407     addptr(len, 16);
 6408     jcc(Assembler::lessEqual, L_copy_16_chars);
 6409 
 6410     bind(L_copy_16_chars_exit);
 6411     if (UseAVX >= 2) {
 6412       // clean upper bits of YMM registers
 6413       vpxor(tmp2Reg, tmp2Reg);
 6414       vpxor(tmp3Reg, tmp3Reg);
 6415       vpxor(tmp4Reg, tmp4Reg);
 6416       movdl(tmp1Reg, tmp5);
 6417       pshufd(tmp1Reg, tmp1Reg, 0);
 6418     }
 6419     subptr(len, 8);
 6420     jccb(Assembler::greater, L_copy_8_chars_exit);
 6421 
 6422     bind(L_copy_8_chars);
 6423     movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
 6424     ptest(tmp3Reg, tmp1Reg);
 6425     jccb(Assembler::notZero, L_copy_8_chars_exit);
 6426     packuswb(tmp3Reg, tmp1Reg);
 6427     movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
 6428     addptr(len, 8);
 6429     jccb(Assembler::lessEqual, L_copy_8_chars);
 6430 
 6431     bind(L_copy_8_chars_exit);
 6432     subptr(len, 8);
 6433     jccb(Assembler::zero, L_done);
 6434   }
 6435 
 6436   bind(L_copy_1_char);
 6437   load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
 6438   testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
 6439   jccb(Assembler::notZero, L_copy_1_char_exit);
 6440   movb(Address(dst, len, Address::times_1, 0), tmp5);
 6441   addptr(len, 1);
 6442   jccb(Assembler::less, L_copy_1_char);
 6443 
 6444   bind(L_copy_1_char_exit);
 6445   addptr(result, len); // len is negative count of not processed elements
 6446 
 6447   bind(L_done);
 6448 }
 6449 
 6450 /**
 6451  * Helper for multiply_to_len(): adds src1 and src2 into the 128-bit value dest_hi:dest_lo.
 6452  */
 6453 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
 6454   addq(dest_lo, src1);
 6455   adcq(dest_hi, 0);
 6456   addq(dest_lo, src2);
 6457   adcq(dest_hi, 0);
 6458 }
 6459 
 6460 /**
 6461  * Multiply 64 bit by 64 bit first loop.
 6462  */
 6463 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
 6464                                            Register y, Register y_idx, Register z,
 6465                                            Register carry, Register product,
 6466                                            Register idx, Register kdx) {
 6467   //
 6468   //  jlong carry, x[], y[], z[];
 6469   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 6470   //    huge_128 product = y[idx] * x[xstart] + carry;
 6471   //    z[kdx] = (jlong)product;
 6472   //    carry  = (jlong)(product >>> 64);
 6473   //  }
 6474   //  z[xstart] = carry;
 6475   //
 6476 
 6477   Label L_first_loop, L_first_loop_exit;
 6478   Label L_one_x, L_one_y, L_multiply;
 6479 
 6480   decrementl(xstart);
 6481   jcc(Assembler::negative, L_one_x);
 6482 
 6483   movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 6484   rorq(x_xstart, 32); // convert big-endian to little-endian
 6485 
 6486   bind(L_first_loop);
 6487   decrementl(idx);
 6488   jcc(Assembler::negative, L_first_loop_exit);
 6489   decrementl(idx);
 6490   jcc(Assembler::negative, L_one_y);
 6491   movq(y_idx, Address(y, idx, Address::times_4,  0));
 6492   rorq(y_idx, 32); // convert big-endian to little-endian
 6493   bind(L_multiply);
 6494   movq(product, x_xstart);
 6495   mulq(y_idx); // product(rax) * y_idx -> rdx:rax
 6496   addq(product, carry);
 6497   adcq(rdx, 0);
 6498   subl(kdx, 2);
 6499   movl(Address(z, kdx, Address::times_4,  4), product);
 6500   shrq(product, 32);
 6501   movl(Address(z, kdx, Address::times_4,  0), product);
 6502   movq(carry, rdx);
 6503   jmp(L_first_loop);
 6504 
 6505   bind(L_one_y);
 6506   movl(y_idx, Address(y,  0));
 6507   jmp(L_multiply);
 6508 
 6509   bind(L_one_x);
 6510   movl(x_xstart, Address(x,  0));
 6511   jmp(L_first_loop);
 6512 
 6513   bind(L_first_loop_exit);
 6514 }
 6515 
 6516 /**
 6517  * Multiply 64 bit by 64 bit and add 128 bit.
 6518  */
 6519 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
 6520                                             Register yz_idx, Register idx,
 6521                                             Register carry, Register product, int offset) {
 6522   //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
 6523   //     z[kdx] = (jlong)product;
 6524 
 6525   movq(yz_idx, Address(y, idx, Address::times_4,  offset));
 6526   rorq(yz_idx, 32); // convert big-endian to little-endian
 6527   movq(product, x_xstart);
 6528   mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
 6529   movq(yz_idx, Address(z, idx, Address::times_4,  offset));
 6530   rorq(yz_idx, 32); // convert big-endian to little-endian
 6531 
 6532   add2_with_carry(rdx, product, carry, yz_idx);
 6533 
 6534   movl(Address(z, idx, Address::times_4,  offset+4), product);
 6535   shrq(product, 32);
 6536   movl(Address(z, idx, Address::times_4,  offset), product);
 6537 
 6538 }
 6539 
 6540 /**
 6541  * Multiply 128 bit by 128 bit. Unrolled inner loop.
 6542  */
 6543 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
 6544                                              Register yz_idx, Register idx, Register jdx,
 6545                                              Register carry, Register product,
 6546                                              Register carry2) {
 6547   //   jlong carry, x[], y[], z[];
 6548   //   int kdx = ystart+1;
 6549   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 6550   //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
 6551   //     z[kdx+idx+1] = (jlong)product;
 6552   //     jlong carry2  = (jlong)(product >>> 64);
 6553   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
 6554   //     z[kdx+idx] = (jlong)product;
 6555   //     carry  = (jlong)(product >>> 64);
 6556   //   }
 6557   //   idx += 2;
 6558   //   if (idx > 0) {
 6559   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
 6560   //     z[kdx+idx] = (jlong)product;
 6561   //     carry  = (jlong)(product >>> 64);
 6562   //   }
 6563   //
 6564 
 6565   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 6566 
 6567   movl(jdx, idx);
 6568   andl(jdx, 0xFFFFFFFC);
 6569   shrl(jdx, 2);
 6570 
 6571   bind(L_third_loop);
 6572   subl(jdx, 1);
 6573   jcc(Assembler::negative, L_third_loop_exit);
 6574   subl(idx, 4);
 6575 
 6576   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
 6577   movq(carry2, rdx);
 6578 
 6579   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
 6580   movq(carry, rdx);
 6581   jmp(L_third_loop);
 6582 
 6583   bind (L_third_loop_exit);
 6584 
 6585   andl (idx, 0x3);
 6586   jcc(Assembler::zero, L_post_third_loop_done);
 6587 
 6588   Label L_check_1;
 6589   subl(idx, 2);
 6590   jcc(Assembler::negative, L_check_1);
 6591 
 6592   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
 6593   movq(carry, rdx);
 6594 
 6595   bind (L_check_1);
 6596   addl (idx, 0x2);
 6597   andl (idx, 0x1);
 6598   subl(idx, 1);
 6599   jcc(Assembler::negative, L_post_third_loop_done);
 6600 
 6601   movl(yz_idx, Address(y, idx, Address::times_4,  0));
 6602   movq(product, x_xstart);
 6603   mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
 6604   movl(yz_idx, Address(z, idx, Address::times_4,  0));
 6605 
 6606   add2_with_carry(rdx, product, yz_idx, carry);
 6607 
 6608   movl(Address(z, idx, Address::times_4,  0), product);
 6609   shrq(product, 32);
 6610 
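        // Only the low 32 bits of this partial result were stored above; the remaining high
        // bits, (rdx:product) >> 32, are reassembled below into the next 64-bit carry.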
 6611   shlq(rdx, 32);
 6612   orq(product, rdx);
 6613   movq(carry, product);
 6614 
 6615   bind(L_post_third_loop_done);
 6616 }
 6617 
 6618 /**
 6619  * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 6620  *
 6621  */
 6622 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
 6623                                                   Register carry, Register carry2,
 6624                                                   Register idx, Register jdx,
 6625                                                   Register yz_idx1, Register yz_idx2,
 6626                                                   Register tmp, Register tmp3, Register tmp4) {
 6627   assert(UseBMI2Instructions, "should be used only when BMI2 is available");
 6628 
 6629   //   jlong carry, x[], y[], z[];
 6630   //   int kdx = ystart+1;
 6631   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
 6632   //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
 6633   //     jlong carry2  = (jlong)(tmp3 >>> 64);
 6634   //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
 6635   //     carry  = (jlong)(tmp4 >>> 64);
 6636   //     z[kdx+idx+1] = (jlong)tmp3;
 6637   //     z[kdx+idx] = (jlong)tmp4;
 6638   //   }
 6639   //   idx += 2;
 6640   //   if (idx > 0) {
 6641   //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
 6642   //     z[kdx+idx] = (jlong)yz_idx1;
 6643   //     carry  = (jlong)(yz_idx1 >>> 64);
 6644   //   }
 6645   //
 6646 
 6647   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
 6648 
 6649   movl(jdx, idx);
 6650   andl(jdx, 0xFFFFFFFC);
 6651   shrl(jdx, 2);
 6652 
 6653   bind(L_third_loop);
 6654   subl(jdx, 1);
 6655   jcc(Assembler::negative, L_third_loop_exit);
 6656   subl(idx, 4);
 6657 
 6658   movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
 6659   rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
 6660   movq(yz_idx2, Address(y, idx, Address::times_4,  0));
 6661   rorxq(yz_idx2, yz_idx2, 32);
 6662 
 6663   mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
 6664   mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
 6665 
 6666   movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
 6667   rorxq(yz_idx1, yz_idx1, 32);
 6668   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 6669   rorxq(yz_idx2, yz_idx2, 32);
 6670 
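        // With ADX, adcxq reads and writes only CF while adoxq reads and writes only OF, so the
        // two independent carry chains below can be interleaved without saving flags in between.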
 6671   if (VM_Version::supports_adx()) {
 6672     adcxq(tmp3, carry);
 6673     adoxq(tmp3, yz_idx1);
 6674 
 6675     adcxq(tmp4, tmp);
 6676     adoxq(tmp4, yz_idx2);
 6677 
 6678     movl(carry, 0); // does not affect flags
 6679     adcxq(carry2, carry);
 6680     adoxq(carry2, carry);
 6681   } else {
 6682     add2_with_carry(tmp4, tmp3, carry, yz_idx1);
 6683     add2_with_carry(carry2, tmp4, tmp, yz_idx2);
 6684   }
 6685   movq(carry, carry2);
 6686 
 6687   movl(Address(z, idx, Address::times_4, 12), tmp3);
 6688   shrq(tmp3, 32);
 6689   movl(Address(z, idx, Address::times_4,  8), tmp3);
 6690 
 6691   movl(Address(z, idx, Address::times_4,  4), tmp4);
 6692   shrq(tmp4, 32);
 6693   movl(Address(z, idx, Address::times_4,  0), tmp4);
 6694 
 6695   jmp(L_third_loop);
 6696 
 6697   bind (L_third_loop_exit);
 6698 
 6699   andl (idx, 0x3);
 6700   jcc(Assembler::zero, L_post_third_loop_done);
 6701 
 6702   Label L_check_1;
 6703   subl(idx, 2);
 6704   jcc(Assembler::negative, L_check_1);
 6705 
 6706   movq(yz_idx1, Address(y, idx, Address::times_4,  0));
 6707   rorxq(yz_idx1, yz_idx1, 32);
 6708   mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
 6709   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
 6710   rorxq(yz_idx2, yz_idx2, 32);
 6711 
 6712   add2_with_carry(tmp4, tmp3, carry, yz_idx2);
 6713 
 6714   movl(Address(z, idx, Address::times_4,  4), tmp3);
 6715   shrq(tmp3, 32);
 6716   movl(Address(z, idx, Address::times_4,  0), tmp3);
 6717   movq(carry, tmp4);
 6718 
 6719   bind (L_check_1);
 6720   addl (idx, 0x2);
 6721   andl (idx, 0x1);
 6722   subl(idx, 1);
 6723   jcc(Assembler::negative, L_post_third_loop_done);
 6724   movl(tmp4, Address(y, idx, Address::times_4,  0));
 6725   mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
 6726   movl(tmp4, Address(z, idx, Address::times_4,  0));
 6727 
 6728   add2_with_carry(carry2, tmp3, tmp4, carry);
 6729 
 6730   movl(Address(z, idx, Address::times_4,  0), tmp3);
 6731   shrq(tmp3, 32);
 6732 
 6733   shlq(carry2, 32);
 6734   orq(tmp3, carry2);
 6735   movq(carry, tmp3);
 6736 
 6737   bind(L_post_third_loop_done);
 6738 }
 6739 
 6740 /**
 6741  * Code for BigInteger::multiplyToLen() intrinsic.
 6742  *
 6743  * rdi: x
 6744  * rax: xlen
 6745  * rsi: y
 6746  * rcx: ylen
 6747  * r8:  z
 6748  * r11: tmp0
 6749  * r12: tmp1
 6750  * r13: tmp2
 6751  * r14: tmp3
 6752  * r15: tmp4
 6753  * rbx: tmp5
 6754  *
 6755  */
 6756 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
 6757                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
 6758   ShortBranchVerifier sbv(this);
 6759   assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
 6760 
 6761   push(tmp0);
 6762   push(tmp1);
 6763   push(tmp2);
 6764   push(tmp3);
 6765   push(tmp4);
 6766   push(tmp5);
 6767 
 6768   push(xlen);
 6769 
 6770   const Register idx = tmp1;
 6771   const Register kdx = tmp2;
 6772   const Register xstart = tmp3;
 6773 
 6774   const Register y_idx = tmp4;
 6775   const Register carry = tmp5;
 6776   const Register product  = xlen;
 6777   const Register x_xstart = tmp0;
 6778 
 6779   // First Loop.
 6780   //
 6781   //  final static long LONG_MASK = 0xffffffffL;
 6782   //  int xstart = xlen - 1;
 6783   //  int ystart = ylen - 1;
 6784   //  long carry = 0;
 6785   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
 6786   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
 6787   //    z[kdx] = (int)product;
 6788   //    carry = product >>> 32;
 6789   //  }
 6790   //  z[xstart] = (int)carry;
 6791   //
 6792 
 6793   movl(idx, ylen);               // idx = ylen;
 6794   lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
 6795   xorq(carry, carry);            // carry = 0;
 6796 
 6797   Label L_done;
 6798 
 6799   movl(xstart, xlen);
 6800   decrementl(xstart);
 6801   jcc(Assembler::negative, L_done);
 6802 
 6803   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
 6804 
 6805   Label L_second_loop;
 6806   testl(kdx, kdx);
 6807   jcc(Assembler::zero, L_second_loop);
 6808 
 6809   Label L_carry;
 6810   subl(kdx, 1);
 6811   jcc(Assembler::zero, L_carry);
 6812 
 6813   movl(Address(z, kdx, Address::times_4,  0), carry);
 6814   shrq(carry, 32);
 6815   subl(kdx, 1);
 6816 
 6817   bind(L_carry);
 6818   movl(Address(z, kdx, Address::times_4,  0), carry);
 6819 
 6820   // Second and third (nested) loops.
 6821   //
 6822   // for (int i = xstart-1; i >= 0; i--) { // Second loop
 6823   //   carry = 0;
 6824   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
 6825   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
 6826   //                    (z[k] & LONG_MASK) + carry;
 6827   //     z[k] = (int)product;
 6828   //     carry = product >>> 32;
 6829   //   }
 6830   //   z[i] = (int)carry;
 6831   // }
 6832   //
 6833   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
 6834 
 6835   const Register jdx = tmp1;
 6836 
 6837   bind(L_second_loop);
 6838   xorl(carry, carry);    // carry = 0;
 6839   movl(jdx, ylen);       // j = ystart+1
 6840 
 6841   subl(xstart, 1);       // i = xstart-1;
 6842   jcc(Assembler::negative, L_done);
 6843 
 6844   push (z);
 6845 
 6846   Label L_last_x;
 6847   lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
 6848   subl(xstart, 1);       // i = xstart-1;
 6849   jcc(Assembler::negative, L_last_x);
 6850 
 6851   if (UseBMI2Instructions) {
 6852     movq(rdx,  Address(x, xstart, Address::times_4,  0));
 6853     rorxq(rdx, rdx, 32); // convert big-endian to little-endian
 6854   } else {
 6855     movq(x_xstart, Address(x, xstart, Address::times_4,  0));
 6856     rorq(x_xstart, 32);  // convert big-endian to little-endian
 6857   }
 6858 
 6859   Label L_third_loop_prologue;
 6860   bind(L_third_loop_prologue);
 6861 
 6862   push (x);
 6863   push (xstart);
 6864   push (ylen);
 6865 
 6866 
 6867   if (UseBMI2Instructions) {
 6868     multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
 6869   } else { // !UseBMI2Instructions
 6870     multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
 6871   }
 6872 
 6873   pop(ylen);
 6874   pop(xlen);
 6875   pop(x);
 6876   pop(z);
 6877 
 6878   movl(tmp3, xlen);
 6879   addl(tmp3, 1);
 6880   movl(Address(z, tmp3, Address::times_4,  0), carry);
 6881   subl(tmp3, 1);
 6882   jccb(Assembler::negative, L_done);
 6883 
 6884   shrq(carry, 32);
 6885   movl(Address(z, tmp3, Address::times_4,  0), carry);
 6886   jmp(L_second_loop);
 6887 
 6888   // Next infrequent code is moved outside loops.
 6889   bind(L_last_x);
 6890   if (UseBMI2Instructions) {
 6891     movl(rdx, Address(x,  0));
 6892   } else {
 6893     movl(x_xstart, Address(x,  0));
 6894   }
 6895   jmp(L_third_loop_prologue);
 6896 
 6897   bind(L_done);
 6898 
 6899   pop(xlen);
 6900 
 6901   pop(tmp5);
 6902   pop(tmp4);
 6903   pop(tmp3);
 6904   pop(tmp2);
 6905   pop(tmp1);
 6906   pop(tmp0);
 6907 }
 6908 
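      // vectorized_mismatch: compares 'length' array elements of obja and objb, where
      // log2_array_indxscale (expected in rcx) converts element counts to byte offsets and back.
      // On return 'result' holds the element index of the first difference, or -1 when the
      // ranges are equal. (Summary inferred from the code below.)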
 6909 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
 6910   Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
 6911   assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
 6912   Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
 6913   Label VECTOR8_TAIL, VECTOR4_TAIL;
 6914   Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
 6915   Label SAME_TILL_END, DONE;
 6916   Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
 6917 
 6918   //scale is in rcx in both Win64 and Unix
 6919   ShortBranchVerifier sbv(this);
 6920 
 6921   shlq(length);
 6922   xorq(result, result);
 6923 
 6924   if ((AVX3Threshold == 0) && (UseAVX > 2) &&
 6925       VM_Version::supports_avx512vlbw()) {
 6926     Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
 6927 
 6928     cmpq(length, 64);
 6929     jcc(Assembler::less, VECTOR32_TAIL);
 6930 
 6931     movq(tmp1, length);
 6932     andq(tmp1, 0x3F);      // tail count
 6933     andq(length, ~(0x3F)); //vector count
 6934 
 6935     bind(VECTOR64_LOOP);
 6936     // AVX512 code to compare 64 byte vectors.
 6937     evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
 6938     evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
 6939     kortestql(k7, k7);
 6940     jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL);     // mismatch
 6941     addq(result, 64);
 6942     subq(length, 64);
 6943     jccb(Assembler::notZero, VECTOR64_LOOP);
 6944 
 6946     testq(tmp1, tmp1);
 6947     jcc(Assembler::zero, SAME_TILL_END);
 6948 
 6949     //bind(VECTOR64_TAIL);
 6950     // AVX512 code to compare up to 63 byte vectors.
 6951     mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
 6952     shlxq(tmp2, tmp2, tmp1);
 6953     notq(tmp2);
 6954     kmovql(k3, tmp2);
 6955 
 6956     evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
 6957     evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
 6958 
 6959     ktestql(k7, k3);
 6960     jcc(Assembler::below, SAME_TILL_END);     // not mismatch
 6961 
 6962     bind(VECTOR64_NOT_EQUAL);
 6963     kmovql(tmp1, k7);
 6964     notq(tmp1);
 6965     tzcntq(tmp1, tmp1);
 6966     addq(result, tmp1);
 6967     shrq(result);
 6968     jmp(DONE);
 6969     bind(VECTOR32_TAIL);
 6970   }
 6971 
 6972   cmpq(length, 8);
 6973   jcc(Assembler::equal, VECTOR8_LOOP);
 6974   jcc(Assembler::less, VECTOR4_TAIL);
 6975 
 6976   if (UseAVX >= 2) {
 6977     Label VECTOR16_TAIL, VECTOR32_LOOP;
 6978 
 6979     cmpq(length, 16);
 6980     jcc(Assembler::equal, VECTOR16_LOOP);
 6981     jcc(Assembler::less, VECTOR8_LOOP);
 6982 
 6983     cmpq(length, 32);
 6984     jccb(Assembler::less, VECTOR16_TAIL);
 6985 
 6986     subq(length, 32);
 6987     bind(VECTOR32_LOOP);
 6988     vmovdqu(rymm0, Address(obja, result));
 6989     vmovdqu(rymm1, Address(objb, result));
 6990     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
 6991     vptest(rymm2, rymm2);
 6992     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
 6993     addq(result, 32);
 6994     subq(length, 32);
 6995     jcc(Assembler::greaterEqual, VECTOR32_LOOP);
 6996     addq(length, 32);
 6997     jcc(Assembler::equal, SAME_TILL_END);
 6998     //falling through if less than 32 bytes left //close the branch here.
 6999 
 7000     bind(VECTOR16_TAIL);
 7001     cmpq(length, 16);
 7002     jccb(Assembler::less, VECTOR8_TAIL);
 7003     bind(VECTOR16_LOOP);
 7004     movdqu(rymm0, Address(obja, result));
 7005     movdqu(rymm1, Address(objb, result));
 7006     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
 7007     ptest(rymm2, rymm2);
 7008     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7009     addq(result, 16);
 7010     subq(length, 16);
 7011     jcc(Assembler::equal, SAME_TILL_END);
 7012     //falling through if less than 16 bytes left
 7013   } else {//regular intrinsics
 7014 
 7015     cmpq(length, 16);
 7016     jccb(Assembler::less, VECTOR8_TAIL);
 7017 
 7018     subq(length, 16);
 7019     bind(VECTOR16_LOOP);
 7020     movdqu(rymm0, Address(obja, result));
 7021     movdqu(rymm1, Address(objb, result));
 7022     pxor(rymm0, rymm1);
 7023     ptest(rymm0, rymm0);
 7024     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
 7025     addq(result, 16);
 7026     subq(length, 16);
 7027     jccb(Assembler::greaterEqual, VECTOR16_LOOP);
 7028     addq(length, 16);
 7029     jcc(Assembler::equal, SAME_TILL_END);
 7030     //falling through if less than 16 bytes left
 7031   }
 7032 
 7033   bind(VECTOR8_TAIL);
 7034   cmpq(length, 8);
 7035   jccb(Assembler::less, VECTOR4_TAIL);
 7036   bind(VECTOR8_LOOP);
 7037   movq(tmp1, Address(obja, result));
 7038   movq(tmp2, Address(objb, result));
 7039   xorq(tmp1, tmp2);
 7040   testq(tmp1, tmp1);
 7041   jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
 7042   addq(result, 8);
 7043   subq(length, 8);
 7044   jcc(Assembler::equal, SAME_TILL_END);
 7045   //falling through if less than 8 bytes left
 7046 
 7047   bind(VECTOR4_TAIL);
 7048   cmpq(length, 4);
 7049   jccb(Assembler::less, BYTES_TAIL);
 7050   bind(VECTOR4_LOOP);
 7051   movl(tmp1, Address(obja, result));
 7052   xorl(tmp1, Address(objb, result));
 7053   testl(tmp1, tmp1);
 7054   jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
 7055   addq(result, 4);
 7056   subq(length, 4);
 7057   jcc(Assembler::equal, SAME_TILL_END);
 7058   //falling through if less than 4 bytes left
 7059 
 7060   bind(BYTES_TAIL);
 7061   bind(BYTES_LOOP);
 7062   load_unsigned_byte(tmp1, Address(obja, result));
 7063   load_unsigned_byte(tmp2, Address(objb, result));
 7064   xorl(tmp1, tmp2);
 7065   testl(tmp1, tmp1);
 7066   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7067   decq(length);
 7068   jcc(Assembler::zero, SAME_TILL_END);
 7069   incq(result);
 7070   load_unsigned_byte(tmp1, Address(obja, result));
 7071   load_unsigned_byte(tmp2, Address(objb, result));
 7072   xorl(tmp1, tmp2);
 7073   testl(tmp1, tmp1);
 7074   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7075   decq(length);
 7076   jcc(Assembler::zero, SAME_TILL_END);
 7077   incq(result);
 7078   load_unsigned_byte(tmp1, Address(obja, result));
 7079   load_unsigned_byte(tmp2, Address(objb, result));
 7080   xorl(tmp1, tmp2);
 7081   testl(tmp1, tmp1);
 7082   jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
 7083   jmp(SAME_TILL_END);
 7084 
 7085   if (UseAVX >= 2) {
 7086     bind(VECTOR32_NOT_EQUAL);
 7087     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
 7088     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
 7089     vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
 7090     vpmovmskb(tmp1, rymm0);
 7091     bsfq(tmp1, tmp1);
 7092     addq(result, tmp1);
 7093     shrq(result);
 7094     jmp(DONE);
 7095   }
 7096 
 7097   bind(VECTOR16_NOT_EQUAL);
 7098   if (UseAVX >= 2) {
 7099     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
 7100     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
 7101     pxor(rymm0, rymm2);
 7102   } else {
 7103     pcmpeqb(rymm2, rymm2);
 7104     pxor(rymm0, rymm1);
 7105     pcmpeqb(rymm0, rymm1);
 7106     pxor(rymm0, rymm2);
 7107   }
 7108   pmovmskb(tmp1, rymm0);
 7109   bsfq(tmp1, tmp1);
 7110   addq(result, tmp1);
 7111   shrq(result);
 7112   jmpb(DONE);
 7113 
 7114   bind(VECTOR8_NOT_EQUAL);
 7115   bind(VECTOR4_NOT_EQUAL);
 7116   bsfq(tmp1, tmp1);
 7117   shrq(tmp1, 3);
 7118   addq(result, tmp1);
 7119   bind(BYTES_NOT_EQUAL);
 7120   shrq(result);
 7121   jmpb(DONE);
 7122 
 7123   bind(SAME_TILL_END);
 7124   mov64(result, -1);
 7125 
 7126   bind(DONE);
 7127 }
 7128 
 7129 //Helper functions for square_to_len()
 7130 
 7131 /**
 7132  * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 * Preserves x and z and modifies the rest of the registers.
 7134  */
 7135 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7136   // Perform square and right shift by 1
 7137   // Handle odd xlen case first, then for even xlen do the following
 7138   // jlong carry = 0;
 7139   // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
 7140   //     huge_128 product = x[j:j+1] * x[j:j+1];
 7141   //     z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
 7142   //     z[i+2:i+3] = (jlong)(product >>> 1);
 7143   //     carry = (jlong)product;
 7144   // }
 7145 
 7146   xorq(tmp5, tmp5);     // carry
 7147   xorq(rdxReg, rdxReg);
 7148   xorl(tmp1, tmp1);     // index for x
 7149   xorl(tmp4, tmp4);     // index for z
 7150 
 7151   Label L_first_loop, L_first_loop_exit;
 7152 
 7153   testl(xlen, 1);
 7154   jccb(Assembler::zero, L_first_loop); //jump if xlen is even
 7155 
 7156   // Square and right shift by 1 the odd element using 32 bit multiply
 7157   movl(raxReg, Address(x, tmp1, Address::times_4, 0));
 7158   imulq(raxReg, raxReg);
 7159   shrq(raxReg, 1);
 7160   adcq(tmp5, 0);
 7161   movq(Address(z, tmp4, Address::times_4, 0), raxReg);
 7162   incrementl(tmp1);
 7163   addl(tmp4, 2);
 7164 
 7165   // Square and  right shift by 1 the rest using 64 bit multiply
 7166   bind(L_first_loop);
 7167   cmpptr(tmp1, xlen);
 7168   jccb(Assembler::equal, L_first_loop_exit);
 7169 
 7170   // Square
 7171   movq(raxReg, Address(x, tmp1, Address::times_4,  0));
 7172   rorq(raxReg, 32);    // convert big-endian to little-endian
 7173   mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax
 7174 
 7175   // Right shift by 1 and save carry
 7176   shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
 7177   rcrq(rdxReg, 1);
 7178   rcrq(raxReg, 1);
 7179   adcq(tmp5, 0);
 7180 
 7181   // Store result in z
 7182   movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
 7183   movq(Address(z, tmp4, Address::times_4, 8), raxReg);
 7184 
 7185   // Update indices for x and z
 7186   addl(tmp1, 2);
 7187   addl(tmp4, 4);
 7188   jmp(L_first_loop);
 7189 
 7190   bind(L_first_loop_exit);
 7191 }
 7192 
 7193 
 7194 /**
 7195  * Perform the following multiply add operation using BMI2 instructions
 7196  * carry:sum = sum + op1*op2 + carry
 7197  * op2 should be in rdx
 7198  * op2 is preserved, all other registers are modified
 7199  */
 7200 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
 7201   // assert op2 is rdx
 7202   mulxq(tmp2, op1, op1);  //  op1 * op2 -> tmp2:op1
 7203   addq(sum, carry);
 7204   adcq(tmp2, 0);
 7205   addq(sum, op1);
 7206   adcq(tmp2, 0);
 7207   movq(carry, tmp2);
 7208 }
 7209 
 7210 /**
 7211  * Perform the following multiply add operation:
 7212  * carry:sum = sum + op1*op2 + carry
 * Preserves op1 and op2, and modifies the rest of the registers.
 7214  */
 7215 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
 7216   // rdx:rax = op1 * op2
 7217   movq(raxReg, op2);
 7218   mulq(op1);
 7219 
 7220   //  rdx:rax = sum + carry + rdx:rax
 7221   addq(sum, carry);
 7222   adcq(rdxReg, 0);
 7223   addq(sum, raxReg);
 7224   adcq(rdxReg, 0);
 7225 
 7226   // carry:sum = rdx:sum
 7227   movq(carry, rdxReg);
 7228 }
 7229 
 7230 /**
 7231  * Add 64 bit long carry into z[] with carry propagation.
 * Preserves the z and carry register values and modifies the rest of the registers.
 7233  *
 7234  */
 7235 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
 7236   Label L_fourth_loop, L_fourth_loop_exit;
 7237 
 7238   movl(tmp1, 1);
 7239   subl(zlen, 2);
 7240   addq(Address(z, zlen, Address::times_4, 0), carry);
 7241 
 7242   bind(L_fourth_loop);
 7243   jccb(Assembler::carryClear, L_fourth_loop_exit);
 7244   subl(zlen, 2);
 7245   jccb(Assembler::negative, L_fourth_loop_exit);
 7246   addq(Address(z, zlen, Address::times_4, 0), tmp1);
 7247   jmp(L_fourth_loop);
 7248   bind(L_fourth_loop_exit);
 7249 }
 7250 
 7251 /**
 7252  * Shift z[] left by 1 bit.
 * Preserves the x, len, z and zlen registers and modifies the rest of the registers.
 7254  *
 7255  */
 7256 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
 7257 
 7258   Label L_fifth_loop, L_fifth_loop_exit;
 7259 
 7260   // Fifth loop
 7261   // Perform primitiveLeftShift(z, zlen, 1)
 7262 
 7263   const Register prev_carry = tmp1;
 7264   const Register new_carry = tmp4;
 7265   const Register value = tmp2;
 7266   const Register zidx = tmp3;
 7267 
 7268   // int zidx, carry;
 7269   // long value;
 7270   // carry = 0;
  // for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
  //    (carry:value) = (z[zidx] << 1) | carry;
  //    z[zidx] = value;
 7274   // }
 7275 
 7276   movl(zidx, zlen);
 7277   xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
 7278 
 7279   bind(L_fifth_loop);
 7280   decl(zidx);  // Use decl to preserve carry flag
 7281   decl(zidx);
 7282   jccb(Assembler::negative, L_fifth_loop_exit);
 7283 
 7284   if (UseBMI2Instructions) {
 7285      movq(value, Address(z, zidx, Address::times_4, 0));
 7286      rclq(value, 1);
 7287      rorxq(value, value, 32);
 7288      movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7289   }
 7290   else {
 7291     // clear new_carry
 7292     xorl(new_carry, new_carry);
 7293 
 7294     // Shift z[i] by 1, or in previous carry and save new carry
 7295     movq(value, Address(z, zidx, Address::times_4, 0));
 7296     shlq(value, 1);
 7297     adcl(new_carry, 0);
 7298 
 7299     orq(value, prev_carry);
 7300     rorq(value, 0x20);
 7301     movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
 7302 
 7303     // Set previous carry = new carry
 7304     movl(prev_carry, new_carry);
 7305   }
 7306   jmp(L_fifth_loop);
 7307 
 7308   bind(L_fifth_loop_exit);
 7309 }
 7310 
 7311 
 7312 /**
 7313  * Code for BigInteger::squareToLen() intrinsic
 7314  *
 7315  * rdi: x
 7316  * rsi: len
 7317  * r8:  z
 7318  * rcx: zlen
 7319  * r12: tmp1
 7320  * r13: tmp2
 7321  * r14: tmp3
 7322  * r15: tmp4
 7323  * rbx: tmp5
 7324  *
 7325  */
 7326 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7327 
 7328   Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
 7329   push(tmp1);
 7330   push(tmp2);
 7331   push(tmp3);
 7332   push(tmp4);
 7333   push(tmp5);
 7334 
 7335   // First loop
 7336   // Store the squares, right shifted one bit (i.e., divided by 2).
 7337   square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
 7338 
 7339   // Add in off-diagonal sums.
 7340   //
 7341   // Second, third (nested) and fourth loops.
 7342   // zlen +=2;
 7343   // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
 7344   //    carry = 0;
 7345   //    long op2 = x[xidx:xidx+1];
 7346   //    for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
 7347   //       k -= 2;
 7348   //       long op1 = x[j:j+1];
 7349   //       long sum = z[k:k+1];
 7350   //       carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
 7351   //       z[k:k+1] = sum;
 7352   //    }
 7353   //    add_one_64(z, k, carry, tmp_regs);
 7354   // }
 7355 
 7356   const Register carry = tmp5;
 7357   const Register sum = tmp3;
 7358   const Register op1 = tmp4;
 7359   Register op2 = tmp2;
 7360 
 7361   push(zlen);
 7362   push(len);
 7363   addl(zlen,2);
 7364   bind(L_second_loop);
 7365   xorq(carry, carry);
 7366   subl(zlen, 4);
 7367   subl(len, 2);
 7368   push(zlen);
 7369   push(len);
 7370   cmpl(len, 0);
 7371   jccb(Assembler::lessEqual, L_second_loop_exit);
 7372 
 7373   // Multiply an array by one 64 bit long.
 7374   if (UseBMI2Instructions) {
 7375     op2 = rdxReg;
 7376     movq(op2, Address(x, len, Address::times_4,  0));
 7377     rorxq(op2, op2, 32);
 7378   }
 7379   else {
 7380     movq(op2, Address(x, len, Address::times_4,  0));
 7381     rorq(op2, 32);
 7382   }
 7383 
 7384   bind(L_third_loop);
 7385   decrementl(len);
 7386   jccb(Assembler::negative, L_third_loop_exit);
 7387   decrementl(len);
 7388   jccb(Assembler::negative, L_last_x);
 7389 
 7390   movq(op1, Address(x, len, Address::times_4,  0));
 7391   rorq(op1, 32);
 7392 
 7393   bind(L_multiply);
 7394   subl(zlen, 2);
 7395   movq(sum, Address(z, zlen, Address::times_4,  0));
 7396 
 7397   // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
 7398   if (UseBMI2Instructions) {
 7399     multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
 7400   }
 7401   else {
 7402     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7403   }
 7404 
 7405   movq(Address(z, zlen, Address::times_4, 0), sum);
 7406 
 7407   jmp(L_third_loop);
 7408   bind(L_third_loop_exit);
 7409 
 7410   // Fourth loop
 7411   // Add 64 bit long carry into z with carry propagation.
  // Uses the offset-adjusted zlen.
 7413   add_one_64(z, zlen, carry, tmp1);
 7414 
 7415   pop(len);
 7416   pop(zlen);
 7417   jmp(L_second_loop);
 7418 
  // The following infrequently executed code is moved outside of the loops.
 7420   bind(L_last_x);
 7421   movl(op1, Address(x, 0));
 7422   jmp(L_multiply);
 7423 
 7424   bind(L_second_loop_exit);
 7425   pop(len);
 7426   pop(zlen);
 7427   pop(len);
 7428   pop(zlen);
 7429 
 7430   // Fifth loop
 7431   // Shift z left 1 bit.
 7432   lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
 7433 
 7434   // z[zlen-1] |= x[len-1] & 1;
 7435   movl(tmp3, Address(x, len, Address::times_4, -4));
 7436   andl(tmp3, 1);
 7437   orl(Address(z, zlen, Address::times_4,  -4), tmp3);
 7438 
 7439   pop(tmp5);
 7440   pop(tmp4);
 7441   pop(tmp3);
 7442   pop(tmp2);
 7443   pop(tmp1);
 7444 }
 7445 
 7446 /**
 7447  * Helper function for mul_add()
 * Multiply in[] by int k and add the result to out[] starting at offset offs,
 * using 128 bit by 32 bit multiplies, and return the carry in tmp5.
 * Only the quad-int-aligned portion of in[] is operated on in this function.
 * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
 * This function preserves the out, in and k registers.
 * len and offset point to the corresponding indices in "in" and "out".
 * tmp5 holds the carry.
 * The other registers are temporaries and are modified.
 *
 7457  */
 7458 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
 7459   Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
 7460   Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
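  // A sketch of the 4-int-per-iteration loop below (illustrative only; in[] and out[]
  // are big-endian int arrays, and k is the zero-extended 32-bit multiplier in op2):
  //   for (int i = len >> 2; i > 0; i--) {
  //     len -= 4; offset -= 4;
  //     carry:out[offset+2 : offset+3] = out[offset+2 : offset+3] + in[len+2 : len+3] * k + carry;
  //     carry:out[offset   : offset+1] = out[offset   : offset+1] + in[len   : len+1] * k + carry;
  //   }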
 7461 
 7462   Label L_first_loop, L_first_loop_exit;
 7463 
 7464   movl(tmp1, len);
 7465   shrl(tmp1, 2);
 7466 
 7467   bind(L_first_loop);
 7468   subl(tmp1, 1);
 7469   jccb(Assembler::negative, L_first_loop_exit);
 7470 
 7471   subl(len, 4);
 7472   subl(offset, 4);
 7473 
 7474   Register op2 = tmp2;
 7475   const Register sum = tmp3;
 7476   const Register op1 = tmp4;
 7477   const Register carry = tmp5;
 7478 
 7479   if (UseBMI2Instructions) {
 7480     op2 = rdxReg;
 7481   }
 7482 
 7483   movq(op1, Address(in, len, Address::times_4,  8));
 7484   rorq(op1, 32);
 7485   movq(sum, Address(out, offset, Address::times_4,  8));
 7486   rorq(sum, 32);
 7487   if (UseBMI2Instructions) {
 7488     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7489   }
 7490   else {
 7491     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7492   }
 7493   // Store back in big endian from little endian
 7494   rorq(sum, 0x20);
 7495   movq(Address(out, offset, Address::times_4,  8), sum);
 7496 
 7497   movq(op1, Address(in, len, Address::times_4,  0));
 7498   rorq(op1, 32);
 7499   movq(sum, Address(out, offset, Address::times_4,  0));
 7500   rorq(sum, 32);
 7501   if (UseBMI2Instructions) {
 7502     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7503   }
 7504   else {
 7505     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7506   }
 7507   // Store back in big endian from little endian
 7508   rorq(sum, 0x20);
 7509   movq(Address(out, offset, Address::times_4,  0), sum);
 7510 
 7511   jmp(L_first_loop);
 7512   bind(L_first_loop_exit);
 7513 }
 7514 
 7515 /**
 7516  * Code for BigInteger::mulAdd() intrinsic
 7517  *
 7518  * rdi: out
 7519  * rsi: in
 7520  * r11: offs (out.length - offset)
 7521  * rcx: len
 7522  * r8:  k
 7523  * r12: tmp1
 7524  * r13: tmp2
 7525  * r14: tmp3
 7526  * r15: tmp4
 7527  * rbx: tmp5
 7528  * Multiply the in[] by word k and add to out[], return the carry in rax
 7529  */
 7530 void MacroAssembler::mul_add(Register out, Register in, Register offs,
 7531    Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
 7532    Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
 7533 
 7534   Label L_carry, L_last_in, L_done;
 7535 
 7536 // carry = 0;
 7537 // for (int j=len-1; j >= 0; j--) {
 7538 //    long product = (in[j] & LONG_MASK) * kLong +
 7539 //                   (out[offs] & LONG_MASK) + carry;
 7540 //    out[offs--] = (int)product;
 7541 //    carry = product >>> 32;
 7542 // }
 7543 //
 7544   push(tmp1);
 7545   push(tmp2);
 7546   push(tmp3);
 7547   push(tmp4);
 7548   push(tmp5);
 7549 
 7550   Register op2 = tmp2;
 7551   const Register sum = tmp3;
 7552   const Register op1 = tmp4;
 7553   const Register carry =  tmp5;
 7554 
  if (UseBMI2Instructions) {
    op2 = rdxReg;
  }
  movl(op2, k);
 7562 
 7563   xorq(carry, carry);
 7564 
  // First loop

  // Multiply in[] by k in a 4-way unrolled loop using 128 bit by 32 bit multiplies.
  // The carry is in tmp5.
 7569   mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
 7570 
  // Multiply the trailing in[] entry (if any) using a 64 bit by 32 bit multiply.
 7572   decrementl(len);
 7573   jccb(Assembler::negative, L_carry);
 7574   decrementl(len);
 7575   jccb(Assembler::negative, L_last_in);
 7576 
 7577   movq(op1, Address(in, len, Address::times_4,  0));
 7578   rorq(op1, 32);
 7579 
 7580   subl(offs, 2);
 7581   movq(sum, Address(out, offs, Address::times_4,  0));
 7582   rorq(sum, 32);
 7583 
 7584   if (UseBMI2Instructions) {
 7585     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
 7586   }
 7587   else {
 7588     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
 7589   }
 7590 
 7591   // Store back in big endian from little endian
 7592   rorq(sum, 0x20);
 7593   movq(Address(out, offs, Address::times_4,  0), sum);
 7594 
 7595   testl(len, len);
 7596   jccb(Assembler::zero, L_carry);
 7597 
 7598   //Multiply the last in[] entry, if any
 7599   bind(L_last_in);
 7600   movl(op1, Address(in, 0));
 7601   movl(sum, Address(out, offs, Address::times_4,  -4));
 7602 
 7603   movl(raxReg, k);
 7604   mull(op1); //tmp4 * eax -> edx:eax
 7605   addl(sum, carry);
 7606   adcl(rdxReg, 0);
 7607   addl(sum, raxReg);
 7608   adcl(rdxReg, 0);
 7609   movl(carry, rdxReg);
 7610 
 7611   movl(Address(out, offs, Address::times_4,  -4), sum);
 7612 
 7613   bind(L_carry);
 7614   //return tmp5/carry as carry in rax
 7615   movl(rax, carry);
 7616 
 7617   bind(L_done);
 7618   pop(tmp5);
 7619   pop(tmp4);
 7620   pop(tmp3);
 7621   pop(tmp2);
 7622   pop(tmp1);
 7623 }
 7624 
 7625 /**
 7626  * Emits code to update CRC-32 with a byte value according to constants in table
 7627  *
 7628  * @param [in,out]crc   Register containing the crc.
 7629  * @param [in]val       Register containing the byte to fold into the CRC.
 7630  * @param [in]table     Register containing the table of crc constants.
 7631  *
 7632  * uint32_t crc;
 7633  * val = crc_table[(val ^ crc) & 0xFF];
 7634  * crc = val ^ (crc >> 8);
 7635  *
 7636  */
 7637 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
 7638   xorl(val, crc);
 7639   andl(val, 0xFF);
 7640   shrl(crc, 8); // unsigned shift
 7641   xorl(crc, Address(table, val, Address::times_4, 0));
 7642 }
 7643 
 7644 /**
 7645  * Fold 128-bit data chunk
 7646  */
 7647 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
 7648   if (UseAVX > 0) {
 7649     vpclmulhdq(xtmp, xK, xcrc); // [123:64]
 7650     vpclmulldq(xcrc, xK, xcrc); // [63:0]
 7651     vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
 7652     pxor(xcrc, xtmp);
 7653   } else {
 7654     movdqa(xtmp, xcrc);
 7655     pclmulhdq(xtmp, xK);   // [123:64]
 7656     pclmulldq(xcrc, xK);   // [63:0]
 7657     pxor(xcrc, xtmp);
 7658     movdqu(xtmp, Address(buf, offset));
 7659     pxor(xcrc, xtmp);
 7660   }
 7661 }
 7662 
 7663 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
 7664   if (UseAVX > 0) {
 7665     vpclmulhdq(xtmp, xK, xcrc);
 7666     vpclmulldq(xcrc, xK, xcrc);
 7667     pxor(xcrc, xbuf);
 7668     pxor(xcrc, xtmp);
 7669   } else {
 7670     movdqa(xtmp, xcrc);
 7671     pclmulhdq(xtmp, xK);
 7672     pclmulldq(xcrc, xK);
 7673     pxor(xcrc, xbuf);
 7674     pxor(xcrc, xtmp);
 7675   }
 7676 }
 7677 
 7678 /**
 7679  * 8-bit folds to compute 32-bit CRC
 7680  *
 7681  * uint64_t xcrc;
 7682  * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 7683  */
 7684 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
 7685   movdl(tmp, xcrc);
 7686   andl(tmp, 0xFF);
 7687   movdl(xtmp, Address(table, tmp, Address::times_4, 0));
 7688   psrldq(xcrc, 1); // unsigned shift one byte
 7689   pxor(xcrc, xtmp);
 7690 }
 7691 
 7692 /**
 7693  * uint32_t crc;
 7694  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 7695  */
 7696 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
 7697   movl(tmp, crc);
 7698   andl(tmp, 0xFF);
 7699   shrl(crc, 8);
 7700   xorl(crc, Address(table, tmp, Address::times_4, 0));
 7701 }
 7702 
 7703 /**
 7704  * @param crc   register containing existing CRC (32-bit)
 7705  * @param buf   register pointing to input byte buffer (byte*)
 7706  * @param len   register containing number of bytes
 7707  * @param table register that will contain address of CRC table
 7708  * @param tmp   scratch register
 7709  */
 7710 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
 7711   assert_different_registers(crc, buf, len, table, tmp, rax);
 7712 
 7713   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 7714   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 7715 
  // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
  // context for the registers used, since all instructions below use 128-bit mode.
  // On EVEX without VL and BW, these instructions will all be AVX.
 7719   lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
 7720   notl(crc); // ~crc
 7721   cmpl(len, 16);
 7722   jcc(Assembler::less, L_tail);
 7723 
 7724   // Align buffer to 16 bytes
 7725   movl(tmp, buf);
 7726   andl(tmp, 0xF);
 7727   jccb(Assembler::zero, L_aligned);
 7728   subl(tmp,  16);
 7729   addl(len, tmp);
 7730 
 7731   align(4);
 7732   BIND(L_align_loop);
 7733   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 7734   update_byte_crc32(crc, rax, table);
 7735   increment(buf);
 7736   incrementl(tmp);
 7737   jccb(Assembler::less, L_align_loop);
 7738 
 7739   BIND(L_aligned);
 7740   movl(tmp, len); // save
 7741   shrl(len, 4);
 7742   jcc(Assembler::zero, L_tail_restore);
 7743 
 7744   // Fold crc into first bytes of vector
 7745   movdqa(xmm1, Address(buf, 0));
 7746   movdl(rax, xmm1);
 7747   xorl(crc, rax);
 7748   if (VM_Version::supports_sse4_1()) {
 7749     pinsrd(xmm1, crc, 0);
 7750   } else {
 7751     pinsrw(xmm1, crc, 0);
 7752     shrl(crc, 16);
 7753     pinsrw(xmm1, crc, 1);
 7754   }
 7755   addptr(buf, 16);
 7756   subl(len, 4); // len > 0
 7757   jcc(Assembler::less, L_fold_tail);
 7758 
 7759   movdqa(xmm2, Address(buf,  0));
 7760   movdqa(xmm3, Address(buf, 16));
 7761   movdqa(xmm4, Address(buf, 32));
 7762   addptr(buf, 48);
 7763   subl(len, 3);
 7764   jcc(Assembler::lessEqual, L_fold_512b);
 7765 
 7766   // Fold total 512 bits of polynomial on each iteration,
 7767   // 128 bits per each of 4 parallel streams.
 7768   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
 7769 
 7770   align32();
 7771   BIND(L_fold_512b_loop);
 7772   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 7773   fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
 7774   fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
 7775   fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
 7776   addptr(buf, 64);
 7777   subl(len, 4);
 7778   jcc(Assembler::greater, L_fold_512b_loop);
 7779 
 7780   // Fold 512 bits to 128 bits.
 7781   BIND(L_fold_512b);
 7782   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 7783   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
 7784   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
 7785   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
 7786 
 7787   // Fold the rest of 128 bits data chunks
 7788   BIND(L_fold_tail);
 7789   addl(len, 3);
 7790   jccb(Assembler::lessEqual, L_fold_128b);
 7791   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 7792 
 7793   BIND(L_fold_tail_loop);
 7794   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
 7795   addptr(buf, 16);
 7796   decrementl(len);
 7797   jccb(Assembler::greater, L_fold_tail_loop);
 7798 
 7799   // Fold 128 bits in xmm1 down into 32 bits in crc register.
 7800   BIND(L_fold_128b);
 7801   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
 7802   if (UseAVX > 0) {
 7803     vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
 7804     vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
 7805     vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
 7806   } else {
 7807     movdqa(xmm2, xmm0);
 7808     pclmulqdq(xmm2, xmm1, 0x1);
 7809     movdqa(xmm3, xmm0);
 7810     pand(xmm3, xmm2);
 7811     pclmulqdq(xmm0, xmm3, 0x1);
 7812   }
 7813   psrldq(xmm1, 8);
 7814   psrldq(xmm2, 4);
 7815   pxor(xmm0, xmm1);
 7816   pxor(xmm0, xmm2);
 7817 
 7818   // 8 8-bit folds to compute 32-bit CRC.
 7819   for (int j = 0; j < 4; j++) {
 7820     fold_8bit_crc32(xmm0, table, xmm1, rax);
 7821   }
 7822   movdl(crc, xmm0); // mov 32 bits to general register
 7823   for (int j = 0; j < 4; j++) {
 7824     fold_8bit_crc32(crc, table, rax);
 7825   }
 7826 
 7827   BIND(L_tail_restore);
 7828   movl(len, tmp); // restore
 7829   BIND(L_tail);
 7830   andl(len, 0xf);
 7831   jccb(Assembler::zero, L_exit);
 7832 
 7833   // Fold the rest of bytes
 7834   align(4);
 7835   BIND(L_tail_loop);
 7836   movsbl(rax, Address(buf, 0)); // load byte with sign extension
 7837   update_byte_crc32(crc, rax, table);
 7838   increment(buf);
 7839   decrementl(len);
 7840   jccb(Assembler::greater, L_tail_loop);
 7841 
 7842   BIND(L_exit);
  notl(crc); // ~crc
 7844 }
 7845 
 7846 // Helper function for AVX 512 CRC32
 7847 // Fold 512-bit data chunks
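// Per 128-bit lane this computes, roughly (a sketch of the folding step):
//   xcrc = clmul(xcrc[63:0], xK[127:64]) ^ clmul(xcrc[127:64], xK[63:0]) ^ buf[pos+offset ..]
// i.e. the standard CRC folding step applied to all four lanes at once.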
 7848 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
 7849                                              Register pos, int offset) {
 7850   evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
 7851   evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
 7852   evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
 7853   evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
 7854   evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
 7855 }
 7856 
 7857 // Helper function for AVX 512 CRC32
 7858 // Compute CRC32 for < 256B buffers
 7859 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
 7860                                               Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
 7861                                               Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
 7862 
 7863   Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
 7864   Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
 7865   Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
 7866 
 7867   // check if there is enough buffer to be able to fold 16B at a time
 7868   cmpl(len, 32);
 7869   jcc(Assembler::less, L_less_than_32);
 7870 
 7871   // if there is, load the constants
 7872   movdqu(xmm10, Address(table, 1 * 16));    //rk1 and rk2 in xmm10
 7873   movdl(xmm0, crc);                        // get the initial crc value
 7874   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 7875   pxor(xmm7, xmm0);
 7876 
 7877   // update the buffer pointer
 7878   addl(pos, 16);
  // update the counter; subtract 32 instead of 16 to save one instruction in the loop
 7880   subl(len, 32);
 7881   jmp(L_16B_reduction_loop);
 7882 
 7883   bind(L_less_than_32);
  // move the initial crc to the return value; this is necessary for zero-length buffers
 7885   movl(rax, crc);
 7886   testl(len, len);
 7887   jcc(Assembler::equal, L_cleanup);
 7888 
 7889   movdl(xmm0, crc);                        //get the initial crc value
 7890 
 7891   cmpl(len, 16);
 7892   jcc(Assembler::equal, L_exact_16_left);
 7893   jcc(Assembler::less, L_less_than_16_left);
 7894 
 7895   movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
 7896   pxor(xmm7, xmm0);                       //xor the initial crc value
 7897   addl(pos, 16);
 7898   subl(len, 16);
 7899   movdqu(xmm10, Address(table, 1 * 16));    // rk1 and rk2 in xmm10
 7900   jmp(L_get_last_two_xmms);
 7901 
 7902   bind(L_less_than_16_left);
  // use stack space to load data of less than 16 bytes; zero out the 16B in memory first
 7904   pxor(xmm1, xmm1);
 7905   movptr(tmp1, rsp);
 7906   movdqu(Address(tmp1, 0 * 16), xmm1);
 7907 
 7908   cmpl(len, 4);
 7909   jcc(Assembler::less, L_only_less_than_4);
 7910 
  // back up the counter value
 7912   movl(tmp2, len);
 7913   cmpl(len, 8);
 7914   jcc(Assembler::less, L_less_than_8_left);
 7915 
 7916   //load 8 Bytes
 7917   movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
 7918   movq(Address(tmp1, 0 * 16), rax);
 7919   addptr(tmp1, 8);
 7920   subl(len, 8);
 7921   addl(pos, 8);
 7922 
 7923   bind(L_less_than_8_left);
 7924   cmpl(len, 4);
 7925   jcc(Assembler::less, L_less_than_4_left);
 7926 
 7927   //load 4 Bytes
 7928   movl(rax, Address(buf, pos, Address::times_1, 0));
 7929   movl(Address(tmp1, 0 * 16), rax);
 7930   addptr(tmp1, 4);
 7931   subl(len, 4);
 7932   addl(pos, 4);
 7933 
 7934   bind(L_less_than_4_left);
 7935   cmpl(len, 2);
 7936   jcc(Assembler::less, L_less_than_2_left);
 7937 
 7938   // load 2 Bytes
 7939   movw(rax, Address(buf, pos, Address::times_1, 0));
 7940   movl(Address(tmp1, 0 * 16), rax);
 7941   addptr(tmp1, 2);
 7942   subl(len, 2);
 7943   addl(pos, 2);
 7944 
 7945   bind(L_less_than_2_left);
 7946   cmpl(len, 1);
 7947   jcc(Assembler::less, L_zero_left);
 7948 
 7949   // load 1 Byte
 7950   movb(rax, Address(buf, pos, Address::times_1, 0));
 7951   movb(Address(tmp1, 0 * 16), rax);
 7952 
 7953   bind(L_zero_left);
 7954   movdqu(xmm7, Address(rsp, 0));
 7955   pxor(xmm7, xmm0);                       //xor the initial crc value
 7956 
 7957   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 7958   movdqu(xmm0, Address(rax, tmp2));
 7959   pshufb(xmm7, xmm0);
 7960   jmp(L_128_done);
 7961 
 7962   bind(L_exact_16_left);
 7963   movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
 7964   pxor(xmm7, xmm0);                       //xor the initial crc value
 7965   jmp(L_128_done);
 7966 
 7967   bind(L_only_less_than_4);
 7968   cmpl(len, 3);
 7969   jcc(Assembler::less, L_only_less_than_3);
 7970 
 7971   // load 3 Bytes
 7972   movb(rax, Address(buf, pos, Address::times_1, 0));
 7973   movb(Address(tmp1, 0), rax);
 7974 
 7975   movb(rax, Address(buf, pos, Address::times_1, 1));
 7976   movb(Address(tmp1, 1), rax);
 7977 
 7978   movb(rax, Address(buf, pos, Address::times_1, 2));
 7979   movb(Address(tmp1, 2), rax);
 7980 
 7981   movdqu(xmm7, Address(rsp, 0));
 7982   pxor(xmm7, xmm0);                     //xor the initial crc value
 7983 
 7984   pslldq(xmm7, 0x5);
 7985   jmp(L_barrett);
 7986   bind(L_only_less_than_3);
 7987   cmpl(len, 2);
 7988   jcc(Assembler::less, L_only_less_than_2);
 7989 
 7990   // load 2 Bytes
 7991   movb(rax, Address(buf, pos, Address::times_1, 0));
 7992   movb(Address(tmp1, 0), rax);
 7993 
 7994   movb(rax, Address(buf, pos, Address::times_1, 1));
 7995   movb(Address(tmp1, 1), rax);
 7996 
 7997   movdqu(xmm7, Address(rsp, 0));
 7998   pxor(xmm7, xmm0);                     //xor the initial crc value
 7999 
 8000   pslldq(xmm7, 0x6);
 8001   jmp(L_barrett);
 8002 
 8003   bind(L_only_less_than_2);
 8004   //load 1 Byte
 8005   movb(rax, Address(buf, pos, Address::times_1, 0));
 8006   movb(Address(tmp1, 0), rax);
 8007 
 8008   movdqu(xmm7, Address(rsp, 0));
 8009   pxor(xmm7, xmm0);                     //xor the initial crc value
 8010 
 8011   pslldq(xmm7, 0x7);
 8012 }
 8013 
 8014 /**
 * Compute CRC32 using AVX512 instructions
 *
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table address of crc or crc32c table
 * @param tmp1  scratch register
 * @param tmp2  scratch register
 * @return rax  result register
 *
 * This routine is identical for crc32c with the exception of the precomputed constant
 * table which will be passed as the table argument.  The calculation steps are
 * the same for both variants.
 */
 8028 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
 8029   assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);
 8030 
 8031   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
 8032   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
 8033   Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
 8034   Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
 8035   Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
 8036 
 8037   const Register pos = r12;
 8038   push(r12);
 8039   subptr(rsp, 16 * 2 + 8);
 8040 
  // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
  // context for the registers used, since all instructions below use 128-bit mode.
  // On EVEX without VL and BW, these instructions will all be AVX.
 8044   movl(pos, 0);
 8045 
 8046   // check if smaller than 256B
 8047   cmpl(len, 256);
 8048   jcc(Assembler::less, L_less_than_256);
 8049 
 8050   // load the initial crc value
 8051   movdl(xmm10, crc);
 8052 
 8053   // receive the initial 64B data, xor the initial crc value
 8054   evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
 8055   evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
 8056   evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
 8057   evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
 8058 
 8059   subl(len, 256);
 8060   cmpl(len, 256);
 8061   jcc(Assembler::less, L_fold_128_B_loop);
 8062 
 8063   evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
 8064   evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
 8065   evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
 8066   subl(len, 256);
 8067 
 8068   bind(L_fold_256_B_loop);
 8069   addl(pos, 256);
 8070   fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
 8071   fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
 8072   fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
 8073   fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
 8074 
 8075   subl(len, 256);
 8076   jcc(Assembler::greaterEqual, L_fold_256_B_loop);
 8077 
 8078   // Fold 256 into 128
 8079   addl(pos, 256);
 8080   evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
 8081   evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
 8082   vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
 8083 
 8084   evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
 8085   evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
 8086   vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
 8087 
 8088   evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
 8089   evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
 8090 
 8091   addl(len, 128);
 8092   jmp(L_fold_128_B_register);
 8093 
  // At this point in the code there are 128 * x + y (0 <= y < 128) bytes of buffer. The
  // fold_128_B_loop will fold 128B at a time until 128 + y bytes of buffer remain.

  // Fold 128B at a time. This section of the code folds two zmm registers (zmm0 and zmm4) in parallel.
 8098   bind(L_fold_128_B_loop);
 8099   addl(pos, 128);
 8100   fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
 8101   fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
 8102 
 8103   subl(len, 128);
 8104   jcc(Assembler::greaterEqual, L_fold_128_B_loop);
 8105 
 8106   addl(pos, 128);
 8107 
  // At this point the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128.
  // The 128B of folded data is held in two zmm registers: zmm0 and zmm4.
 8110   bind(L_fold_128_B_register);
 8111   evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
 8112   evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
 8113   evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
 8114   evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
  // save the last 16B, which has no multiplicand
 8116   vextracti64x2(xmm7, xmm4, 3);
 8117 
 8118   evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
 8119   evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
 8120   // Needed later in reduction loop
 8121   movdqu(xmm10, Address(table, 1 * 16));
 8122   vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
 8123   vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
 8124 
 8125   // Swap 1,0,3,2 - 01 00 11 10
 8126   evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
 8127   evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
 8128   vextracti128(xmm5, xmm8, 1);
 8129   evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
 8130 
 8131   // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
 8132   // instead of a cmp instruction, we use the negative flag with the jl instruction
 8133   addl(len, 128 - 16);
 8134   jcc(Assembler::less, L_final_reduction_for_128);
 8135 
 8136   bind(L_16B_reduction_loop);
 8137   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8138   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8139   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8140   movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
 8141   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8142   addl(pos, 16);
 8143   subl(len, 16);
 8144   jcc(Assembler::greaterEqual, L_16B_reduction_loop);
 8145 
 8146   bind(L_final_reduction_for_128);
 8147   addl(len, 16);
 8148   jcc(Assembler::equal, L_128_done);
 8149 
 8150   bind(L_get_last_two_xmms);
 8151   movdqu(xmm2, xmm7);
 8152   addl(pos, len);
 8153   movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
 8154   subl(pos, len);
 8155 
 8156   // get rid of the extra data that was loaded before
 8157   // load the shift constant
 8158   lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
 8159   movdqu(xmm0, Address(rax, len));
 8160   addl(rax, len);
 8161 
 8162   vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8163   //Change mask to 512
 8164   vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
 8165   vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
 8166 
 8167   blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
 8168   vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
 8169   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8170   vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
 8171   vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
 8172 
 8173   bind(L_128_done);
 8174   // compute crc of a 128-bit value
 8175   movdqu(xmm10, Address(table, 3 * 16));
 8176   movdqu(xmm0, xmm7);
 8177 
 8178   // 64b fold
 8179   vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
 8180   vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
 8181   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8182 
 8183   // 32b fold
 8184   movdqu(xmm0, xmm7);
 8185   vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
 8186   vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
 8187   vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
 8188   jmp(L_barrett);
 8189 
 8190   bind(L_less_than_256);
 8191   kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
 8192 
  // Barrett reduction
 8194   bind(L_barrett);
 8195   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
 8196   movdqu(xmm1, xmm7);
 8197   movdqu(xmm2, xmm7);
 8198   movdqu(xmm10, Address(table, 4 * 16));
 8199 
 8200   pclmulqdq(xmm7, xmm10, 0x0);
 8201   pxor(xmm7, xmm2);
 8202   vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
 8203   movdqu(xmm2, xmm7);
 8204   pclmulqdq(xmm7, xmm10, 0x10);
 8205   pxor(xmm7, xmm2);
 8206   pxor(xmm7, xmm1);
 8207   pextrd(crc, xmm7, 2);
 8208 
 8209   bind(L_cleanup);
 8210   addptr(rsp, 16 * 2 + 8);
 8211   pop(r12);
 8212 }
 8213 
 8214 // S. Gueron / Information Processing Letters 112 (2012) 184
 8215 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
 8216 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
 8217 // Output: the 64-bit carry-less product of B * CONST
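// In short (a sketch of the steps below): Q1..Q4 are table lookups on bytes 0..3 of B,
// and the result Q1 ^ (Q2 << 8) ^ (Q3 << 16) ^ (Q4 << 24) is left in the 'in' register.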
 8218 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
 8219                                      Register tmp1, Register tmp2, Register tmp3) {
 8220   lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
 8221   if (n > 0) {
 8222     addq(tmp3, n * 256 * 8);
 8223   }
 8224   //    Q1 = TABLEExt[n][B & 0xFF];
 8225   movl(tmp1, in);
 8226   andl(tmp1, 0x000000FF);
 8227   shll(tmp1, 3);
 8228   addq(tmp1, tmp3);
 8229   movq(tmp1, Address(tmp1, 0));
 8230 
 8231   //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
 8232   movl(tmp2, in);
 8233   shrl(tmp2, 8);
 8234   andl(tmp2, 0x000000FF);
 8235   shll(tmp2, 3);
 8236   addq(tmp2, tmp3);
 8237   movq(tmp2, Address(tmp2, 0));
 8238 
 8239   shlq(tmp2, 8);
 8240   xorq(tmp1, tmp2);
 8241 
 8242   //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
 8243   movl(tmp2, in);
 8244   shrl(tmp2, 16);
 8245   andl(tmp2, 0x000000FF);
 8246   shll(tmp2, 3);
 8247   addq(tmp2, tmp3);
 8248   movq(tmp2, Address(tmp2, 0));
 8249 
 8250   shlq(tmp2, 16);
 8251   xorq(tmp1, tmp2);
 8252 
 8253   //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
 8254   shrl(in, 24);
 8255   andl(in, 0x000000FF);
 8256   shll(in, 3);
 8257   addq(in, tmp3);
 8258   movq(in, Address(in, 0));
 8259 
 8260   shlq(in, 24);
 8261   xorq(in, tmp1);
 8262   //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
 8263 }
 8264 
 8265 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
 8266                                       Register in_out,
 8267                                       uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
 8268                                       XMMRegister w_xtmp2,
 8269                                       Register tmp1,
 8270                                       Register n_tmp2, Register n_tmp3) {
 8271   if (is_pclmulqdq_supported) {
 8272     movdl(w_xtmp1, in_out); // modified blindly
 8273 
 8274     movl(tmp1, const_or_pre_comp_const_index);
 8275     movdl(w_xtmp2, tmp1);
 8276     pclmulqdq(w_xtmp1, w_xtmp2, 0);
 8277 
 8278     movdq(in_out, w_xtmp1);
 8279   } else {
 8280     crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
 8281   }
 8282 }
 8283 
 8284 // Recombination Alternative 2: No bit-reflections
 8285 // T1 = (CRC_A * U1) << 1
 8286 // T2 = (CRC_B * U2) << 1
 8287 // C1 = T1 >> 32
 8288 // C2 = T2 >> 32
 8289 // T1 = T1 & 0xFFFFFFFF
 8290 // T2 = T2 & 0xFFFFFFFF
 8291 // T1 = CRC32(0, T1)
 8292 // T2 = CRC32(0, T2)
 8293 // C1 = C1 ^ T1
 8294 // C2 = C2 ^ T2
 8295 // CRC = C1 ^ C2 ^ CRC_C
 8296 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
 8297                                      XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8298                                      Register tmp1, Register tmp2,
 8299                                      Register n_tmp3) {
 8300   crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8301   crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
 8302   shlq(in_out, 1);
 8303   movl(tmp1, in_out);
 8304   shrq(in_out, 32);
 8305   xorl(tmp2, tmp2);
 8306   crc32(tmp2, tmp1, 4);
 8307   xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
 8308   shlq(in1, 1);
 8309   movl(tmp1, in1);
 8310   shrq(in1, 32);
 8311   xorl(tmp2, tmp2);
 8312   crc32(tmp2, tmp1, 4);
 8313   xorl(in1, tmp2);
 8314   xorl(in_out, in1);
 8315   xorl(in_out, in2);
 8316 }
 8317 
// Set N to a predefined value.
// Subtract it from the length of the buffer and
// execute in a loop:
 8321 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
 8322 // for i = 1 to N do
 8323 //  CRC_A = CRC32(CRC_A, A[i])
 8324 //  CRC_B = CRC32(CRC_B, B[i])
 8325 //  CRC_C = CRC32(CRC_C, C[i])
 8326 // end for
 8327 // Recombine
 8328 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
 8329                                        Register in_out1, Register in_out2, Register in_out3,
 8330                                        Register tmp1, Register tmp2, Register tmp3,
 8331                                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8332                                        Register tmp4, Register tmp5,
 8333                                        Register n_tmp6) {
 8334   Label L_processPartitions;
 8335   Label L_processPartition;
 8336   Label L_exit;
 8337 
 8338   bind(L_processPartitions);
 8339   cmpl(in_out1, 3 * size);
 8340   jcc(Assembler::less, L_exit);
 8341     xorl(tmp1, tmp1);
 8342     xorl(tmp2, tmp2);
 8343     movq(tmp3, in_out2);
 8344     addq(tmp3, size);
 8345 
 8346     bind(L_processPartition);
 8347       crc32(in_out3, Address(in_out2, 0), 8);
 8348       crc32(tmp1, Address(in_out2, size), 8);
 8349       crc32(tmp2, Address(in_out2, size * 2), 8);
 8350       addq(in_out2, 8);
 8351       cmpq(in_out2, tmp3);
 8352       jcc(Assembler::less, L_processPartition);
 8353     crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
 8354             w_xtmp1, w_xtmp2, w_xtmp3,
 8355             tmp4, tmp5,
 8356             n_tmp6);
 8357     addq(in_out2, 2 * size);
 8358     subl(in_out1, 3 * size);
 8359     jmp(L_processPartitions);
 8360 
 8361   bind(L_exit);
 8362 }
 8363 
 8364 // Algorithm 2: Pipelined usage of the CRC32 instruction.
 8365 // Input: A buffer I of L bytes.
 8366 // Output: the CRC32C value of the buffer.
 8367 // Notations:
 8368 // Write L = 24N + r, with N = floor (L/24).
 8369 // r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation A|B|C|R, where A, B and C each consist of
// N quadwords, and R consists of r bytes.
// A[j] = I[8j+7 : 8j],               j = 0, 1, ..., N-1
// B[j] = I[8N + 8j+7 : 8N + 8j],     j = 0, 1, ..., N-1
// C[j] = I[16N + 8j+7 : 16N + 8j],   j = 0, 1, ..., N-1
// if r > 0, R[j] = I[24N + j],       j = 0, 1, ..., r-1
 8376 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
 8377                                           Register tmp1, Register tmp2, Register tmp3,
 8378                                           Register tmp4, Register tmp5, Register tmp6,
 8379                                           XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
 8380                                           bool is_pclmulqdq_supported) {
 8381   uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
 8382   Label L_wordByWord;
 8383   Label L_byteByByteProlog;
 8384   Label L_byteByByte;
 8385   Label L_exit;
 8386 
 8387   if (is_pclmulqdq_supported ) {
 8388     const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
 8389     const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
 8390 
 8391     const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
 8392     const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
 8393 
 8394     const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
 8395     const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
 8396     assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
 8397   } else {
 8398     const_or_pre_comp_const_index[0] = 1;
 8399     const_or_pre_comp_const_index[1] = 0;
 8400 
 8401     const_or_pre_comp_const_index[2] = 3;
 8402     const_or_pre_comp_const_index[3] = 2;
 8403 
 8404     const_or_pre_comp_const_index[4] = 5;
 8405     const_or_pre_comp_const_index[5] = 4;
 8406    }
 8407   crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
 8408                     in2, in1, in_out,
 8409                     tmp1, tmp2, tmp3,
 8410                     w_xtmp1, w_xtmp2, w_xtmp3,
 8411                     tmp4, tmp5,
 8412                     tmp6);
 8413   crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
 8414                     in2, in1, in_out,
 8415                     tmp1, tmp2, tmp3,
 8416                     w_xtmp1, w_xtmp2, w_xtmp3,
 8417                     tmp4, tmp5,
 8418                     tmp6);
 8419   crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
 8420                     in2, in1, in_out,
 8421                     tmp1, tmp2, tmp3,
 8422                     w_xtmp1, w_xtmp2, w_xtmp3,
 8423                     tmp4, tmp5,
 8424                     tmp6);
 8425   movl(tmp1, in2);
 8426   andl(tmp1, 0x00000007);
 8427   negl(tmp1);
 8428   addl(tmp1, in2);
 8429   addq(tmp1, in1);
 8430 
 8431   cmpq(in1, tmp1);
 8432   jccb(Assembler::greaterEqual, L_byteByByteProlog);
 8433   align(16);
 8434   BIND(L_wordByWord);
 8435     crc32(in_out, Address(in1, 0), 8);
 8436     addq(in1, 8);
 8437     cmpq(in1, tmp1);
 8438     jcc(Assembler::less, L_wordByWord);
 8439 
 8440   BIND(L_byteByByteProlog);
 8441   andl(in2, 0x00000007);
 8442   movl(tmp2, 1);
 8443 
 8444   cmpl(tmp2, in2);
 8445   jccb(Assembler::greater, L_exit);
 8446   BIND(L_byteByByte);
 8447     crc32(in_out, Address(in1, 0), 1);
 8448     incq(in1);
 8449     incl(tmp2);
 8450     cmpl(tmp2, in2);
 8451     jcc(Assembler::lessEqual, L_byteByByte);
 8452 
 8453   BIND(L_exit);
 8454 }
 8455 #undef BIND
 8456 #undef BLOCK_COMMENT
 8457 
 8458 // Compress char[] array to byte[].
 8459 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Return the array length if every element in the array can be encoded;
// otherwise, return the index of the first non-latin1 (> 0xff) character.
 8462 //   @IntrinsicCandidate
 8463 //   public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
 8464 //     for (int i = 0; i < len; i++) {
 8465 //       char c = src[srcOff];
 8466 //       if (c > 0xff) {
 8467 //           return i;  // return index of non-latin1 char
 8468 //       }
 8469 //       dst[dstOff] = (byte)c;
 8470 //       srcOff++;
 8471 //       dstOff++;
 8472 //     }
 8473 //     return len;
 8474 //   }
 8475 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
 8476   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
 8477   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
 8478   Register tmp5, Register result, KRegister mask1, KRegister mask2) {
 8479   Label copy_chars_loop, done, reset_sp, copy_tail;
 8480 
 8481   // rsi: src
 8482   // rdi: dst
 8483   // rdx: len
 8484   // rcx: tmp5
 8485   // rax: result
 8486 
 8487   // rsi holds start addr of source char[] to be compressed
 8488   // rdi holds start addr of destination byte[]
 8489   // rdx holds length
 8490 
 8491   assert(len != result, "");
 8492 
 8493   // save length for return
 8494   movl(result, len);
 8495 
 8496   if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
 8497     VM_Version::supports_avx512vlbw() &&
 8498     VM_Version::supports_bmi2()) {
 8499 
 8500     Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;
 8501 
 8502     // alignment
 8503     Label post_alignment;
 8504 
 8505     // if length of the string is less than 32, handle it the old fashioned way
 8506     testl(len, -32);
 8507     jcc(Assembler::zero, below_threshold);
 8508 
 8509     // First check whether a character is compressible ( <= 0xFF).
 8510     // Create mask to test for Unicode chars inside zmm vector
 8511     movl(tmp5, 0x00FF);
 8512     evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);
 8513 
 8514     testl(len, -64);
 8515     jccb(Assembler::zero, post_alignment);
 8516 
 8517     movl(tmp5, dst);
 8518     andl(tmp5, (32 - 1));
 8519     negl(tmp5);
 8520     andl(tmp5, (32 - 1));
 8521 
 8522     // bail out when there is nothing to be done
 8523     testl(tmp5, 0xFFFFFFFF);
 8524     jccb(Assembler::zero, post_alignment);
 8525 
 8526     // ~(~0 << len), where len is the # of remaining elements to process
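    // (e.g., tmp5 == 5 gives len = ~(0xFFFFFFFF << 5) = 0x1F, a mask selecting the low five char lanes)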
 8527     movl(len, 0xFFFFFFFF);
 8528     shlxl(len, len, tmp5);
 8529     notl(len);
 8530     kmovdl(mask2, len);
 8531     movl(len, result);
 8532 
 8533     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 8534     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 8535     ktestd(mask1, mask2);
 8536     jcc(Assembler::carryClear, copy_tail);
 8537 
 8538     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 8539 
 8540     addptr(src, tmp5);
 8541     addptr(src, tmp5);
 8542     addptr(dst, tmp5);
 8543     subl(len, tmp5);
 8544 
 8545     bind(post_alignment);
 8546     // end of alignment
 8547 
 8548     movl(tmp5, len);
 8549     andl(tmp5, (32 - 1));    // tail count (in chars)
 8550     andl(len, ~(32 - 1));    // vector count (in chars)
 8551     jccb(Assembler::zero, copy_loop_tail);
 8552 
 8553     lea(src, Address(src, len, Address::times_2));
 8554     lea(dst, Address(dst, len, Address::times_1));
 8555     negptr(len);
 8556 
 8557     bind(copy_32_loop);
 8558     evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
 8559     evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
 8560     kortestdl(mask1, mask1);
 8561     jccb(Assembler::carryClear, reset_for_copy_tail);
 8562 
    // All elements in the currently processed chunk are valid candidates for
    // compression. Write the truncated byte elements to memory.
 8565     evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
 8566     addptr(len, 32);
 8567     jccb(Assembler::notZero, copy_32_loop);
 8568 
 8569     bind(copy_loop_tail);
 8570     // bail out when there is nothing to be done
 8571     testl(tmp5, 0xFFFFFFFF);
 8572     jcc(Assembler::zero, done);
 8573 
 8574     movl(len, tmp5);
 8575 
 8576     // ~(~0 << len), where len is the # of remaining elements to process
 8577     movl(tmp5, 0xFFFFFFFF);
 8578     shlxl(tmp5, tmp5, len);
 8579     notl(tmp5);
 8580 
 8581     kmovdl(mask2, tmp5);
 8582 
 8583     evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
 8584     evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
 8585     ktestd(mask1, mask2);
 8586     jcc(Assembler::carryClear, copy_tail);
 8587 
 8588     evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);
 8589     jmp(done);
 8590 
 8591     bind(reset_for_copy_tail);
 8592     lea(src, Address(src, tmp5, Address::times_2));
 8593     lea(dst, Address(dst, tmp5, Address::times_1));
 8594     subptr(len, tmp5);
 8595     jmp(copy_chars_loop);
 8596 
 8597     bind(below_threshold);
 8598   }
 8599 
 8600   if (UseSSE42Intrinsics) {
 8601     Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail;
 8602 
 8603     // vectored compression
 8604     testl(len, 0xfffffff8);
 8605     jcc(Assembler::zero, copy_tail);
 8606 
 8607     movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors
 8608     movdl(tmp1Reg, tmp5);
 8609     pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
 8610 
 8611     andl(len, 0xfffffff0);
 8612     jccb(Assembler::zero, copy_16);
 8613 
 8614     // compress 16 chars per iter
 8615     pxor(tmp4Reg, tmp4Reg);
 8616 
 8617     lea(src, Address(src, len, Address::times_2));
 8618     lea(dst, Address(dst, len, Address::times_1));
 8619     negptr(len);
 8620 
 8621     bind(copy_32_loop);
 8622     movdqu(tmp2Reg, Address(src, len, Address::times_2));     // load 1st 8 characters
 8623     por(tmp4Reg, tmp2Reg);
 8624     movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
 8625     por(tmp4Reg, tmp3Reg);
 8626     ptest(tmp4Reg, tmp1Reg);       // check for Unicode chars in next vector
 8627     jccb(Assembler::notZero, reset_for_copy_tail);
 8628     packuswb(tmp2Reg, tmp3Reg);    // only ASCII chars; compress each to 1 byte
 8629     movdqu(Address(dst, len, Address::times_1), tmp2Reg);
 8630     addptr(len, 16);
 8631     jccb(Assembler::notZero, copy_32_loop);
 8632 
 8633     // compress next vector of 8 chars (if any)
 8634     bind(copy_16);
 8635     // len = 0
 8636     testl(result, 0x00000008);     // check if there's a block of 8 chars to compress
 8637     jccb(Assembler::zero, copy_tail_sse);
 8638 
 8639     pxor(tmp3Reg, tmp3Reg);
 8640 
 8641     movdqu(tmp2Reg, Address(src, 0));
 8642     ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
 8643     jccb(Assembler::notZero, reset_for_copy_tail);
 8644     packuswb(tmp2Reg, tmp3Reg);    // only LATIN1 chars; compress each to 1 byte
 8645     movq(Address(dst, 0), tmp2Reg);
 8646     addptr(src, 16);
 8647     addptr(dst, 8);
 8648     jmpb(copy_tail_sse);
 8649 
 8650     bind(reset_for_copy_tail);
 8651     movl(tmp5, result);
 8652     andl(tmp5, 0x0000000f);
 8653     lea(src, Address(src, tmp5, Address::times_2));
 8654     lea(dst, Address(dst, tmp5, Address::times_1));
 8655     subptr(len, tmp5);
 8656     jmpb(copy_chars_loop);
 8657 
 8658     bind(copy_tail_sse);
 8659     movl(len, result);
 8660     andl(len, 0x00000007);    // tail count (in chars)
 8661   }
 8662   // compress 1 char per iter
 8663   bind(copy_tail);
 8664   testl(len, len);
 8665   jccb(Assembler::zero, done);
 8666   lea(src, Address(src, len, Address::times_2));
 8667   lea(dst, Address(dst, len, Address::times_1));
 8668   negptr(len);
 8669 
 8670   bind(copy_chars_loop);
 8671   load_unsigned_short(tmp5, Address(src, len, Address::times_2));
 8672   testl(tmp5, 0xff00);      // check if Unicode char
 8673   jccb(Assembler::notZero, reset_sp);
 8674   movb(Address(dst, len, Address::times_1), tmp5);  // ASCII char; compress to 1 byte
 8675   increment(len);
 8676   jccb(Assembler::notZero, copy_chars_loop);
 8677 
 8678   // add len then return (len will be zero if compress succeeded, otherwise negative)
 8679   bind(reset_sp);
 8680   addl(result, len);
 8681 
 8682   bind(done);
 8683 }
 8684 
 8685 // Inflate byte[] array to char[].
 8686 //   ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
 8687 //   @IntrinsicCandidate
 8688 //   private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
 8689 //     for (int i = 0; i < len; i++) {
 8690 //       dst[dstOff++] = (char)(src[srcOff++] & 0xff);
 8691 //     }
 8692 //   }
 8693 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
 8694   XMMRegister tmp1, Register tmp2, KRegister mask) {
 8695   Label copy_chars_loop, done, below_threshold, avx3_threshold;
 8696   // rsi: src
 8697   // rdi: dst
 8698   // rdx: len
 8699   // rcx: tmp2
 8700 
 8701   // rsi holds start addr of source byte[] to be inflated
 8702   // rdi holds start addr of destination char[]
 8703   // rdx holds length
 8704   assert_different_registers(src, dst, len, tmp2);
 8705   movl(tmp2, len);
 8706   if ((UseAVX > 2) && // AVX512
 8707     VM_Version::supports_avx512vlbw() &&
 8708     VM_Version::supports_bmi2()) {
 8709 
 8710     Label copy_32_loop, copy_tail;
 8711     Register tmp3_aliased = len;
 8712 
    // if the string length is less than 16, handle it the old-fashioned way
 8714     testl(len, -16);
 8715     jcc(Assembler::zero, below_threshold);
 8716 
 8717     testl(len, -1 * AVX3Threshold);
 8718     jcc(Assembler::zero, avx3_threshold);
 8719 
    // Pre-compute the tail and vector counts here so the main loop needs only
    // one arithmetic operation per iteration.
    andl(tmp2, (32 - 1)); // tail count (in chars), for the 32-element-wide loop
 8723     andl(len, -32);     // vector count
 8724     jccb(Assembler::zero, copy_tail);
 8725 
 8726     lea(src, Address(src, len, Address::times_1));
 8727     lea(dst, Address(dst, len, Address::times_2));
 8728     negptr(len);
 8729 
 8730 
 8731     // inflate 32 chars per iter
 8732     bind(copy_32_loop);
 8733     vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
 8734     evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
 8735     addptr(len, 32);
 8736     jcc(Assembler::notZero, copy_32_loop);
 8737 
 8738     bind(copy_tail);
 8739     // bail out when there is nothing to be done
 8740     testl(tmp2, -1); // we don't destroy the contents of tmp2 here
 8741     jcc(Assembler::zero, done);
 8742 
 8743     // ~(~0 << length), where length is the # of remaining elements to process
 8744     movl(tmp3_aliased, -1);
 8745     shlxl(tmp3_aliased, tmp3_aliased, tmp2);
 8746     notl(tmp3_aliased);
 8747     kmovdl(mask, tmp3_aliased);
 8748     evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit);
 8749     evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit);
 8750 
 8751     jmp(done);
 8752     bind(avx3_threshold);
 8753   }
 8754   if (UseSSE42Intrinsics) {
 8755     Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
 8756 
 8757     if (UseAVX > 1) {
 8758       andl(tmp2, (16 - 1));
 8759       andl(len, -16);
 8760       jccb(Assembler::zero, copy_new_tail);
 8761     } else {
 8762       andl(tmp2, 0x00000007);   // tail count (in chars)
 8763       andl(len, 0xfffffff8);    // vector count (in chars)
 8764       jccb(Assembler::zero, copy_tail);
 8765     }
 8766 
 8767     // vectored inflation
 8768     lea(src, Address(src, len, Address::times_1));
 8769     lea(dst, Address(dst, len, Address::times_2));
 8770     negptr(len);
 8771 
 8772     if (UseAVX > 1) {
 8773       bind(copy_16_loop);
 8774       vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
 8775       vmovdqu(Address(dst, len, Address::times_2), tmp1);
 8776       addptr(len, 16);
 8777       jcc(Assembler::notZero, copy_16_loop);
 8778 
 8779       bind(below_threshold);
 8780       bind(copy_new_tail);
 8781       movl(len, tmp2);
 8782       andl(tmp2, 0x00000007);
 8783       andl(len, 0xFFFFFFF8);
 8784       jccb(Assembler::zero, copy_tail);
 8785 
 8786       pmovzxbw(tmp1, Address(src, 0));
 8787       movdqu(Address(dst, 0), tmp1);
 8788       addptr(src, 8);
 8789       addptr(dst, 2 * 8);
 8790 
 8791       jmp(copy_tail, true);
 8792     }
 8793 
 8794     // inflate 8 chars per iter
 8795     bind(copy_8_loop);
 8796     pmovzxbw(tmp1, Address(src, len, Address::times_1));  // unpack to 8 words
 8797     movdqu(Address(dst, len, Address::times_2), tmp1);
 8798     addptr(len, 8);
 8799     jcc(Assembler::notZero, copy_8_loop);
 8800 
 8801     bind(copy_tail);
 8802     movl(len, tmp2);
 8803 
 8804     cmpl(len, 4);
 8805     jccb(Assembler::less, copy_bytes);
 8806 
 8807     movdl(tmp1, Address(src, 0));  // load 4 byte chars
 8808     pmovzxbw(tmp1, tmp1);
 8809     movq(Address(dst, 0), tmp1);
 8810     subptr(len, 4);
 8811     addptr(src, 4);
 8812     addptr(dst, 8);
 8813 
 8814     bind(copy_bytes);
 8815   } else {
 8816     bind(below_threshold);
 8817   }
 8818 
 8819   testl(len, len);
 8820   jccb(Assembler::zero, done);
 8821   lea(src, Address(src, len, Address::times_1));
 8822   lea(dst, Address(dst, len, Address::times_2));
 8823   negptr(len);
 8824 
 8825   // inflate 1 char per iter
 8826   bind(copy_chars_loop);
 8827   load_unsigned_byte(tmp2, Address(src, len, Address::times_1));  // load byte char
 8828   movw(Address(dst, len, Address::times_2), tmp2);  // inflate byte char to word
 8829   increment(len);
 8830   jcc(Assembler::notZero, copy_chars_loop);
 8831 
 8832   bind(done);
 8833 }
 8834 
 8835 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
 8836   switch(type) {
 8837     case T_BYTE:
 8838     case T_BOOLEAN:
 8839       evmovdqub(dst, kmask, src, merge, vector_len);
 8840       break;
 8841     case T_CHAR:
 8842     case T_SHORT:
 8843       evmovdquw(dst, kmask, src, merge, vector_len);
 8844       break;
 8845     case T_INT:
 8846     case T_FLOAT:
 8847       evmovdqul(dst, kmask, src, merge, vector_len);
 8848       break;
 8849     case T_LONG:
 8850     case T_DOUBLE:
 8851       evmovdquq(dst, kmask, src, merge, vector_len);
 8852       break;
 8853     default:
 8854       fatal("Unexpected type argument %s", type2name(type));
 8855       break;
 8856   }
 8857 }
 8858 
 8859 
 8860 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) {
 8861   switch(type) {
 8862     case T_BYTE:
 8863     case T_BOOLEAN:
 8864       evmovdqub(dst, kmask, src, merge, vector_len);
 8865       break;
 8866     case T_CHAR:
 8867     case T_SHORT:
 8868       evmovdquw(dst, kmask, src, merge, vector_len);
 8869       break;
 8870     case T_INT:
 8871     case T_FLOAT:
 8872       evmovdqul(dst, kmask, src, merge, vector_len);
 8873       break;
 8874     case T_LONG:
 8875     case T_DOUBLE:
 8876       evmovdquq(dst, kmask, src, merge, vector_len);
 8877       break;
 8878     default:
 8879       fatal("Unexpected type argument %s", type2name(type));
 8880       break;
 8881   }
 8882 }
 8883 
 8884 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) {
 8885   switch(type) {
 8886     case T_BYTE:
 8887     case T_BOOLEAN:
 8888       evmovdqub(dst, kmask, src, merge, vector_len);
 8889       break;
 8890     case T_CHAR:
 8891     case T_SHORT:
 8892       evmovdquw(dst, kmask, src, merge, vector_len);
 8893       break;
 8894     case T_INT:
 8895     case T_FLOAT:
 8896       evmovdqul(dst, kmask, src, merge, vector_len);
 8897       break;
 8898     case T_LONG:
 8899     case T_DOUBLE:
 8900       evmovdquq(dst, kmask, src, merge, vector_len);
 8901       break;
 8902     default:
 8903       fatal("Unexpected type argument %s", type2name(type));
 8904       break;
 8905   }
 8906 }
 8907 
 8908 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
 8909   switch(masklen) {
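    // For masklen 2 and 4 the full-register knot also flips the unused upper
    // bits, so mask the result down to the low masklen bits afterwards.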
 8910     case 2:
 8911        knotbl(dst, src);
 8912        movl(rtmp, 3);
 8913        kmovbl(ktmp, rtmp);
 8914        kandbl(dst, ktmp, dst);
 8915        break;
 8916     case 4:
 8917        knotbl(dst, src);
 8918        movl(rtmp, 15);
 8919        kmovbl(ktmp, rtmp);
 8920        kandbl(dst, ktmp, dst);
 8921        break;
 8922     case 8:
 8923        knotbl(dst, src);
 8924        break;
 8925     case 16:
 8926        knotwl(dst, src);
 8927        break;
 8928     case 32:
 8929        knotdl(dst, src);
 8930        break;
 8931     case 64:
 8932        knotql(dst, src);
 8933        break;
 8934     default:
 8935       fatal("Unexpected vector length %d", masklen);
 8936       break;
 8937   }
 8938 }
 8939 
 8940 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 8941   switch(type) {
 8942     case T_BOOLEAN:
 8943     case T_BYTE:
 8944        kandbl(dst, src1, src2);
 8945        break;
 8946     case T_CHAR:
 8947     case T_SHORT:
 8948        kandwl(dst, src1, src2);
 8949        break;
 8950     case T_INT:
 8951     case T_FLOAT:
 8952        kanddl(dst, src1, src2);
 8953        break;
 8954     case T_LONG:
 8955     case T_DOUBLE:
 8956        kandql(dst, src1, src2);
 8957        break;
 8958     default:
 8959       fatal("Unexpected type argument %s", type2name(type));
 8960       break;
 8961   }
 8962 }
 8963 
 8964 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 8965   switch(type) {
 8966     case T_BOOLEAN:
 8967     case T_BYTE:
 8968        korbl(dst, src1, src2);
 8969        break;
 8970     case T_CHAR:
 8971     case T_SHORT:
 8972        korwl(dst, src1, src2);
 8973        break;
 8974     case T_INT:
 8975     case T_FLOAT:
 8976        kordl(dst, src1, src2);
 8977        break;
 8978     case T_LONG:
 8979     case T_DOUBLE:
 8980        korql(dst, src1, src2);
 8981        break;
 8982     default:
 8983       fatal("Unexpected type argument %s", type2name(type));
 8984       break;
 8985   }
 8986 }
 8987 
 8988 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
 8989   switch(type) {
 8990     case T_BOOLEAN:
 8991     case T_BYTE:
 8992        kxorbl(dst, src1, src2);
 8993        break;
 8994     case T_CHAR:
 8995     case T_SHORT:
 8996        kxorwl(dst, src1, src2);
 8997        break;
 8998     case T_INT:
 8999     case T_FLOAT:
 9000        kxordl(dst, src1, src2);
 9001        break;
 9002     case T_LONG:
 9003     case T_DOUBLE:
 9004        kxorql(dst, src1, src2);
 9005        break;
 9006     default:
 9007       fatal("Unexpected type argument %s", type2name(type));
 9008       break;
 9009   }
 9010 }
 9011 
 9012 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9013   switch(type) {
 9014     case T_BOOLEAN:
 9015     case T_BYTE:
 9016       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9017     case T_CHAR:
 9018     case T_SHORT:
 9019       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9020     case T_INT:
 9021     case T_FLOAT:
 9022       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9023     case T_LONG:
 9024     case T_DOUBLE:
 9025       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9026     default:
 9027       fatal("Unexpected type argument %s", type2name(type)); break;
 9028   }
 9029 }
 9030 
 9031 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9032   switch(type) {
 9033     case T_BOOLEAN:
 9034     case T_BYTE:
 9035       evpermb(dst, mask, nds, src, merge, vector_len); break;
 9036     case T_CHAR:
 9037     case T_SHORT:
 9038       evpermw(dst, mask, nds, src, merge, vector_len); break;
 9039     case T_INT:
 9040     case T_FLOAT:
 9041       evpermd(dst, mask, nds, src, merge, vector_len); break;
 9042     case T_LONG:
 9043     case T_DOUBLE:
 9044       evpermq(dst, mask, nds, src, merge, vector_len); break;
 9045     default:
 9046       fatal("Unexpected type argument %s", type2name(type)); break;
 9047   }
 9048 }
 9049 
 9050 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9051   switch(type) {
 9052     case T_BYTE:
 9053       evpminub(dst, mask, nds, src, merge, vector_len); break;
 9054     case T_SHORT:
 9055       evpminuw(dst, mask, nds, src, merge, vector_len); break;
 9056     case T_INT:
 9057       evpminud(dst, mask, nds, src, merge, vector_len); break;
 9058     case T_LONG:
 9059       evpminuq(dst, mask, nds, src, merge, vector_len); break;
 9060     default:
 9061       fatal("Unexpected type argument %s", type2name(type)); break;
 9062   }
 9063 }
 9064 
 9065 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9066   switch(type) {
 9067     case T_BYTE:
 9068       evpmaxub(dst, mask, nds, src, merge, vector_len); break;
 9069     case T_SHORT:
 9070       evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
 9071     case T_INT:
 9072       evpmaxud(dst, mask, nds, src, merge, vector_len); break;
 9073     case T_LONG:
 9074       evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
 9075     default:
 9076       fatal("Unexpected type argument %s", type2name(type)); break;
 9077   }
 9078 }
 9079 
 9080 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9081   switch(type) {
 9082     case T_BYTE:
 9083       evpminub(dst, mask, nds, src, merge, vector_len); break;
 9084     case T_SHORT:
 9085       evpminuw(dst, mask, nds, src, merge, vector_len); break;
 9086     case T_INT:
 9087       evpminud(dst, mask, nds, src, merge, vector_len); break;
 9088     case T_LONG:
 9089       evpminuq(dst, mask, nds, src, merge, vector_len); break;
 9090     default:
 9091       fatal("Unexpected type argument %s", type2name(type)); break;
 9092   }
 9093 }
 9094 
 9095 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9096   switch(type) {
 9097     case T_BYTE:
 9098       evpmaxub(dst, mask, nds, src, merge, vector_len); break;
 9099     case T_SHORT:
 9100       evpmaxuw(dst, mask, nds, src, merge, vector_len); break;
 9101     case T_INT:
 9102       evpmaxud(dst, mask, nds, src, merge, vector_len); break;
 9103     case T_LONG:
 9104       evpmaxuq(dst, mask, nds, src, merge, vector_len); break;
 9105     default:
 9106       fatal("Unexpected type argument %s", type2name(type)); break;
 9107   }
 9108 }
 9109 
 9110 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9111   switch(type) {
 9112     case T_BYTE:
 9113       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9114     case T_SHORT:
 9115       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9116     case T_INT:
 9117       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9118     case T_LONG:
 9119       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9120     case T_FLOAT:
 9121       evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9122     case T_DOUBLE:
 9123       evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9124     default:
 9125       fatal("Unexpected type argument %s", type2name(type)); break;
 9126   }
 9127 }
 9128 
 9129 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9130   switch(type) {
 9131     case T_BYTE:
 9132       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9133     case T_SHORT:
 9134       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9135     case T_INT:
 9136       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9137     case T_LONG:
 9138       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9139     case T_FLOAT:
 9140       evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9141     case T_DOUBLE:
 9142       evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9143     default:
 9144       fatal("Unexpected type argument %s", type2name(type)); break;
 9145   }
 9146 }
 9147 
 9148 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9149   switch(type) {
 9150     case T_BYTE:
 9151       evpminsb(dst, mask, nds, src, merge, vector_len); break;
 9152     case T_SHORT:
 9153       evpminsw(dst, mask, nds, src, merge, vector_len); break;
 9154     case T_INT:
 9155       evpminsd(dst, mask, nds, src, merge, vector_len); break;
 9156     case T_LONG:
 9157       evpminsq(dst, mask, nds, src, merge, vector_len); break;
 9158     case T_FLOAT:
 9159       evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9160     case T_DOUBLE:
 9161       evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
 9162     default:
 9163       fatal("Unexpected type argument %s", type2name(type)); break;
 9164   }
 9165 }
 9166 
 9167 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9168   switch(type) {
 9169     case T_BYTE:
 9170       evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
 9171     case T_SHORT:
 9172       evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
 9173     case T_INT:
 9174       evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
 9175     case T_LONG:
 9176       evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
 9177     case T_FLOAT:
 9178       evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9179     case T_DOUBLE:
      evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
 9181     default:
 9182       fatal("Unexpected type argument %s", type2name(type)); break;
 9183   }
 9184 }
 9185 
 9186 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9187   switch(type) {
 9188     case T_INT:
 9189       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9190     case T_LONG:
 9191       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9192     default:
 9193       fatal("Unexpected type argument %s", type2name(type)); break;
 9194   }
 9195 }
 9196 
 9197 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9198   switch(type) {
 9199     case T_INT:
 9200       evpxord(dst, mask, nds, src, merge, vector_len); break;
 9201     case T_LONG:
 9202       evpxorq(dst, mask, nds, src, merge, vector_len); break;
 9203     default:
 9204       fatal("Unexpected type argument %s", type2name(type)); break;
 9205   }
 9206 }
 9207 
 9208 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9209   switch(type) {
 9210     case T_INT:
 9211       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9212     case T_LONG:
 9213       evporq(dst, mask, nds, src, merge, vector_len); break;
 9214     default:
 9215       fatal("Unexpected type argument %s", type2name(type)); break;
 9216   }
 9217 }
 9218 
 9219 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9220   switch(type) {
 9221     case T_INT:
 9222       Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
 9223     case T_LONG:
 9224       evporq(dst, mask, nds, src, merge, vector_len); break;
 9225     default:
 9226       fatal("Unexpected type argument %s", type2name(type)); break;
 9227   }
 9228 }
 9229 
 9230 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
 9231   switch(type) {
 9232     case T_INT:
 9233       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9234     case T_LONG:
 9235       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9236     default:
 9237       fatal("Unexpected type argument %s", type2name(type)); break;
 9238   }
 9239 }
 9240 
 9241 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
 9242   switch(type) {
 9243     case T_INT:
 9244       evpandd(dst, mask, nds, src, merge, vector_len); break;
 9245     case T_LONG:
 9246       evpandq(dst, mask, nds, src, merge, vector_len); break;
 9247     default:
 9248       fatal("Unexpected type argument %s", type2name(type)); break;
 9249   }
 9250 }
 9251 
 9252 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
 9253   switch(masklen) {
 9254     case 8:
 9255        kortestbl(src1, src2);
 9256        break;
 9257     case 16:
 9258        kortestwl(src1, src2);
 9259        break;
 9260     case 32:
 9261        kortestdl(src1, src2);
 9262        break;
 9263     case 64:
 9264        kortestql(src1, src2);
 9265        break;
 9266     default:
 9267       fatal("Unexpected mask length %d", masklen);
 9268       break;
 9269   }
 9270 }
 9271 
 9272 
 9273 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
 9274   switch(masklen)  {
 9275     case 8:
 9276        ktestbl(src1, src2);
 9277        break;
 9278     case 16:
 9279        ktestwl(src1, src2);
 9280        break;
 9281     case 32:
 9282        ktestdl(src1, src2);
 9283        break;
 9284     case 64:
 9285        ktestql(src1, src2);
 9286        break;
 9287     default:
 9288       fatal("Unexpected mask length %d", masklen);
 9289       break;
 9290   }
 9291 }
 9292 
 9293 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9294   switch(type) {
 9295     case T_INT:
 9296       evprold(dst, mask, src, shift, merge, vlen_enc); break;
 9297     case T_LONG:
 9298       evprolq(dst, mask, src, shift, merge, vlen_enc); break;
 9299     default:
 9300       fatal("Unexpected type argument %s", type2name(type)); break;
 9301       break;
 9302   }
 9303 }
 9304 
 9305 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
 9306   switch(type) {
 9307     case T_INT:
 9308       evprord(dst, mask, src, shift, merge, vlen_enc); break;
 9309     case T_LONG:
 9310       evprorq(dst, mask, src, shift, merge, vlen_enc); break;
 9311     default:
 9312       fatal("Unexpected type argument %s", type2name(type)); break;
 9313   }
 9314 }
 9315 
 9316 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9317   switch(type) {
 9318     case T_INT:
 9319       evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9320     case T_LONG:
 9321       evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9322     default:
 9323       fatal("Unexpected type argument %s", type2name(type)); break;
 9324   }
 9325 }
 9326 
 9327 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
 9328   switch(type) {
 9329     case T_INT:
 9330       evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
 9331     case T_LONG:
 9332       evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
 9333     default:
 9334       fatal("Unexpected type argument %s", type2name(type)); break;
 9335   }
 9336 }
 9337 
 9338 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9339   assert(rscratch != noreg || always_reachable(src), "missing");
 9340 
 9341   if (reachable(src)) {
 9342     evpandq(dst, nds, as_Address(src), vector_len);
 9343   } else {
 9344     lea(rscratch, src);
 9345     evpandq(dst, nds, Address(rscratch, 0), vector_len);
 9346   }
 9347 }
 9348 
 9349 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
 9350   assert(rscratch != noreg || always_reachable(src), "missing");
 9351 
 9352   if (reachable(src)) {
 9353     Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
 9354   } else {
 9355     lea(rscratch, src);
 9356     Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
 9357   }
 9358 }
 9359 
 9360 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9361   assert(rscratch != noreg || always_reachable(src), "missing");
 9362 
 9363   if (reachable(src)) {
 9364     evporq(dst, nds, as_Address(src), vector_len);
 9365   } else {
 9366     lea(rscratch, src);
 9367     evporq(dst, nds, Address(rscratch, 0), vector_len);
 9368   }
 9369 }
 9370 
 9371 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9372   assert(rscratch != noreg || always_reachable(src), "missing");
 9373 
 9374   if (reachable(src)) {
 9375     vpshufb(dst, nds, as_Address(src), vector_len);
 9376   } else {
 9377     lea(rscratch, src);
 9378     vpshufb(dst, nds, Address(rscratch, 0), vector_len);
 9379   }
 9380 }
 9381 
 9382 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
 9383   assert(rscratch != noreg || always_reachable(src), "missing");
 9384 
 9385   if (reachable(src)) {
 9386     Assembler::vpor(dst, nds, as_Address(src), vector_len);
 9387   } else {
 9388     lea(rscratch, src);
 9389     Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
 9390   }
 9391 }
 9392 
 9393 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
 9394   assert(rscratch != noreg || always_reachable(src3), "missing");
 9395 
 9396   if (reachable(src3)) {
 9397     vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
 9398   } else {
 9399     lea(rscratch, src3);
 9400     vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
 9401   }
 9402 }
 9403 
 9404 #if COMPILER2_OR_JVMCI
 9405 
 9406 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
 9407                                  Register length, Register temp, int vec_enc) {
 9408   // Computing mask for predicated vector store.
 9409   movptr(temp, -1);
 9410   bzhiq(temp, temp, length);
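  // temp now holds the low 'length' bits set, i.e. (1 << length) - 1.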
 9411   kmov(mask, temp);
 9412   evmovdqu(bt, mask, dst, xmm, true, vec_enc);
 9413 }
 9414 
// Fill (set) memory operation for lengths less than 64 bytes.
 9416 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
 9417                                        XMMRegister xmm, KRegister mask, Register length,
 9418                                        Register temp, bool use64byteVector) {
 9419   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9420   const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
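  // 'shift' selects the element size: 0 = byte, 1 = short, 2 = int, 3 = long.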
 9421   if (!use64byteVector) {
 9422     fill32(dst, disp, xmm);
 9423     subptr(length, 32 >> shift);
 9424     fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
 9425   } else {
 9426     assert(MaxVectorSize == 64, "vector length != 64");
 9427     fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
 9428   }
 9429 }
 9430 
 9431 
 9432 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
 9433                                        XMMRegister xmm, KRegister mask, Register length,
 9434                                        Register temp) {
 9435   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9436   const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
 9437   fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
 9438 }
 9439 
 9440 
 9441 void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
 9442   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9443   vmovdqu(dst, xmm);
 9444 }
 9445 
 9446 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
 9447   fill32(Address(dst, disp), xmm);
 9448 }
 9449 
 9450 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
 9451   assert(MaxVectorSize >= 32, "vector length should be >= 32");
 9452   if (!use64byteVector) {
 9453     fill32(dst, xmm);
 9454     fill32(dst.plus_disp(32), xmm);
 9455   } else {
 9456     evmovdquq(dst, xmm, Assembler::AVX_512bit);
 9457   }
 9458 }
 9459 
 9460 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
 9461   fill64(Address(dst, disp), xmm, use64byteVector);
 9462 }
 9463 
 9464 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
 9465                                         Register count, Register rtmp, XMMRegister xtmp) {
 9466   Label L_exit;
 9467   Label L_fill_start;
 9468   Label L_fill_64_bytes;
 9469   Label L_fill_96_bytes;
 9470   Label L_fill_128_bytes;
 9471   Label L_fill_128_bytes_loop;
 9472   Label L_fill_128_loop_header;
 9473   Label L_fill_128_bytes_loop_header;
 9474   Label L_fill_128_bytes_loop_pre_header;
 9475   Label L_fill_zmm_sequence;
 9476 
 9477   int shift = -1;
 9478   int avx3threshold = VM_Version::avx3_threshold();
 9479   switch(type) {
 9480     case T_BYTE:  shift = 0;
 9481       break;
 9482     case T_SHORT: shift = 1;
 9483       break;
 9484     case T_INT:   shift = 2;
 9485       break;
 9486     /* Uncomment when LONG fill stubs are supported.
 9487     case T_LONG:  shift = 3;
 9488       break;
 9489     */
 9490     default:
 9491       fatal("Unhandled type: %s\n", type2name(type));
 9492   }
 9493 
 9494   if ((avx3threshold != 0)  || (MaxVectorSize == 32)) {
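    // Take the 256-bit fill path when an AVX3 threshold is in effect or only
    // 256-bit vectors are available.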
 9495 
 9496     if (MaxVectorSize == 64) {
 9497       cmpq(count, avx3threshold >> shift);
 9498       jcc(Assembler::greater, L_fill_zmm_sequence);
 9499     }
 9500 
 9501     evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);
 9502 
 9503     bind(L_fill_start);
 9504 
 9505     cmpq(count, 32 >> shift);
 9506     jccb(Assembler::greater, L_fill_64_bytes);
 9507     fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
 9508     jmp(L_exit);
 9509 
 9510     bind(L_fill_64_bytes);
 9511     cmpq(count, 64 >> shift);
 9512     jccb(Assembler::greater, L_fill_96_bytes);
 9513     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
 9514     jmp(L_exit);
 9515 
 9516     bind(L_fill_96_bytes);
 9517     cmpq(count, 96 >> shift);
 9518     jccb(Assembler::greater, L_fill_128_bytes);
 9519     fill64(to, 0, xtmp);
 9520     subq(count, 64 >> shift);
 9521     fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
 9522     jmp(L_exit);
 9523 
 9524     bind(L_fill_128_bytes);
 9525     cmpq(count, 128 >> shift);
 9526     jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
 9527     fill64(to, 0, xtmp);
 9528     fill32(to, 64, xtmp);
 9529     subq(count, 96 >> shift);
 9530     fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
 9531     jmp(L_exit);
 9532 
 9533     bind(L_fill_128_bytes_loop_pre_header);
 9534     {
 9535       mov(rtmp, to);
 9536       andq(rtmp, 31);
 9537       jccb(Assembler::zero, L_fill_128_bytes_loop_header);
 9538       negq(rtmp);
 9539       addq(rtmp, 32);
 9540       mov64(r8, -1L);
 9541       bzhiq(r8, r8, rtmp);
 9542       kmovql(k2, r8);
 9543       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
 9544       addq(to, rtmp);
 9545       shrq(rtmp, shift);
 9546       subq(count, rtmp);
 9547     }
 9548 
 9549     cmpq(count, 128 >> shift);
 9550     jcc(Assembler::less, L_fill_start);
 9551 
 9552     bind(L_fill_128_bytes_loop_header);
 9553     subq(count, 128 >> shift);
 9554 
 9555     align32();
 9556     bind(L_fill_128_bytes_loop);
 9557       fill64(to, 0, xtmp);
 9558       fill64(to, 64, xtmp);
 9559       addq(to, 128);
 9560       subq(count, 128 >> shift);
 9561       jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);
 9562 
 9563     addq(count, 128 >> shift);
 9564     jcc(Assembler::zero, L_exit);
 9565     jmp(L_fill_start);
 9566   }
 9567 
 9568   if (MaxVectorSize == 64) {
 9569     // Sequence using 64 byte ZMM register.
 9570     Label L_fill_128_bytes_zmm;
 9571     Label L_fill_192_bytes_zmm;
 9572     Label L_fill_192_bytes_loop_zmm;
 9573     Label L_fill_192_bytes_loop_header_zmm;
 9574     Label L_fill_192_bytes_loop_pre_header_zmm;
 9575     Label L_fill_start_zmm_sequence;
 9576 
 9577     bind(L_fill_zmm_sequence);
 9578     evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);
 9579 
 9580     bind(L_fill_start_zmm_sequence);
 9581     cmpq(count, 64 >> shift);
 9582     jccb(Assembler::greater, L_fill_128_bytes_zmm);
 9583     fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
 9584     jmp(L_exit);
 9585 
 9586     bind(L_fill_128_bytes_zmm);
 9587     cmpq(count, 128 >> shift);
 9588     jccb(Assembler::greater, L_fill_192_bytes_zmm);
 9589     fill64(to, 0, xtmp, true);
 9590     subq(count, 64 >> shift);
 9591     fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
 9592     jmp(L_exit);
 9593 
 9594     bind(L_fill_192_bytes_zmm);
 9595     cmpq(count, 192 >> shift);
 9596     jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
 9597     fill64(to, 0, xtmp, true);
 9598     fill64(to, 64, xtmp, true);
 9599     subq(count, 128 >> shift);
 9600     fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
 9601     jmp(L_exit);
 9602 
 9603     bind(L_fill_192_bytes_loop_pre_header_zmm);
 9604     {
 9605       movq(rtmp, to);
 9606       andq(rtmp, 63);
 9607       jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
 9608       negq(rtmp);
 9609       addq(rtmp, 64);
 9610       mov64(r8, -1L);
 9611       bzhiq(r8, r8, rtmp);
 9612       kmovql(k2, r8);
 9613       evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
 9614       addq(to, rtmp);
 9615       shrq(rtmp, shift);
 9616       subq(count, rtmp);
 9617     }
 9618 
 9619     cmpq(count, 192 >> shift);
 9620     jcc(Assembler::less, L_fill_start_zmm_sequence);
 9621 
 9622     bind(L_fill_192_bytes_loop_header_zmm);
 9623     subq(count, 192 >> shift);
 9624 
 9625     align32();
 9626     bind(L_fill_192_bytes_loop_zmm);
 9627       fill64(to, 0, xtmp, true);
 9628       fill64(to, 64, xtmp, true);
 9629       fill64(to, 128, xtmp, true);
 9630       addq(to, 192);
 9631       subq(count, 192 >> shift);
 9632       jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);
 9633 
 9634     addq(count, 192 >> shift);
 9635     jcc(Assembler::zero, L_exit);
 9636     jmp(L_fill_start_zmm_sequence);
 9637   }
 9638   bind(L_exit);
 9639 }
 9640 #endif //COMPILER2_OR_JVMCI
 9641 
 9642 
 9643 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
 9644   Label done;
 9645   cvttss2sil(dst, src);
 9646   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
 9647   cmpl(dst, 0x80000000); // float_sign_flip
 9648   jccb(Assembler::notEqual, done);
 9649   subptr(rsp, 8);
 9650   movflt(Address(rsp, 0), src);
 9651   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
 9652   pop(dst);
 9653   bind(done);
 9654 }
 9655 
 9656 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
 9657   Label done;
 9658   cvttsd2sil(dst, src);
 9659   // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
 9660   cmpl(dst, 0x80000000); // float_sign_flip
 9661   jccb(Assembler::notEqual, done);
 9662   subptr(rsp, 8);
 9663   movdbl(Address(rsp, 0), src);
 9664   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
 9665   pop(dst);
 9666   bind(done);
 9667 }
 9668 
 9669 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
 9670   Label done;
 9671   cvttss2siq(dst, src);
 9672   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
 9673   jccb(Assembler::notEqual, done);
 9674   subptr(rsp, 8);
 9675   movflt(Address(rsp, 0), src);
 9676   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
 9677   pop(dst);
 9678   bind(done);
 9679 }
 9680 
 9681 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Refer to the java.lang.Math.round(float) algorithm for details.
 9684   const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
 9685   const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
 9686   const int32_t FloatConsts_EXP_BIAS = 127;
 9687   const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
 9688   const int32_t MINUS_32 = 0xFFFFFFE0;
 9689   Label L_special_case, L_block1, L_exit;
 9690   movl(rtmp, FloatConsts_EXP_BIT_MASK);
 9691   movdl(dst, src);
 9692   andl(dst, rtmp);
 9693   sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
 9694   movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
 9695   subl(rtmp, dst);
 9696   movl(rcx, rtmp);
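  // If the shift amount (now in rcx) is outside [0, 31], fall back to convert_f2i.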
 9697   movl(dst, MINUS_32);
 9698   testl(rtmp, dst);
 9699   jccb(Assembler::notEqual, L_special_case);
 9700   movdl(dst, src);
 9701   andl(dst, FloatConsts_SIGNIF_BIT_MASK);
 9702   orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
 9703   movdl(rtmp, src);
 9704   testl(rtmp, rtmp);
 9705   jccb(Assembler::greaterEqual, L_block1);
 9706   negl(dst);
 9707   bind(L_block1);
 9708   sarl(dst);
 9709   addl(dst, 0x1);
 9710   sarl(dst, 0x1);
 9711   jmp(L_exit);
 9712   bind(L_special_case);
 9713   convert_f2i(dst, src);
 9714   bind(L_exit);
 9715 }
 9716 
 9717 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Refer to the java.lang.Math.round(double) algorithm for details.
 9720   const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
 9721   const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
 9722   const int64_t DoubleConsts_EXP_BIAS = 1023;
 9723   const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
 9724   const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
 9725   Label L_special_case, L_block1, L_exit;
 9726   mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
 9727   movq(dst, src);
 9728   andq(dst, rtmp);
 9729   sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
 9730   mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
 9731   subq(rtmp, dst);
 9732   movq(rcx, rtmp);
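  // If the shift amount (now in rcx) is outside [0, 63], fall back to convert_d2l.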
 9733   mov64(dst, MINUS_64);
 9734   testq(rtmp, dst);
 9735   jccb(Assembler::notEqual, L_special_case);
 9736   movq(dst, src);
 9737   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
 9738   andq(dst, rtmp);
 9739   mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
 9740   orq(dst, rtmp);
 9741   movq(rtmp, src);
 9742   testq(rtmp, rtmp);
 9743   jccb(Assembler::greaterEqual, L_block1);
 9744   negq(dst);
 9745   bind(L_block1);
 9746   sarq(dst);
 9747   addq(dst, 0x1);
 9748   sarq(dst, 0x1);
 9749   jmp(L_exit);
 9750   bind(L_special_case);
 9751   convert_d2l(dst, src);
 9752   bind(L_exit);
 9753 }
 9754 
 9755 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
 9756   Label done;
 9757   cvttsd2siq(dst, src);
 9758   cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
 9759   jccb(Assembler::notEqual, done);
 9760   subptr(rsp, 8);
 9761   movdbl(Address(rsp, 0), src);
 9762   call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
 9763   pop(dst);
 9764   bind(done);
 9765 }
 9766 
 9767 void MacroAssembler::cache_wb(Address line)
 9768 {
  // 64-bit CPUs always support clflush
 9770   assert(VM_Version::supports_clflush(), "clflush should be available");
 9771   bool optimized = VM_Version::supports_clflushopt();
 9772   bool no_evict = VM_Version::supports_clwb();
 9773 
  // Prefer clwb (writeback without evict); otherwise prefer clflushopt
  // (potentially parallel writeback with evict); otherwise fall back on
  // clflush (serial writeback with evict).
 9777 
 9778   if (optimized) {
 9779     if (no_evict) {
 9780       clwb(line);
 9781     } else {
 9782       clflushopt(line);
 9783     }
 9784   } else {
 9785     // no need for fence when using CLFLUSH
 9786     clflush(line);
 9787   }
 9788 }
 9789 
 9790 void MacroAssembler::cache_wbsync(bool is_pre)
 9791 {
 9792   assert(VM_Version::supports_clflush(), "clflush should be available");
 9793   bool optimized = VM_Version::supports_clflushopt();
 9794   bool no_evict = VM_Version::supports_clwb();
 9795 
 9796   // pick the correct implementation
 9797 
 9798   if (!is_pre && (optimized || no_evict)) {
    // An sfence is needed after the flush when using clflushopt or clwb;
    // otherwise no synchronization is needed.
 9801 
 9802     sfence();
 9803   }
 9804 }
 9805 
 9806 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
 9807   switch (cond) {
 9808     // Note some conditions are synonyms for others
 9809     case Assembler::zero:         return Assembler::notZero;
 9810     case Assembler::notZero:      return Assembler::zero;
 9811     case Assembler::less:         return Assembler::greaterEqual;
 9812     case Assembler::lessEqual:    return Assembler::greater;
 9813     case Assembler::greater:      return Assembler::lessEqual;
 9814     case Assembler::greaterEqual: return Assembler::less;
 9815     case Assembler::below:        return Assembler::aboveEqual;
 9816     case Assembler::belowEqual:   return Assembler::above;
 9817     case Assembler::above:        return Assembler::belowEqual;
 9818     case Assembler::aboveEqual:   return Assembler::below;
 9819     case Assembler::overflow:     return Assembler::noOverflow;
 9820     case Assembler::noOverflow:   return Assembler::overflow;
 9821     case Assembler::negative:     return Assembler::positive;
 9822     case Assembler::positive:     return Assembler::negative;
 9823     case Assembler::parity:       return Assembler::noParity;
 9824     case Assembler::noParity:     return Assembler::parity;
 9825   }
 9826   ShouldNotReachHere(); return Assembler::overflow;
 9827 }
 9828 
 9829 // This is simply a call to Thread::current()
 9830 void MacroAssembler::get_thread_slow(Register thread) {
 9831   if (thread != rax) {
 9832     push(rax);
 9833   }
 9834   push(rdi);
 9835   push(rsi);
 9836   push(rdx);
 9837   push(rcx);
 9838   push(r8);
 9839   push(r9);
 9840   push(r10);
 9841   push(r11);
 9842 
 9843   MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
 9844 
 9845   pop(r11);
 9846   pop(r10);
 9847   pop(r9);
 9848   pop(r8);
 9849   pop(rcx);
 9850   pop(rdx);
 9851   pop(rsi);
 9852   pop(rdi);
 9853   if (thread != rax) {
 9854     mov(thread, rax);
 9855     pop(rax);
 9856   }
 9857 }
 9858 
 9859 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
 9860   Label L_stack_ok;
 9861   if (bias == 0) {
 9862     testptr(sp, 2 * wordSize - 1);
 9863   } else {
    // lea(tmp, Address(rsp, bias));
 9865     mov(tmp, sp);
 9866     addptr(tmp, bias);
 9867     testptr(tmp, 2 * wordSize - 1);
 9868   }
 9869   jcc(Assembler::equal, L_stack_ok);
 9870   block_comment(msg);
 9871   stop(msg);
 9872   bind(L_stack_ok);
 9873 }
 9874 
// Implements fast-locking.
//
// basic_lock: the address of the BasicObjectLock for this monitor
// obj: the object to be locked
// reg_rax: rax
// tmp: a temporary register
 9881 void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
 9882   Register thread = r15_thread;
 9883 
 9884   assert(reg_rax == rax, "");
 9885   assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
 9886 
 9887   Label push;
 9888   const Register top = tmp;
 9889 
 9890   // Preload the markWord. It is important that this is the first
 9891   // instruction emitted as it is part of C1's null check semantics.
 9892   movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
 9893 
 9894   if (UseObjectMonitorTable) {
 9895     // Clear cache in case fast locking succeeds or we need to take the slow-path.
 9896     movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0);
 9897   }
 9898 
 9899   if (DiagnoseSyncOnValueBasedClasses != 0) {
 9900     load_klass(tmp, obj, rscratch1);
 9901     testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
 9902     jcc(Assembler::notZero, slow);
 9903   }
 9904 
 9905   // Load top.
 9906   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9907 
 9908   // Check if the lock-stack is full.
 9909   cmpl(top, LockStack::end_offset());
 9910   jcc(Assembler::greaterEqual, slow);
 9911 
 9912   // Check for recursion.
 9913   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 9914   jcc(Assembler::equal, push);
 9915 
 9916   // Check header for monitor (0b10).
 9917   testptr(reg_rax, markWord::monitor_value);
 9918   jcc(Assembler::notZero, slow);
 9919 
 9920   // Try to lock. Transition lock bits 0b01 => 0b00
 9921   movptr(tmp, reg_rax);
 9922   andptr(tmp, ~(int32_t)markWord::unlocked_value);
 9923   orptr(reg_rax, markWord::unlocked_value);
 9924   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
 9925   jcc(Assembler::notEqual, slow);
 9926 
 9927   // Restore top, CAS clobbers register.
 9928   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9929 
 9930   bind(push);
 9931   // After successful lock, push object on lock-stack.
 9932   movptr(Address(thread, top), obj);
 9933   incrementl(top, oopSize);
 9934   movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
 9935 }
 9936 
// Implements fast-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// tmp: a temporary register
 9943 void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
 9944   Register thread = r15_thread;
 9945 
 9946   assert(reg_rax == rax, "");
 9947   assert_different_registers(obj, reg_rax, thread, tmp);
 9948 
 9949   Label unlocked, push_and_slow;
 9950   const Register top = tmp;
 9951 
 9952   // Check if obj is top of lock-stack.
 9953   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9954   cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
 9955   jcc(Assembler::notEqual, slow);
 9956 
 9957   // Pop lock-stack.
 9958   DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
 9959   subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
 9960 
 9961   // Check if recursive.
 9962   cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
 9963   jcc(Assembler::equal, unlocked);
 9964 
 9965   // Not recursive. Check header for monitor (0b10).
 9966   movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
 9967   testptr(reg_rax, markWord::monitor_value);
 9968   jcc(Assembler::notZero, push_and_slow);
 9969 
 9970 #ifdef ASSERT
 9971   // Check header not unlocked (0b01).
 9972   Label not_unlocked;
 9973   testptr(reg_rax, markWord::unlocked_value);
 9974   jcc(Assembler::zero, not_unlocked);
 9975   stop("fast_unlock already unlocked");
 9976   bind(not_unlocked);
 9977 #endif
 9978 
 9979   // Try to unlock. Transition lock bits 0b00 => 0b01
 9980   movptr(tmp, reg_rax);
 9981   orptr(tmp, markWord::unlocked_value);
 9982   lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
 9983   jcc(Assembler::equal, unlocked);
 9984 
 9985   bind(push_and_slow);
 9986   // Restore lock-stack and handle the unlock in runtime.
 9987 #ifdef ASSERT
 9988   movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
 9989   movptr(Address(thread, top), obj);
 9990 #endif
 9991   addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
 9992   jmp(slow);
 9993 
 9994   bind(unlocked);
 9995 }
 9996 
// Saves legacy GPR state on the stack.
 9998 void MacroAssembler::save_legacy_gprs() {
 9999   subq(rsp, 16 * wordSize);
10000   movq(Address(rsp, 15 * wordSize), rax);
10001   movq(Address(rsp, 14 * wordSize), rcx);
10002   movq(Address(rsp, 13 * wordSize), rdx);
10003   movq(Address(rsp, 12 * wordSize), rbx);
10004   movq(Address(rsp, 10 * wordSize), rbp);
10005   movq(Address(rsp, 9 * wordSize), rsi);
10006   movq(Address(rsp, 8 * wordSize), rdi);
10007   movq(Address(rsp, 7 * wordSize), r8);
10008   movq(Address(rsp, 6 * wordSize), r9);
10009   movq(Address(rsp, 5 * wordSize), r10);
10010   movq(Address(rsp, 4 * wordSize), r11);
10011   movq(Address(rsp, 3 * wordSize), r12);
10012   movq(Address(rsp, 2 * wordSize), r13);
10013   movq(Address(rsp, wordSize), r14);
10014   movq(Address(rsp, 0), r15);
10015 }
10016 
// Restores legacy GPR state from the stack.
10018 void MacroAssembler::restore_legacy_gprs() {
10019   movq(r15, Address(rsp, 0));
10020   movq(r14, Address(rsp, wordSize));
10021   movq(r13, Address(rsp, 2 * wordSize));
10022   movq(r12, Address(rsp, 3 * wordSize));
10023   movq(r11, Address(rsp, 4 * wordSize));
10024   movq(r10, Address(rsp, 5 * wordSize));
10025   movq(r9,  Address(rsp, 6 * wordSize));
10026   movq(r8,  Address(rsp, 7 * wordSize));
10027   movq(rdi, Address(rsp, 8 * wordSize));
10028   movq(rsi, Address(rsp, 9 * wordSize));
10029   movq(rbp, Address(rsp, 10 * wordSize));
10030   movq(rbx, Address(rsp, 12 * wordSize));
10031   movq(rdx, Address(rsp, 13 * wordSize));
10032   movq(rcx, Address(rsp, 14 * wordSize));
10033   movq(rax, Address(rsp, 15 * wordSize));
10034   addq(rsp, 16 * wordSize);
10035 }
10036 
10037 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
10038   if (VM_Version::supports_apx_f()) {
10039     esetzucc(comparison, dst);
10040   } else {
10041     setb(comparison, dst);
10042     movzbl(dst, dst);
10043   }
10044 }