1 /*
   2  * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/oopMap.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/barrierSetAssembler.hpp"
  32 #include "gc/shared/barrierSetNMethod.hpp"
  33 #include "gc/shared/gc_globals.hpp"
  34 #include "memory/universe.hpp"
  35 #include "prims/jvmtiExport.hpp"
  36 #include "prims/upcallLinker.hpp"
  37 #include "runtime/arguments.hpp"
  38 #include "runtime/continuationEntry.hpp"
  39 #include "runtime/javaThread.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "utilities/macros.hpp"
  43 #include "vmreg_x86.inline.hpp"
  44 #include "stubGenerator_x86_64.hpp"
  45 #ifdef COMPILER2
  46 #include "opto/runtime.hpp"
  47 #include "opto/c2_globals.hpp"
  48 #endif
  49 #if INCLUDE_JVMCI
  50 #include "jvmci/jvmci_globals.hpp"
  51 #endif
  52 
  53 // For a more detailed description of the stub routine structure
  54 // see the comment in stubRoutines.hpp
  55 
  56 #define __ _masm->
  57 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
  58 
  59 #ifdef PRODUCT
  60 #define BLOCK_COMMENT(str) /* nothing */
  61 #else
  62 #define BLOCK_COMMENT(str) __ block_comment(str)
  63 #endif // PRODUCT
  64 
  65 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  66 
  67 //
  68 // Linux Arguments:
  69 //    c_rarg0:   call wrapper address                   address
  70 //    c_rarg1:   result                                 address
  71 //    c_rarg2:   result type                            BasicType
  72 //    c_rarg3:   method                                 Method*
  73 //    c_rarg4:   (interpreter) entry point              address
  74 //    c_rarg5:   parameters                             intptr_t*
  75 //    16(rbp): parameter size (in words)              int
  76 //    24(rbp): thread                                 Thread*
  77 //
  78 //     [ return_from_Java     ] <--- rsp
  79 //     [ argument word n      ]
  80 //      ...
  81 // -12 [ argument word 1      ]
  82 // -11 [ saved r15            ] <--- rsp_after_call
  83 // -10 [ saved r14            ]
  84 //  -9 [ saved r13            ]
  85 //  -8 [ saved r12            ]
  86 //  -7 [ saved rbx            ]
  87 //  -6 [ call wrapper         ]
  88 //  -5 [ result               ]
  89 //  -4 [ result type          ]
  90 //  -3 [ method               ]
  91 //  -2 [ entry point          ]
  92 //  -1 [ parameters           ]
  93 //   0 [ saved rbp            ] <--- rbp
  94 //   1 [ return address       ]
  95 //   2 [ parameter size       ]
  96 //   3 [ thread               ]
  97 //
  98 // Windows Arguments:
  99 //    c_rarg0:   call wrapper address                   address
 100 //    c_rarg1:   result                                 address
 101 //    c_rarg2:   result type                            BasicType
 102 //    c_rarg3:   method                                 Method*
 103 //    48(rbp): (interpreter) entry point              address
 104 //    56(rbp): parameters                             intptr_t*
 105 //    64(rbp): parameter size (in words)              int
 106 //    72(rbp): thread                                 Thread*
 107 //
 108 //     [ return_from_Java     ] <--- rsp
 109 //     [ argument word n      ]
 110 //      ...
 111 // -28 [ argument word 1      ]
 112 // -27 [ saved xmm15          ] <--- rsp after_call
 113 //     [ saved xmm7-xmm14     ]
 114 //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
 115 //  -7 [ saved r15            ]
 116 //  -6 [ saved r14            ]
 117 //  -5 [ saved r13            ]
 118 //  -4 [ saved r12            ]
 119 //  -3 [ saved rdi            ]
 120 //  -2 [ saved rsi            ]
 121 //  -1 [ saved rbx            ]
 122 //   0 [ saved rbp            ] <--- rbp
 123 //   1 [ return address       ]
 124 //   2 [ call wrapper         ]
 125 //   3 [ result               ]
 126 //   4 [ result type          ]
 127 //   5 [ method               ]
 128 //   6 [ entry point          ]
 129 //   7 [ parameters           ]
 130 //   8 [ parameter size       ]
 131 //   9 [ thread               ]
 132 //
 133 //    Windows reserves the callers stack space for arguments 1-4.
 134 //    We spill c_rarg0-c_rarg3 to this space.
 135 
 136 // Call stub stack layout word offsets from rbp
 137 #ifdef _WIN64
 138 enum call_stub_layout {
 139   xmm_save_first     = 6,  // save from xmm6
 140   xmm_save_last      = 15, // to xmm15
 141   xmm_save_base      = -9,
 142   rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
 143   r15_off            = -7,
 144   r14_off            = -6,
 145   r13_off            = -5,
 146   r12_off            = -4,
 147   rdi_off            = -3,
 148   rsi_off            = -2,
 149   rbx_off            = -1,
 150   rbp_off            =  0,
 151   retaddr_off        =  1,
 152   call_wrapper_off   =  2,
 153   result_off         =  3,
 154   result_type_off    =  4,
 155   method_off         =  5,
 156   entry_point_off    =  6,
 157   parameters_off     =  7,
 158   parameter_size_off =  8,
 159   thread_off         =  9
 160 };
 161 
 162 static Address xmm_save(int reg) {
 163   assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
 164   return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
 165 }
 166 #else // !_WIN64
 167 enum call_stub_layout {
 168   rsp_after_call_off = -12,
 169   mxcsr_off          = rsp_after_call_off,
 170   r15_off            = -11,
 171   r14_off            = -10,
 172   r13_off            = -9,
 173   r12_off            = -8,
 174   rbx_off            = -7,
 175   call_wrapper_off   = -6,
 176   result_off         = -5,
 177   result_type_off    = -4,
 178   method_off         = -3,
 179   entry_point_off    = -2,
 180   parameters_off     = -1,
 181   rbp_off            =  0,
 182   retaddr_off        =  1,
 183   parameter_size_off =  2,
 184   thread_off         =  3
 185 };
 186 #endif // _WIN64
 187 
 188 address StubGenerator::generate_call_stub(address& return_address) {
 189 
 190   assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
 191          (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
 192          "adjust this code");
 193   StubId stub_id = StubId::stubgen_call_stub_id;
 194   GrowableArray<address> entries;
 195   int entry_count = StubInfo::entry_count(stub_id);
 196   assert(entry_count == 2, "sanity check");
 197   address start = load_archive_data(stub_id, &entries);
 198   if (start != nullptr) {
 199     assert(entries.length() == 1, "expected 1 extra entry");
 200     return_address = entries.at(0);
 201     return start;
 202   }
 203 
 204   StubCodeMark mark(this, stub_id);
 205   start = __ pc();
 206 
 207   // same as in generate_catch_exception()!
 208   const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
 209 
 210   const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
 211   const Address result        (rbp, result_off         * wordSize);
 212   const Address result_type   (rbp, result_type_off    * wordSize);
 213   const Address method        (rbp, method_off         * wordSize);
 214   const Address entry_point   (rbp, entry_point_off    * wordSize);
 215   const Address parameters    (rbp, parameters_off     * wordSize);
 216   const Address parameter_size(rbp, parameter_size_off * wordSize);
 217 
 218   // same as in generate_catch_exception()!
 219   const Address thread        (rbp, thread_off         * wordSize);
 220 
 221   const Address r15_save(rbp, r15_off * wordSize);
 222   const Address r14_save(rbp, r14_off * wordSize);
 223   const Address r13_save(rbp, r13_off * wordSize);
 224   const Address r12_save(rbp, r12_off * wordSize);
 225   const Address rbx_save(rbp, rbx_off * wordSize);
 226 
 227   // stub code
 228   __ enter();
 229   __ subptr(rsp, -rsp_after_call_off * wordSize);
 230 
 231   // save register parameters
 232 #ifndef _WIN64
 233   __ movptr(parameters,   c_rarg5); // parameters
 234   __ movptr(entry_point,  c_rarg4); // entry_point
 235 #endif
 236 
 237   __ movptr(method,       c_rarg3); // method
 238   __ movl(result_type,  c_rarg2);   // result type
 239   __ movptr(result,       c_rarg1); // result
 240   __ movptr(call_wrapper, c_rarg0); // call wrapper
 241 
 242   // save regs belonging to calling function
 243   __ movptr(rbx_save, rbx);
 244   __ movptr(r12_save, r12);
 245   __ movptr(r13_save, r13);
 246   __ movptr(r14_save, r14);
 247   __ movptr(r15_save, r15);
 248 
 249 #ifdef _WIN64
 250   int last_reg = 15;
 251   for (int i = xmm_save_first; i <= last_reg; i++) {
 252     __ movdqu(xmm_save(i), as_XMMRegister(i));
 253   }
 254 
 255   const Address rdi_save(rbp, rdi_off * wordSize);
 256   const Address rsi_save(rbp, rsi_off * wordSize);
 257 
 258   __ movptr(rsi_save, rsi);
 259   __ movptr(rdi_save, rdi);
 260 #else
 261   const Address mxcsr_save(rbp, mxcsr_off * wordSize);
 262   {
 263     Label skip_ldmx;
 264     __ cmp32_mxcsr_std(mxcsr_save, rax, rscratch1);
 265     __ jcc(Assembler::equal, skip_ldmx);
 266     ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
 267     __ ldmxcsr(mxcsr_std, rscratch1);
 268     __ bind(skip_ldmx);
 269   }
 270 #endif
 271 
 272   // Load up thread register
 273   __ movptr(r15_thread, thread);
 274   __ reinit_heapbase();
 275 
 276 #ifdef ASSERT
 277   // make sure we have no pending exceptions
 278   {
 279     Label L;
 280     __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 281     __ jcc(Assembler::equal, L);
 282     __ stop("StubRoutines::call_stub: entered with pending exception");
 283     __ bind(L);
 284   }
 285 #endif
 286 
 287   // pass parameters if any
 288   BLOCK_COMMENT("pass parameters if any");
 289   Label parameters_done;
 290   __ movl(c_rarg3, parameter_size);
 291   __ testl(c_rarg3, c_rarg3);
 292   __ jcc(Assembler::zero, parameters_done);
 293 
 294   Label loop;
 295   __ movptr(c_rarg2, parameters);       // parameter pointer
 296   __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
 297   __ BIND(loop);
 298   __ movptr(rax, Address(c_rarg2, 0));// get parameter
 299   __ addptr(c_rarg2, wordSize);       // advance to next parameter
 300   __ decrementl(c_rarg1);             // decrement counter
 301   __ push(rax);                       // pass parameter
 302   __ jcc(Assembler::notZero, loop);
 303 
 304   // call Java function
 305   __ BIND(parameters_done);
 306   __ movptr(rbx, method);             // get Method*
 307   __ movptr(c_rarg1, entry_point);    // get entry_point
 308   __ mov(r13, rsp);                   // set sender sp
 309   BLOCK_COMMENT("call Java function");
 310   __ call(c_rarg1);
 311 
 312   BLOCK_COMMENT("call_stub_return_address:");
 313   return_address = __ pc();
 314   entries.append(return_address);
 315 
 316   // store result depending on type (everything that is not
 317   // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 318   __ movptr(r13, result);
 319   Label is_long, is_float, is_double, check_prim, exit;
 320   __ movl(rbx, result_type);
 321   __ cmpl(rbx, T_OBJECT);
 322   __ jcc(Assembler::equal, check_prim);
 323   __ cmpl(rbx, T_LONG);
 324   __ jcc(Assembler::equal, is_long);
 325   __ cmpl(rbx, T_FLOAT);
 326   __ jcc(Assembler::equal, is_float);
 327   __ cmpl(rbx, T_DOUBLE);
 328   __ jcc(Assembler::equal, is_double);
 329 #ifdef ASSERT
 330   // make sure the type is INT
 331   {
 332     Label L;
 333     __ cmpl(rbx, T_INT);
 334     __ jcc(Assembler::equal, L);
 335     __ stop("StubRoutines::call_stub: unexpected result type");
 336     __ bind(L);
 337   }
 338 #endif
 339 
 340   // handle T_INT case
 341   __ movl(Address(r13, 0), rax);
 342 
 343   __ BIND(exit);
 344 
 345   // pop parameters
 346   __ lea(rsp, rsp_after_call);
 347 
 348 #ifdef ASSERT
 349   // verify that threads correspond
 350   {
 351    Label L1, L2, L3;
 352     __ cmpptr(r15_thread, thread);
 353     __ jcc(Assembler::equal, L1);
 354     __ stop("StubRoutines::call_stub: r15_thread is corrupted");
 355     __ bind(L1);
 356     __ get_thread_slow(rbx);
 357     __ cmpptr(r15_thread, thread);
 358     __ jcc(Assembler::equal, L2);
 359     __ stop("StubRoutines::call_stub: r15_thread is modified by call");
 360     __ bind(L2);
 361     __ cmpptr(r15_thread, rbx);
 362     __ jcc(Assembler::equal, L3);
 363     __ stop("StubRoutines::call_stub: threads must correspond");
 364     __ bind(L3);
 365   }
 366 #endif
 367 
 368   __ pop_cont_fastpath();
 369 
 370   // restore regs belonging to calling function
 371 #ifdef _WIN64
 372   // emit the restores for xmm regs
 373   for (int i = xmm_save_first; i <= last_reg; i++) {
 374     __ movdqu(as_XMMRegister(i), xmm_save(i));
 375   }
 376 #endif
 377   __ movptr(r15, r15_save);
 378   __ movptr(r14, r14_save);
 379   __ movptr(r13, r13_save);
 380   __ movptr(r12, r12_save);
 381   __ movptr(rbx, rbx_save);
 382 
 383 #ifdef _WIN64
 384   __ movptr(rdi, rdi_save);
 385   __ movptr(rsi, rsi_save);
 386 #else
 387   __ ldmxcsr(mxcsr_save);
 388 #endif
 389 
 390   // restore rsp
 391   __ addptr(rsp, -rsp_after_call_off * wordSize);
 392 
 393   // return
 394   __ vzeroupper();
 395   __ pop(rbp);
 396   __ ret(0);
 397 
 398   // handle return types different from T_INT
 399   __ BIND(check_prim);
 400   if (InlineTypeReturnedAsFields) {
 401     // Check for scalarized return value
 402     __ testptr(rax, 1);
 403     __ jcc(Assembler::zero, is_long);
 404     // Load pack handler address
 405     __ andptr(rax, -2);
 406     __ movptr(rax, Address(rax, InlineKlass::adr_members_offset()));
 407     __ movptr(rbx, Address(rax, InlineKlass::pack_handler_jobject_offset()));
 408     // Call pack handler to initialize the buffer
 409     __ call(rbx);
 410     __ jmp(exit);
 411   }
 412   __ BIND(is_long);
 413   __ movq(Address(r13, 0), rax);
 414   __ jmp(exit);
 415 
 416   __ BIND(is_float);
 417   __ movflt(Address(r13, 0), xmm0);
 418   __ jmp(exit);
 419 
 420   __ BIND(is_double);
 421   __ movdbl(Address(r13, 0), xmm0);
 422   __ jmp(exit);
 423 
 424   // record the stub entry and end plus the auxiliary entry
 425   store_archive_data(stub_id, start, __ pc(), &entries);
 426 
 427   return start;
 428 }
 429 
 430 // Return point for a Java call if there's an exception thrown in
 431 // Java code.  The exception is caught and transformed into a
 432 // pending exception stored in JavaThread that can be tested from
 433 // within the VM.
 434 //
 435 // Note: Usually the parameters are removed by the callee. In case
 436 // of an exception crossing an activation frame boundary, that is
 437 // not the case if the callee is compiled code => need to setup the
 438 // rsp.
 439 //
 440 // rax: exception oop
 441 
 442 address StubGenerator::generate_catch_exception() {
 443   StubId stub_id = StubId::stubgen_catch_exception_id;
 444   int entry_count = StubInfo::entry_count(stub_id);
 445   assert(entry_count == 1, "sanity check");
 446   address start = load_archive_data(stub_id);
 447   if (start != nullptr) {
 448     return start;
 449   }
 450 
 451   StubCodeMark mark(this, stub_id);
 452   start = __ pc();
 453 
 454   // same as in generate_call_stub():
 455   const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
 456   const Address thread        (rbp, thread_off         * wordSize);
 457 
 458 #ifdef ASSERT
 459   // verify that threads correspond
 460   {
 461     Label L1, L2, L3;
 462     __ cmpptr(r15_thread, thread);
 463     __ jcc(Assembler::equal, L1);
 464     __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
 465     __ bind(L1);
 466     __ get_thread_slow(rbx);
 467     __ cmpptr(r15_thread, thread);
 468     __ jcc(Assembler::equal, L2);
 469     __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
 470     __ bind(L2);
 471     __ cmpptr(r15_thread, rbx);
 472     __ jcc(Assembler::equal, L3);
 473     __ stop("StubRoutines::catch_exception: threads must correspond");
 474     __ bind(L3);
 475   }
 476 #endif
 477 
 478   // set pending exception
 479   __ verify_oop(rax);
 480 
 481   __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
 482   // special case -- add file name string to AOT address table
 483   address file = (address)AOTCodeCache::add_C_string(__FILE__);
 484   __ lea(rscratch1, ExternalAddress(file));
 485   __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
 486   __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)  __LINE__);
 487 
 488   // complete return to VM
 489   assert(StubRoutines::_call_stub_return_address != nullptr,
 490          "_call_stub_return_address must have been generated before");
 491   __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
 492 
 493   // record the stub entry and end
 494   store_archive_data(stub_id, start, __ pc());
 495 
 496   return start;
 497 }
 498 
 499 // Continuation point for runtime calls returning with a pending
 500 // exception.  The pending exception check happened in the runtime
 501 // or native call stub.  The pending exception in Thread is
 502 // converted into a Java-level exception.
 503 //
 504 // Contract with Java-level exception handlers:
 505 // rax: exception
 506 // rdx: throwing pc
 507 //
 508 // NOTE: At entry of this stub, exception-pc must be on stack !!
 509 
 510 address StubGenerator::generate_forward_exception() {
 511   StubId stub_id = StubId::stubgen_forward_exception_id;
 512   int entry_count = StubInfo::entry_count(stub_id);
 513   assert(entry_count == 1, "sanity check");
 514   address start = load_archive_data(stub_id);
 515   if (start != nullptr) {
 516     return start;
 517   }
 518   StubCodeMark mark(this, stub_id);
 519   start = __ pc();
 520 
 521   // Upon entry, the sp points to the return address returning into
 522   // Java (interpreted or compiled) code; i.e., the return address
 523   // becomes the throwing pc.
 524   //
 525   // Arguments pushed before the runtime call are still on the stack
 526   // but the exception handler will reset the stack pointer ->
 527   // ignore them.  A potential result in registers can be ignored as
 528   // well.
 529 
 530 #ifdef ASSERT
 531   // make sure this code is only executed if there is a pending exception
 532   {
 533     Label L;
 534     __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 535     __ jcc(Assembler::notEqual, L);
 536     __ stop("StubRoutines::forward exception: no pending exception (1)");
 537     __ bind(L);
 538   }
 539 #endif
 540 
 541   // compute exception handler into rbx
 542   __ movptr(c_rarg0, Address(rsp, 0));
 543   BLOCK_COMMENT("call exception_handler_for_return_address");
 544   __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 545                        SharedRuntime::exception_handler_for_return_address),
 546                   r15_thread, c_rarg0);
 547   __ mov(rbx, rax);
 548 
 549   // setup rax & rdx, remove return address & clear pending exception
 550   __ pop(rdx);
 551   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
 552   __ movptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
 553 
 554 #ifdef ASSERT
 555   // make sure exception is set
 556   {
 557     Label L;
 558     __ testptr(rax, rax);
 559     __ jcc(Assembler::notEqual, L);
 560     __ stop("StubRoutines::forward exception: no pending exception (2)");
 561     __ bind(L);
 562   }
 563 #endif
 564 
 565   // continue at exception handler (return address removed)
 566   // rax: exception
 567   // rbx: exception handler
 568   // rdx: throwing pc
 569   __ verify_oop(rax);
 570   __ jmp(rbx);
 571 
 572   // record the stub entry and end
 573   store_archive_data(stub_id, start, __ pc());
 574 
 575   return start;
 576 }
 577 
 578 // Support for intptr_t OrderAccess::fence()
 579 //
 580 // Arguments :
 581 //
 582 // Result:
 583 address StubGenerator::generate_orderaccess_fence() {
 584   StubId stub_id = StubId::stubgen_fence_id;
 585   int entry_count = StubInfo::entry_count(stub_id);
 586   assert(entry_count == 1, "sanity check");
 587   address start = load_archive_data(stub_id);
 588   if (start != nullptr) {
 589     return start;
 590   }
 591   StubCodeMark mark(this, stub_id);
 592   start = __ pc();
 593 
 594   __ membar(Assembler::StoreLoad);
 595   __ ret(0);
 596 
 597   // record the stub entry and end
 598   store_archive_data(stub_id, start, __ pc());
 599 
 600   return start;
 601 }
 602 
 603 
 604 //----------------------------------------------------------------------------------------------------
 605 // Support for void verify_mxcsr()
 606 //
 607 // This routine is used with -Xcheck:jni to verify that native
 608 // JNI code does not return to Java code without restoring the
 609 // MXCSR register to our expected state.
 610 
 611 address StubGenerator::generate_verify_mxcsr() {
 612   StubId stub_id = StubId::stubgen_verify_mxcsr_id;
 613   int entry_count = StubInfo::entry_count(stub_id);
 614   assert(entry_count == 1, "sanity check");
 615   address start = load_archive_data(stub_id);
 616   if (start != nullptr) {
 617     return start;
 618   }
 619   StubCodeMark mark(this, stub_id);
 620   start = __ pc();
 621 
 622   const Address mxcsr_save(rsp, 0);
 623 
 624   if (CheckJNICalls) {
 625     Label ok_ret;
 626     ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
 627     __ push_ppx(rax);
 628     __ subptr(rsp, wordSize);      // allocate a temp location
 629     __ cmp32_mxcsr_std(mxcsr_save, rax, rscratch1);
 630     __ jcc(Assembler::equal, ok_ret);
 631 
 632     __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
 633 
 634     __ ldmxcsr(mxcsr_std, rscratch1);
 635 
 636     __ bind(ok_ret);
 637     __ addptr(rsp, wordSize);
 638     __ pop_ppx(rax);
 639   }
 640 
 641   __ ret(0);
 642 
 643   // record the stub entry and end
 644   store_archive_data(stub_id, start, __ pc());
 645 
 646   return start;
 647 }
 648 
 649 address StubGenerator::generate_f2i_fixup() {
 650   StubId stub_id = StubId::stubgen_f2i_fixup_id;
 651   int entry_count = StubInfo::entry_count(stub_id);
 652   assert(entry_count == 1, "sanity check");
 653   address start = load_archive_data(stub_id);
 654   if (start != nullptr) {
 655     return start;
 656   }
 657   StubCodeMark mark(this, stub_id);
 658   Address inout(rsp, 5 * wordSize); // return address + 4 saves
 659 
 660   start = __ pc();
 661 
 662   Label L;
 663 
 664   __ push_ppx(rax);
 665   __ push_ppx(c_rarg3);
 666   __ push_ppx(c_rarg2);
 667   __ push_ppx(c_rarg1);
 668 
 669   __ movl(rax, 0x7f800000);
 670   __ xorl(c_rarg3, c_rarg3);
 671   __ movl(c_rarg2, inout);
 672   __ movl(c_rarg1, c_rarg2);
 673   __ andl(c_rarg1, 0x7fffffff);
 674   __ cmpl(rax, c_rarg1); // NaN? -> 0
 675   __ jcc(Assembler::negative, L);
 676   __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
 677   __ movl(c_rarg3, 0x80000000);
 678   __ movl(rax, 0x7fffffff);
 679   __ cmovl(Assembler::positive, c_rarg3, rax);
 680 
 681   __ bind(L);
 682   __ movptr(inout, c_rarg3);
 683 
 684   __ pop_ppx(c_rarg1);
 685   __ pop_ppx(c_rarg2);
 686   __ pop_ppx(c_rarg3);
 687   __ pop_ppx(rax);
 688 
 689   __ ret(0);
 690 
 691   // record the stub entry and end
 692   store_archive_data(stub_id, start, __ pc());
 693 
 694   return start;
 695 }
 696 
 697 address StubGenerator::generate_f2l_fixup() {
 698   StubId stub_id = StubId::stubgen_f2l_fixup_id;
 699   int entry_count = StubInfo::entry_count(stub_id);
 700   assert(entry_count == 1, "sanity check");
 701   address start = load_archive_data(stub_id);
 702   if (start != nullptr) {
 703     return start;
 704   }
 705   StubCodeMark mark(this, stub_id);
 706   Address inout(rsp, 5 * wordSize); // return address + 4 saves
 707   start = __ pc();
 708 
 709   Label L;
 710 
 711   __ push_ppx(rax);
 712   __ push_ppx(c_rarg3);
 713   __ push_ppx(c_rarg2);
 714   __ push_ppx(c_rarg1);
 715 
 716   __ movl(rax, 0x7f800000);
 717   __ xorl(c_rarg3, c_rarg3);
 718   __ movl(c_rarg2, inout);
 719   __ movl(c_rarg1, c_rarg2);
 720   __ andl(c_rarg1, 0x7fffffff);
 721   __ cmpl(rax, c_rarg1); // NaN? -> 0
 722   __ jcc(Assembler::negative, L);
 723   __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
 724   __ mov64(c_rarg3, 0x8000000000000000);
 725   __ mov64(rax, 0x7fffffffffffffff);
 726   __ cmov(Assembler::positive, c_rarg3, rax);
 727 
 728   __ bind(L);
 729   __ movptr(inout, c_rarg3);
 730 
 731   __ pop_ppx(c_rarg1);
 732   __ pop_ppx(c_rarg2);
 733   __ pop_ppx(c_rarg3);
 734   __ pop_ppx(rax);
 735 
 736   __ ret(0);
 737 
 738   // record the stub entry and end
 739   store_archive_data(stub_id, start, __ pc());
 740 
 741   return start;
 742 }
 743 
 744 address StubGenerator::generate_d2i_fixup() {
 745   StubId stub_id = StubId::stubgen_d2i_fixup_id;
 746   int entry_count = StubInfo::entry_count(stub_id);
 747   assert(entry_count == 1, "sanity check");
 748   address start = load_archive_data(stub_id);
 749   if (start != nullptr) {
 750     return start;
 751   }
 752   StubCodeMark mark(this, stub_id);
 753   Address inout(rsp, 6 * wordSize); // return address + 5 saves
 754 
 755   start = __ pc();
 756 
 757   Label L;
 758 
 759   __ push_ppx(rax);
 760   __ push_ppx(c_rarg3);
 761   __ push_ppx(c_rarg2);
 762   __ push_ppx(c_rarg1);
 763   __ push_ppx(c_rarg0);
 764 
 765   __ movl(rax, 0x7ff00000);
 766   __ movq(c_rarg2, inout);
 767   __ movl(c_rarg3, c_rarg2);
 768   __ mov(c_rarg1, c_rarg2);
 769   __ mov(c_rarg0, c_rarg2);
 770   __ negl(c_rarg3);
 771   __ shrptr(c_rarg1, 0x20);
 772   __ orl(c_rarg3, c_rarg2);
 773   __ andl(c_rarg1, 0x7fffffff);
 774   __ xorl(c_rarg2, c_rarg2);
 775   __ shrl(c_rarg3, 0x1f);
 776   __ orl(c_rarg1, c_rarg3);
 777   __ cmpl(rax, c_rarg1);
 778   __ jcc(Assembler::negative, L); // NaN -> 0
 779   __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
 780   __ movl(c_rarg2, 0x80000000);
 781   __ movl(rax, 0x7fffffff);
 782   __ cmov(Assembler::positive, c_rarg2, rax);
 783 
 784   __ bind(L);
 785   __ movptr(inout, c_rarg2);
 786 
 787   __ pop_ppx(c_rarg0);
 788   __ pop_ppx(c_rarg1);
 789   __ pop_ppx(c_rarg2);
 790   __ pop_ppx(c_rarg3);
 791   __ pop_ppx(rax);
 792 
 793   __ ret(0);
 794 
 795   // record the stub entry and end
 796   store_archive_data(stub_id, start, __ pc());
 797 
 798   return start;
 799 }
 800 
 801 address StubGenerator::generate_d2l_fixup() {
 802   StubId stub_id = StubId::stubgen_d2l_fixup_id;
 803   int entry_count = StubInfo::entry_count(stub_id);
 804   assert(entry_count == 1, "sanity check");
 805   address start = load_archive_data(stub_id);
 806   if (start != nullptr) {
 807     return start;
 808   }
 809   StubCodeMark mark(this, stub_id);
 810   Address inout(rsp, 6 * wordSize); // return address + 5 saves
 811 
 812   start = __ pc();
 813 
 814   Label L;
 815 
 816   __ push_ppx(rax);
 817   __ push_ppx(c_rarg3);
 818   __ push_ppx(c_rarg2);
 819   __ push_ppx(c_rarg1);
 820   __ push_ppx(c_rarg0);
 821 
 822   __ movl(rax, 0x7ff00000);
 823   __ movq(c_rarg2, inout);
 824   __ movl(c_rarg3, c_rarg2);
 825   __ mov(c_rarg1, c_rarg2);
 826   __ mov(c_rarg0, c_rarg2);
 827   __ negl(c_rarg3);
 828   __ shrptr(c_rarg1, 0x20);
 829   __ orl(c_rarg3, c_rarg2);
 830   __ andl(c_rarg1, 0x7fffffff);
 831   __ xorl(c_rarg2, c_rarg2);
 832   __ shrl(c_rarg3, 0x1f);
 833   __ orl(c_rarg1, c_rarg3);
 834   __ cmpl(rax, c_rarg1);
 835   __ jcc(Assembler::negative, L); // NaN -> 0
 836   __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
 837   __ mov64(c_rarg2, 0x8000000000000000);
 838   __ mov64(rax, 0x7fffffffffffffff);
 839   __ cmovq(Assembler::positive, c_rarg2, rax);
 840 
 841   __ bind(L);
 842   __ movq(inout, c_rarg2);
 843 
 844   __ pop_ppx(c_rarg0);
 845   __ pop_ppx(c_rarg1);
 846   __ pop_ppx(c_rarg2);
 847   __ pop_ppx(c_rarg3);
 848   __ pop_ppx(rax);
 849 
 850   __ ret(0);
 851 
 852   // record the stub entry and end
 853   store_archive_data(stub_id, start, __ pc());
 854 
 855   return start;
 856 }
 857 
 858 address StubGenerator::generate_count_leading_zeros_lut() {
 859   StubId stub_id = StubId::stubgen_vector_count_leading_zeros_lut_id;
 860   int entry_count = StubInfo::entry_count(stub_id);
 861   assert(entry_count == 1, "sanity check");
 862   address start = load_archive_data(stub_id);
 863   if (start != nullptr) {
 864     return start;
 865   }
 866   __ align64();
 867   StubCodeMark mark(this, stub_id);
 868   start = __ pc();
 869 
 870   __ emit_data64(0x0101010102020304, relocInfo::none);
 871   __ emit_data64(0x0000000000000000, relocInfo::none);
 872   __ emit_data64(0x0101010102020304, relocInfo::none);
 873   __ emit_data64(0x0000000000000000, relocInfo::none);
 874   __ emit_data64(0x0101010102020304, relocInfo::none);
 875   __ emit_data64(0x0000000000000000, relocInfo::none);
 876   __ emit_data64(0x0101010102020304, relocInfo::none);
 877   __ emit_data64(0x0000000000000000, relocInfo::none);
 878 
 879   // record the stub entry and end
 880   store_archive_data(stub_id, start, __ pc());
 881 
 882   return start;
 883 }
 884 
 885 address StubGenerator::generate_popcount_avx_lut() {
 886   StubId stub_id = StubId::stubgen_vector_popcount_lut_id;
 887   int entry_count = StubInfo::entry_count(stub_id);
 888   assert(entry_count == 1, "sanity check");
 889   address start = load_archive_data(stub_id);
 890   if (start != nullptr) {
 891     return start;
 892   }
 893   __ align64();
 894   StubCodeMark mark(this, stub_id);
 895   start = __ pc();
 896 
 897   __ emit_data64(0x0302020102010100, relocInfo::none);
 898   __ emit_data64(0x0403030203020201, relocInfo::none);
 899   __ emit_data64(0x0302020102010100, relocInfo::none);
 900   __ emit_data64(0x0403030203020201, relocInfo::none);
 901   __ emit_data64(0x0302020102010100, relocInfo::none);
 902   __ emit_data64(0x0403030203020201, relocInfo::none);
 903   __ emit_data64(0x0302020102010100, relocInfo::none);
 904   __ emit_data64(0x0403030203020201, relocInfo::none);
 905 
 906   // record the stub entry and end
 907   store_archive_data(stub_id, start, __ pc());
 908 
 909   return start;
 910 }
 911 
 912 void StubGenerator::generate_iota_indices() {
 913   StubId stub_id = StubId::stubgen_vector_iota_indices_id;
 914   GrowableArray<address> entries;
 915   int entry_count = StubInfo::entry_count(stub_id);
 916   assert(entry_count == VECTOR_IOTA_COUNT, "sanity check");
 917   address start = load_archive_data(stub_id, &entries);
 918   if (start != nullptr) {
 919     assert(entries.length() == VECTOR_IOTA_COUNT - 1,
 920            "unexpected extra entry count %d", entries.length());
 921     StubRoutines::x86::_vector_iota_indices[0] = start;
 922     for (int i = 1; i < VECTOR_IOTA_COUNT; i++) {
 923       StubRoutines::x86::_vector_iota_indices[i] = entries.at(i - 1);
 924     }
 925     return;
 926   }
 927   __ align(CodeEntryAlignment);
 928   StubCodeMark mark(this, stub_id);
 929   start = __ pc();
 930   // B
 931   __ emit_data64(0x0706050403020100, relocInfo::none);
 932   __ emit_data64(0x0F0E0D0C0B0A0908, relocInfo::none);
 933   __ emit_data64(0x1716151413121110, relocInfo::none);
 934   __ emit_data64(0x1F1E1D1C1B1A1918, relocInfo::none);
 935   __ emit_data64(0x2726252423222120, relocInfo::none);
 936   __ emit_data64(0x2F2E2D2C2B2A2928, relocInfo::none);
 937   __ emit_data64(0x3736353433323130, relocInfo::none);
 938   __ emit_data64(0x3F3E3D3C3B3A3938, relocInfo::none);
 939   entries.append(__ pc());
 940   // W
 941   __ emit_data64(0x0003000200010000, relocInfo::none);
 942   __ emit_data64(0x0007000600050004, relocInfo::none);
 943   __ emit_data64(0x000B000A00090008, relocInfo::none);
 944   __ emit_data64(0x000F000E000D000C, relocInfo::none);
 945   __ emit_data64(0x0013001200110010, relocInfo::none);
 946   __ emit_data64(0x0017001600150014, relocInfo::none);
 947   __ emit_data64(0x001B001A00190018, relocInfo::none);
 948   __ emit_data64(0x001F001E001D001C, relocInfo::none);
 949   entries.append(__ pc());
 950   // D
 951   __ emit_data64(0x0000000100000000, relocInfo::none);
 952   __ emit_data64(0x0000000300000002, relocInfo::none);
 953   __ emit_data64(0x0000000500000004, relocInfo::none);
 954   __ emit_data64(0x0000000700000006, relocInfo::none);
 955   __ emit_data64(0x0000000900000008, relocInfo::none);
 956   __ emit_data64(0x0000000B0000000A, relocInfo::none);
 957   __ emit_data64(0x0000000D0000000C, relocInfo::none);
 958   __ emit_data64(0x0000000F0000000E, relocInfo::none);
 959   entries.append(__ pc());
 960   // Q
 961   __ emit_data64(0x0000000000000000, relocInfo::none);
 962   __ emit_data64(0x0000000000000001, relocInfo::none);
 963   __ emit_data64(0x0000000000000002, relocInfo::none);
 964   __ emit_data64(0x0000000000000003, relocInfo::none);
 965   __ emit_data64(0x0000000000000004, relocInfo::none);
 966   __ emit_data64(0x0000000000000005, relocInfo::none);
 967   __ emit_data64(0x0000000000000006, relocInfo::none);
 968   __ emit_data64(0x0000000000000007, relocInfo::none);
 969   entries.append(__ pc());
 970   // D - FP
 971   __ emit_data64(0x3F80000000000000, relocInfo::none); // 0.0f, 1.0f
 972   __ emit_data64(0x4040000040000000, relocInfo::none); // 2.0f, 3.0f
 973   __ emit_data64(0x40A0000040800000, relocInfo::none); // 4.0f, 5.0f
 974   __ emit_data64(0x40E0000040C00000, relocInfo::none); // 6.0f, 7.0f
 975   __ emit_data64(0x4110000041000000, relocInfo::none); // 8.0f, 9.0f
 976   __ emit_data64(0x4130000041200000, relocInfo::none); // 10.0f, 11.0f
 977   __ emit_data64(0x4150000041400000, relocInfo::none); // 12.0f, 13.0f
 978   __ emit_data64(0x4170000041600000, relocInfo::none); // 14.0f, 15.0f
 979   entries.append(__ pc());
 980   // Q - FP
 981   __ emit_data64(0x0000000000000000, relocInfo::none); // 0.0d
 982   __ emit_data64(0x3FF0000000000000, relocInfo::none); // 1.0d
 983   __ emit_data64(0x4000000000000000, relocInfo::none); // 2.0d
 984   __ emit_data64(0x4008000000000000, relocInfo::none); // 3.0d
 985   __ emit_data64(0x4010000000000000, relocInfo::none); // 4.0d
 986   __ emit_data64(0x4014000000000000, relocInfo::none); // 5.0d
 987   __ emit_data64(0x4018000000000000, relocInfo::none); // 6.0d
 988   __ emit_data64(0x401c000000000000, relocInfo::none); // 7.0d
 989 
 990   // record the stub entry and end
 991   store_archive_data(stub_id, start, __ pc(), &entries);
 992 
 993   // install the entry addresses in the entry array
 994   assert(entries.length() == entry_count - 1,
 995          "unexpected entries count %d", entries.length());
 996   StubRoutines::x86::_vector_iota_indices[0] = start;
 997   for (int i = 1; i < VECTOR_IOTA_COUNT; i++) {
 998     StubRoutines::x86::_vector_iota_indices[i] = entries.at(i - 1);
 999   }
1000 }
1001 
1002 address StubGenerator::generate_vector_reverse_bit_lut() {
1003   StubId stub_id = StubId::stubgen_vector_reverse_bit_lut_id;
1004   int entry_count = StubInfo::entry_count(stub_id);
1005   assert(entry_count == 1, "sanity check");
1006   address start = load_archive_data(stub_id);
1007   if (start != nullptr) {
1008     return start;
1009   }
1010   __ align(CodeEntryAlignment);
1011   StubCodeMark mark(this, stub_id);
1012   start = __ pc();
1013 
1014   __ emit_data64(0x0E060A020C040800, relocInfo::none);
1015   __ emit_data64(0x0F070B030D050901, relocInfo::none);
1016   __ emit_data64(0x0E060A020C040800, relocInfo::none);
1017   __ emit_data64(0x0F070B030D050901, relocInfo::none);
1018   __ emit_data64(0x0E060A020C040800, relocInfo::none);
1019   __ emit_data64(0x0F070B030D050901, relocInfo::none);
1020   __ emit_data64(0x0E060A020C040800, relocInfo::none);
1021   __ emit_data64(0x0F070B030D050901, relocInfo::none);
1022 
1023   // record the stub entry and end
1024   store_archive_data(stub_id, start, __ pc());
1025 
1026   return start;
1027 }
1028 
1029 address StubGenerator::generate_vector_reverse_byte_perm_mask_long() {
1030   StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_long_id;
1031   int entry_count = StubInfo::entry_count(stub_id);
1032   assert(entry_count == 1, "sanity check");
1033   address start = load_archive_data(stub_id);
1034   if (start != nullptr) {
1035     return start;
1036   }
1037   __ align(CodeEntryAlignment);
1038   StubCodeMark mark(this, stub_id);
1039   start = __ pc();
1040 
1041   __ emit_data64(0x0001020304050607, relocInfo::none);
1042   __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1043   __ emit_data64(0x0001020304050607, relocInfo::none);
1044   __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1045   __ emit_data64(0x0001020304050607, relocInfo::none);
1046   __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1047   __ emit_data64(0x0001020304050607, relocInfo::none);
1048   __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1049 
1050   // record the stub entry and end
1051   store_archive_data(stub_id, start, __ pc());
1052 
1053   return start;
1054 }
1055 
1056 address StubGenerator::generate_vector_reverse_byte_perm_mask_int() {
1057   StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_int_id;
1058   int entry_count = StubInfo::entry_count(stub_id);
1059   assert(entry_count == 1, "sanity check");
1060   address start = load_archive_data(stub_id);
1061   if (start != nullptr) {
1062     return start;
1063   }
1064   __ align(CodeEntryAlignment);
1065   StubCodeMark mark(this, stub_id);
1066   start = __ pc();
1067 
1068   __ emit_data64(0x0405060700010203, relocInfo::none);
1069   __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1070   __ emit_data64(0x0405060700010203, relocInfo::none);
1071   __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1072   __ emit_data64(0x0405060700010203, relocInfo::none);
1073   __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1074   __ emit_data64(0x0405060700010203, relocInfo::none);
1075   __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1076 
1077   // record the stub entry and end
1078   store_archive_data(stub_id, start, __ pc());
1079 
1080   return start;
1081 }
1082 
1083 address StubGenerator::generate_vector_reverse_byte_perm_mask_short() {
1084   StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_short_id;
1085   int entry_count = StubInfo::entry_count(stub_id);
1086   assert(entry_count == 1, "sanity check");
1087   address start = load_archive_data(stub_id);
1088   if (start != nullptr) {
1089     return start;
1090   }
1091   __ align(CodeEntryAlignment);
1092   StubCodeMark mark(this, stub_id);
1093   start = __ pc();
1094 
1095   __ emit_data64(0x0607040502030001, relocInfo::none);
1096   __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1097   __ emit_data64(0x0607040502030001, relocInfo::none);
1098   __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1099   __ emit_data64(0x0607040502030001, relocInfo::none);
1100   __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1101   __ emit_data64(0x0607040502030001, relocInfo::none);
1102   __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1103 
1104   // record the stub entry and end
1105   store_archive_data(stub_id, start, __ pc());
1106 
1107   return start;
1108 }
1109 
1110 address StubGenerator::generate_vector_byte_shuffle_mask() {
1111   StubId stub_id = StubId::stubgen_vector_byte_shuffle_mask_id;
1112   int entry_count = StubInfo::entry_count(stub_id);
1113   assert(entry_count == 1, "sanity check");
1114   address start = load_archive_data(stub_id);
1115   if (start != nullptr) {
1116     return start;
1117   }
1118   __ align(CodeEntryAlignment);
1119   StubCodeMark mark(this, stub_id);
1120   start = __ pc();
1121 
1122   __ emit_data64(0x7070707070707070, relocInfo::none);
1123   __ emit_data64(0x7070707070707070, relocInfo::none);
1124   __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none);
1125   __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none);
1126 
1127   // record the stub entry and end
1128   store_archive_data(stub_id, start, __ pc());
1129 
1130   return start;
1131 }
1132 
1133 address StubGenerator::generate_fp_mask(StubId stub_id, int64_t mask) {
1134   int entry_count = StubInfo::entry_count(stub_id);
1135   assert(entry_count == 1, "sanity check");
1136   address start = load_archive_data(stub_id);
1137   if (start != nullptr) {
1138     return start;
1139   }
1140   __ align(CodeEntryAlignment);
1141   StubCodeMark mark(this, stub_id);
1142   start = __ pc();
1143 
1144   __ emit_data64( mask, relocInfo::none );
1145   __ emit_data64( mask, relocInfo::none );
1146 
1147   // record the stub entry and end
1148   store_archive_data(stub_id, start, __ pc());
1149 
1150   return start;
1151 }
1152 
1153 address StubGenerator::generate_compress_perm_table(StubId stub_id) {
1154   int esize;
1155   switch (stub_id) {
1156   case StubId::stubgen_compress_perm_table32_id:
1157     esize = 32;
1158     break;
1159   case StubId::stubgen_compress_perm_table64_id:
1160     esize = 64;
1161     break;
1162   default:
1163     ShouldNotReachHere();
1164   }
1165   int entry_count = StubInfo::entry_count(stub_id);
1166   assert(entry_count == 1, "sanity check");
1167   address start = load_archive_data(stub_id);
1168   if (start != nullptr) {
1169     return start;
1170   }
1171   __ align(CodeEntryAlignment);
1172   StubCodeMark mark(this, stub_id);
1173   start = __ pc();
1174   if (esize == 32) {
1175     // Loop to generate 256 x 8 int compression permute index table. A row is
1176     // accessed using 8 bit index computed using vector mask. An entry in
1177     // a row holds either a valid permute index corresponding to set bit position
1178     // or a -1 (default) value.
1179     for (int mask = 0; mask < 256; mask++) {
1180       int ctr = 0;
1181       for (int j = 0; j < 8; j++) {
1182         if (mask & (1 << j)) {
1183           __ emit_data(j, relocInfo::none);
1184           ctr++;
1185         }
1186       }
1187       for (; ctr < 8; ctr++) {
1188         __ emit_data(-1, relocInfo::none);
1189       }
1190     }
1191   } else {
1192     assert(esize == 64, "");
1193     // Loop to generate 16 x 4 long compression permute index table. A row is
1194     // accessed using 4 bit index computed using vector mask. An entry in
1195     // a row holds either a valid permute index pair for a quadword corresponding
1196     // to set bit position or a -1 (default) value.
1197     for (int mask = 0; mask < 16; mask++) {
1198       int ctr = 0;
1199       for (int j = 0; j < 4; j++) {
1200         if (mask & (1 << j)) {
1201           __ emit_data(2 * j, relocInfo::none);
1202           __ emit_data(2 * j + 1, relocInfo::none);
1203           ctr++;
1204         }
1205       }
1206       for (; ctr < 4; ctr++) {
1207         __ emit_data64(-1L, relocInfo::none);
1208       }
1209     }
1210   }
1211   // record the stub entry and end
1212   store_archive_data(stub_id, start, __ pc());
1213 
1214   return start;
1215 }
1216 
1217 address StubGenerator::generate_expand_perm_table(StubId stub_id) {
1218   int esize;
1219   switch (stub_id) {
1220   case StubId::stubgen_expand_perm_table32_id:
1221     esize = 32;
1222     break;
1223   case StubId::stubgen_expand_perm_table64_id:
1224     esize = 64;
1225     break;
1226   default:
1227     ShouldNotReachHere();
1228   }
1229   int entry_count = StubInfo::entry_count(stub_id);
1230   assert(entry_count == 1, "sanity check");
1231   address start = load_archive_data(stub_id);
1232   if (start != nullptr) {
1233     return start;
1234   }
1235   __ align(CodeEntryAlignment);
1236   StubCodeMark mark(this, stub_id);
1237   start = __ pc();
1238   if (esize == 32) {
1239     // Loop to generate 256 x 8 int expand permute index table. A row is accessed
1240     // using 8 bit index computed using vector mask. An entry in a row holds either
1241     // a valid permute index (starting from least significant lane) placed at poisition
1242     // corresponding to set bit position or a -1 (default) value.
1243     for (int mask = 0; mask < 256; mask++) {
1244       int ctr = 0;
1245       for (int j = 0; j < 8; j++) {
1246         if (mask & (1 << j)) {
1247           __ emit_data(ctr++, relocInfo::none);
1248         } else {
1249           __ emit_data(-1, relocInfo::none);
1250         }
1251       }
1252     }
1253   } else {
1254     assert(esize == 64, "");
1255     // Loop to generate 16 x 4 long expand permute index table. A row is accessed
1256     // using 4 bit index computed using vector mask. An entry in a row holds either
1257     // a valid doubleword permute index pair representing a quadword index (starting
1258     // from least significant lane) placed at poisition corresponding to set bit
1259     // position or a -1 (default) value.
1260     for (int mask = 0; mask < 16; mask++) {
1261       int ctr = 0;
1262       for (int j = 0; j < 4; j++) {
1263         if (mask & (1 << j)) {
1264           __ emit_data(2 * ctr, relocInfo::none);
1265           __ emit_data(2 * ctr + 1, relocInfo::none);
1266           ctr++;
1267         } else {
1268           __ emit_data64(-1L, relocInfo::none);
1269         }
1270       }
1271     }
1272   }
1273   // record the stub entry and end
1274   store_archive_data(stub_id, start, __ pc());
1275 
1276   return start;
1277 }
1278 
1279 address StubGenerator::generate_vector_mask(StubId stub_id, int64_t mask) {
1280   int entry_count = StubInfo::entry_count(stub_id);
1281   assert(entry_count == 1, "sanity check");
1282   address start = load_archive_data(stub_id);
1283   if (start != nullptr) {
1284     return start;
1285   }
1286   __ align(CodeEntryAlignment);
1287   StubCodeMark mark(this, stub_id);
1288   start = __ pc();
1289 
1290   __ emit_data64(mask, relocInfo::none);
1291   __ emit_data64(mask, relocInfo::none);
1292   __ emit_data64(mask, relocInfo::none);
1293   __ emit_data64(mask, relocInfo::none);
1294   __ emit_data64(mask, relocInfo::none);
1295   __ emit_data64(mask, relocInfo::none);
1296   __ emit_data64(mask, relocInfo::none);
1297   __ emit_data64(mask, relocInfo::none);
1298 
1299   // record the stub entry and end
1300   store_archive_data(stub_id, start, __ pc());
1301 
1302   return start;
1303 }
1304 
1305 address StubGenerator::generate_vector_byte_perm_mask() {
1306   StubId stub_id = StubId::stubgen_vector_byte_perm_mask_id;
1307   int entry_count = StubInfo::entry_count(stub_id);
1308   assert(entry_count == 1, "sanity check");
1309   address start = load_archive_data(stub_id);
1310   if (start != nullptr) {
1311     return start;
1312   }
1313   __ align(CodeEntryAlignment);
1314   StubCodeMark mark(this, stub_id);
1315   start = __ pc();
1316 
1317   __ emit_data64(0x0000000000000001, relocInfo::none);
1318   __ emit_data64(0x0000000000000003, relocInfo::none);
1319   __ emit_data64(0x0000000000000005, relocInfo::none);
1320   __ emit_data64(0x0000000000000007, relocInfo::none);
1321   __ emit_data64(0x0000000000000000, relocInfo::none);
1322   __ emit_data64(0x0000000000000002, relocInfo::none);
1323   __ emit_data64(0x0000000000000004, relocInfo::none);
1324   __ emit_data64(0x0000000000000006, relocInfo::none);
1325 
1326   // record the stub entry and end
1327   store_archive_data(stub_id, start, __ pc());
1328 
1329   return start;
1330 }
1331 
1332 address StubGenerator::generate_vector_fp_mask(StubId stub_id, int64_t mask) {
1333   int entry_count = StubInfo::entry_count(stub_id);
1334   assert(entry_count == 1, "sanity check");
1335   address start = load_archive_data(stub_id);
1336   if (start != nullptr) {
1337     return start;
1338   }
1339   __ align(CodeEntryAlignment);
1340   StubCodeMark mark(this, stub_id);
1341   start = __ pc();
1342 
1343   __ emit_data64(mask, relocInfo::none);
1344   __ emit_data64(mask, relocInfo::none);
1345   __ emit_data64(mask, relocInfo::none);
1346   __ emit_data64(mask, relocInfo::none);
1347   __ emit_data64(mask, relocInfo::none);
1348   __ emit_data64(mask, relocInfo::none);
1349   __ emit_data64(mask, relocInfo::none);
1350   __ emit_data64(mask, relocInfo::none);
1351 
1352   // record the stub entry and end
1353   store_archive_data(stub_id, start, __ pc());
1354 
1355   return start;
1356 }
1357 
1358 address StubGenerator::generate_vector_custom_i32(StubId stub_id, Assembler::AvxVectorLen len,
1359                                    int32_t val0, int32_t val1, int32_t val2, int32_t val3,
1360                                    int32_t val4, int32_t val5, int32_t val6, int32_t val7,
1361                                    int32_t val8, int32_t val9, int32_t val10, int32_t val11,
1362                                    int32_t val12, int32_t val13, int32_t val14, int32_t val15) {
1363   int entry_count = StubInfo::entry_count(stub_id);
1364   assert(entry_count == 1, "sanity check");
1365   address start = load_archive_data(stub_id);
1366   if (start != nullptr) {
1367     return start;
1368   }
1369   __ align(CodeEntryAlignment);
1370   StubCodeMark mark(this, stub_id);
1371   start = __ pc();
1372 
1373   assert(len != Assembler::AVX_NoVec, "vector len must be specified");
1374   __ emit_data(val0, relocInfo::none, 0);
1375   __ emit_data(val1, relocInfo::none, 0);
1376   __ emit_data(val2, relocInfo::none, 0);
1377   __ emit_data(val3, relocInfo::none, 0);
1378   if (len >= Assembler::AVX_256bit) {
1379     __ emit_data(val4, relocInfo::none, 0);
1380     __ emit_data(val5, relocInfo::none, 0);
1381     __ emit_data(val6, relocInfo::none, 0);
1382     __ emit_data(val7, relocInfo::none, 0);
1383     if (len >= Assembler::AVX_512bit) {
1384       __ emit_data(val8, relocInfo::none, 0);
1385       __ emit_data(val9, relocInfo::none, 0);
1386       __ emit_data(val10, relocInfo::none, 0);
1387       __ emit_data(val11, relocInfo::none, 0);
1388       __ emit_data(val12, relocInfo::none, 0);
1389       __ emit_data(val13, relocInfo::none, 0);
1390       __ emit_data(val14, relocInfo::none, 0);
1391       __ emit_data(val15, relocInfo::none, 0);
1392     }
1393   }
1394   // record the stub entry and end
1395   store_archive_data(stub_id, start, __ pc());
1396 
1397   return start;
1398 }
1399 
1400 // Non-destructive plausibility checks for oops
1401 //
1402 // Arguments:
1403 //    all args on stack!
1404 //
1405 // Stack after saving c_rarg3:
1406 //    [tos + 0]: saved c_rarg3
1407 //    [tos + 1]: saved c_rarg2
1408 //    [tos + 2]: saved r12 (several TemplateTable methods use it)
1409 //    [tos + 3]: saved flags
1410 //    [tos + 4]: return address
1411 //  * [tos + 5]: error message (char*)
1412 //  * [tos + 6]: object to verify (oop)
1413 //  * [tos + 7]: saved rax - saved by caller and bashed
1414 //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
1415 //  * = popped on exit
1416 address StubGenerator::generate_verify_oop() {
1417   StubId stub_id = StubId::stubgen_verify_oop_id;
1418   int entry_count = StubInfo::entry_count(stub_id);
1419   assert(entry_count == 1, "sanity check");
1420   address start = load_archive_data(stub_id);
1421   if (start != nullptr) {
1422     return start;
1423   }
1424   StubCodeMark mark(this, stub_id);
1425   start = __ pc();
1426 
1427   Label exit, error;
1428 
1429   __ pushf();
1430   __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()), rscratch1);
1431 
1432   __ push_ppx(r12);
1433 
1434   // save c_rarg2 and c_rarg3
1435   __ push_ppx(c_rarg2);
1436   __ push_ppx(c_rarg3);
1437 
1438   enum {
1439     // After previous pushes.
1440     oop_to_verify = 6 * wordSize,
1441     saved_rax     = 7 * wordSize,
1442     saved_r10     = 8 * wordSize,
1443 
1444     // Before the call to MacroAssembler::debug(), see below.
1445     return_addr   = 16 * wordSize,
1446     error_msg     = 17 * wordSize
1447   };
1448 
1449   // get object
1450   __ movptr(rax, Address(rsp, oop_to_verify));
1451 
1452   // make sure object is 'reasonable'
1453   __ testptr(rax, rax);
1454   __ jcc(Assembler::zero, exit); // if obj is null it is OK
1455 
1456    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
1457    bs_asm->check_oop(_masm, rax, c_rarg2, c_rarg3, error);
1458 
1459   // return if everything seems ok
1460   __ bind(exit);
1461   __ movptr(rax, Address(rsp, saved_rax));     // get saved rax back
1462   __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1463   __ pop_ppx(c_rarg3);           // restore c_rarg3
1464   __ pop_ppx(c_rarg2);           // restore c_rarg2
1465   __ pop_ppx(r12);               // restore r12
1466   __ popf();                                   // restore flags
1467   __ ret(4 * wordSize);                        // pop caller saved stuff
1468 
1469   // handle errors
1470   __ bind(error);
1471   __ movptr(rax, Address(rsp, saved_rax));     // get saved rax back
1472   __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1473   __ pop_ppx(c_rarg3);           // get saved c_rarg3 back
1474   __ pop_ppx(c_rarg2);           // get saved c_rarg2 back
1475   __ pop_ppx(r12);               // get saved r12 back
1476   __ popf();                                   // get saved flags off stack --
1477                                                // will be ignored
1478 
1479   __ pusha();                                  // push registers
1480                                                // (rip is already
1481                                                // already pushed)
1482   // debug(char* msg, int64_t pc, int64_t regs[])
1483   // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1484   // pushed all the registers, so now the stack looks like:
1485   //     [tos +  0] 16 saved registers
1486   //     [tos + 16] return address
1487   //   * [tos + 17] error message (char*)
1488   //   * [tos + 18] object to verify (oop)
1489   //   * [tos + 19] saved rax - saved by caller and bashed
1490   //   * [tos + 20] saved r10 (rscratch1) - saved by caller
1491   //   * = popped on exit
1492 
1493   __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
1494   __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
1495   __ movq(c_rarg2, rsp);                          // pass address of regs on stack
1496   __ mov(r12, rsp);                               // remember rsp
1497   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1498   __ andptr(rsp, -16);                            // align stack as required by ABI
1499   BLOCK_COMMENT("call MacroAssembler::debug");
1500   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1501   __ hlt();
1502 
1503   // record the stub entry and end
1504   store_archive_data(stub_id, start, __ pc());
1505 
1506   return start;
1507 }
1508 
1509 
1510 // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1511 //
1512 // Outputs:
1513 //    rdi - rcx
1514 //    rsi - rdx
1515 //    rdx - r8
1516 //    rcx - r9
1517 //
1518 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter
1519 // are non-volatile.  r9 and r10 should not be used by the caller.
1520 //
1521 void StubGenerator::setup_arg_regs(int nargs) {
1522   const Register saved_rdi = r9;
1523   const Register saved_rsi = r10;
1524   assert(nargs == 3 || nargs == 4, "else fix");
1525 #ifdef _WIN64
1526   assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1527          "unexpected argument registers");
1528   if (nargs == 4) {
1529     __ mov(rax, r9);  // r9 is also saved_rdi
1530   }
1531   __ movptr(saved_rdi, rdi);
1532   __ movptr(saved_rsi, rsi);
1533   __ mov(rdi, rcx); // c_rarg0
1534   __ mov(rsi, rdx); // c_rarg1
1535   __ mov(rdx, r8);  // c_rarg2
1536   if (nargs == 4) {
1537     __ mov(rcx, rax); // c_rarg3 (via rax)
1538   }
1539 #else
1540   assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1541          "unexpected argument registers");
1542 #endif
1543   DEBUG_ONLY(_regs_in_thread = false;)
1544 }
1545 
1546 
1547 void StubGenerator::restore_arg_regs() {
1548   assert(!_regs_in_thread, "wrong call to restore_arg_regs");
1549   const Register saved_rdi = r9;
1550   const Register saved_rsi = r10;
1551 #ifdef _WIN64
1552   __ movptr(rdi, saved_rdi);
1553   __ movptr(rsi, saved_rsi);
1554 #endif
1555 }
1556 
1557 
1558 // This is used in places where r10 is a scratch register, and can
1559 // be adapted if r9 is needed also.
1560 void StubGenerator::setup_arg_regs_using_thread(int nargs) {
1561   const Register saved_r15 = r9;
1562   assert(nargs == 3 || nargs == 4, "else fix");
1563 #ifdef _WIN64
1564   if (nargs == 4) {
1565     __ mov(rax, r9);       // r9 is also saved_r15
1566   }
1567   __ mov(saved_r15, r15);  // r15 is callee saved and needs to be restored
1568   __ get_thread_slow(r15_thread);
1569   assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1570          "unexpected argument registers");
1571   __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
1572   __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);
1573 
1574   __ mov(rdi, rcx); // c_rarg0
1575   __ mov(rsi, rdx); // c_rarg1
1576   __ mov(rdx, r8);  // c_rarg2
1577   if (nargs == 4) {
1578     __ mov(rcx, rax); // c_rarg3 (via rax)
1579   }
1580 #else
1581   assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1582          "unexpected argument registers");
1583 #endif
1584   DEBUG_ONLY(_regs_in_thread = true;)
1585 }
1586 
1587 
1588 void StubGenerator::restore_arg_regs_using_thread() {
1589   assert(_regs_in_thread, "wrong call to restore_arg_regs");
1590   const Register saved_r15 = r9;
1591 #ifdef _WIN64
1592   __ get_thread_slow(r15_thread);
1593   __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
1594   __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
1595   __ mov(r15, saved_r15);  // r15 is callee saved and needs to be restored
1596 #endif
1597 }
1598 
1599 
1600 void StubGenerator::setup_argument_regs(BasicType type) {
1601   if (type == T_BYTE || type == T_SHORT) {
1602     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1603                       // r9 and r10 may be used to save non-volatile registers
1604   } else {
1605     setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
1606                                    // r9 is used to save r15_thread
1607   }
1608 }
1609 
1610 
1611 void StubGenerator::restore_argument_regs(BasicType type) {
1612   if (type == T_BYTE || type == T_SHORT) {
1613     restore_arg_regs();
1614   } else {
1615     restore_arg_regs_using_thread();
1616   }
1617 }
1618 
1619 address StubGenerator::generate_data_cache_writeback() {
1620   const Register src        = c_rarg0;  // source address
1621   StubId stub_id = StubId::stubgen_data_cache_writeback_id;
1622   int entry_count = StubInfo::entry_count(stub_id);
1623   assert(entry_count == 1, "sanity check");
1624   address start = load_archive_data(stub_id);
1625   if (start != nullptr) {
1626     return start;
1627   }
1628   __ align(CodeEntryAlignment);
1629   StubCodeMark mark(this, stub_id);
1630 
1631   start = __ pc();
1632 
1633   __ enter();
1634   __ cache_wb(Address(src, 0));
1635   __ leave();
1636   __ ret(0);
1637 
1638   // record the stub entry and end
1639   store_archive_data(stub_id, start, __ pc());
1640 
1641   return start;
1642 }
1643 
1644 address StubGenerator::generate_data_cache_writeback_sync() {
1645   const Register is_pre    = c_rarg0;  // pre or post sync
1646   StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id;
1647   int entry_count = StubInfo::entry_count(stub_id);
1648   assert(entry_count == 1, "sanity check");
1649   address start = load_archive_data(stub_id);
1650   if (start != nullptr) {
1651     return start;
1652   }
1653   __ align(CodeEntryAlignment);
1654   StubCodeMark mark(this, stub_id);
1655 
1656   // pre wbsync is a no-op
1657   // post wbsync translates to an sfence
1658 
1659   Label skip;
1660   start = __ pc();
1661 
1662   __ enter();
1663   __ cmpl(is_pre, 0);
1664   __ jcc(Assembler::notEqual, skip);
1665   __ cache_wbsync(false);
1666   __ bind(skip);
1667   __ leave();
1668   __ ret(0);
1669 
1670   // record the stub entry and end
1671   store_archive_data(stub_id, start, __ pc());
1672 
1673   return start;
1674 }
1675 
1676 // ofs and limit are use for multi-block byte array.
1677 // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
1678 address StubGenerator::generate_md5_implCompress(StubId stub_id) {
1679   bool multi_block;
1680   switch (stub_id) {
1681   case StubId::stubgen_md5_implCompress_id:
1682     multi_block = false;
1683     break;
1684   case StubId::stubgen_md5_implCompressMB_id:
1685     multi_block = true;
1686     break;
1687   default:
1688     ShouldNotReachHere();
1689   }
1690   int entry_count = StubInfo::entry_count(stub_id);
1691   assert(entry_count == 1, "sanity check");
1692   address start = load_archive_data(stub_id);
1693   if (start != nullptr) {
1694     return start;
1695   }
1696   __ align(CodeEntryAlignment);
1697   StubCodeMark mark(this, stub_id);
1698   start = __ pc();
1699 
1700   const Register buf_param = r15;
1701   const Address state_param(rsp, 0 * wordSize);
1702   const Address ofs_param  (rsp, 1 * wordSize    );
1703   const Address limit_param(rsp, 1 * wordSize + 4);
1704 
1705   __ enter();
1706   __ push_ppx(rbx);
1707   __ push_ppx(rdi);
1708   __ push_ppx(rsi);
1709   __ push_ppx(r15);
1710   __ subptr(rsp, 2 * wordSize);
1711 
1712   __ movptr(buf_param, c_rarg0);
1713   __ movptr(state_param, c_rarg1);
1714   if (multi_block) {
1715     __ movl(ofs_param, c_rarg2);
1716     __ movl(limit_param, c_rarg3);
1717   }
1718   __ fast_md5(buf_param, state_param, ofs_param, limit_param, multi_block);
1719 
1720   __ addptr(rsp, 2 * wordSize);
1721   __ pop_ppx(r15);
1722   __ pop_ppx(rsi);
1723   __ pop_ppx(rdi);
1724   __ pop_ppx(rbx);
1725   __ leave();
1726   __ ret(0);
1727 
1728   // record the stub entry and end
1729   store_archive_data(stub_id, start, __ pc());
1730 
1731   return start;
1732 }
1733 
1734 address StubGenerator::generate_upper_word_mask() {
1735   StubId stub_id = StubId::stubgen_upper_word_mask_id;
1736   int entry_count = StubInfo::entry_count(stub_id);
1737   assert(entry_count == 1, "sanity check");
1738   address start = load_archive_data(stub_id);
1739   if (start != nullptr) {
1740     return start;
1741   }
1742   __ align64();
1743   StubCodeMark mark(this, stub_id);
1744   start = __ pc();
1745 
1746   __ emit_data64(0x0000000000000000, relocInfo::none);
1747   __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
1748 
1749   // record the stub entry and end
1750   store_archive_data(stub_id, start, __ pc());
1751 
1752   return start;
1753 }
1754 
1755 address StubGenerator::generate_shuffle_byte_flip_mask() {
1756   StubId stub_id = StubId::stubgen_shuffle_byte_flip_mask_id;
1757   int entry_count = StubInfo::entry_count(stub_id);
1758   assert(entry_count == 1, "sanity check");
1759   address start = load_archive_data(stub_id);
1760   if (start != nullptr) {
1761     return start;
1762   }
1763   __ align64();
1764   StubCodeMark mark(this, stub_id);
1765   start = __ pc();
1766 
1767   __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
1768   __ emit_data64(0x0001020304050607, relocInfo::none);
1769 
1770   // record the stub entry and end
1771   store_archive_data(stub_id, start, __ pc());
1772 
1773   return start;
1774 }
1775 
1776 // ofs and limit are use for multi-block byte array.
1777 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
1778 address StubGenerator::generate_sha1_implCompress(StubId stub_id) {
1779   bool multi_block;
1780   switch (stub_id) {
1781   case StubId::stubgen_sha1_implCompress_id:
1782     multi_block = false;
1783     break;
1784   case StubId::stubgen_sha1_implCompressMB_id:
1785     multi_block = true;
1786     break;
1787   default:
1788     ShouldNotReachHere();
1789   }
1790   int entry_count = StubInfo::entry_count(stub_id);
1791   assert(entry_count == 1, "sanity check");
1792   address start = load_archive_data(stub_id);
1793   if (start != nullptr) {
1794     return start;
1795   }
1796   __ align(CodeEntryAlignment);
1797   StubCodeMark mark(this, stub_id);
1798   start = __ pc();
1799 
1800   Register buf = c_rarg0;
1801   Register state = c_rarg1;
1802   Register ofs = c_rarg2;
1803   Register limit = c_rarg3;
1804 
1805   const XMMRegister abcd = xmm0;
1806   const XMMRegister e0 = xmm1;
1807   const XMMRegister e1 = xmm2;
1808   const XMMRegister msg0 = xmm3;
1809 
1810   const XMMRegister msg1 = xmm4;
1811   const XMMRegister msg2 = xmm5;
1812   const XMMRegister msg3 = xmm6;
1813   const XMMRegister shuf_mask = xmm7;
1814 
1815   __ enter();
1816 
1817   __ subptr(rsp, 4 * wordSize);
1818 
1819   __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
1820     buf, state, ofs, limit, rsp, multi_block);
1821 
1822   __ addptr(rsp, 4 * wordSize);
1823 
1824   __ leave();
1825   __ ret(0);
1826 
1827   // record the stub entry and end
1828   store_archive_data(stub_id, start, __ pc());
1829 
1830   return start;
1831 }
1832 
1833 address StubGenerator::generate_pshuffle_byte_flip_mask(address& entry_00ba, address& entry_dc00) {
1834   StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_id;
1835   GrowableArray<address> entries;
1836   int entry_count = StubInfo::entry_count(stub_id);
1837   assert(entry_count == 3, "sanity check");
1838   address start = load_archive_data(stub_id, &entries);
1839   if (start != nullptr) {
1840     assert(entries.length() == entry_count - 1,
1841            "unexpected extra entry count %d", entries.length());
1842     entry_00ba = entries.at(0);
1843     entry_dc00 = entries.at(1);
1844     assert(VM_Version::supports_avx2() == (entry_00ba != nullptr && entry_dc00 != nullptr),
1845            "entries cannot be null when avx2 is enabled");
1846     return start;
1847   }
1848   __ align64();
1849   StubCodeMark mark(this, stub_id);
1850   start = __ pc();
1851   address entry2 = nullptr;
1852   address entry3 = nullptr;
1853   __ emit_data64(0x0405060700010203, relocInfo::none);
1854   __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
1855 
1856   if (VM_Version::supports_avx2()) {
1857     __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
1858     __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
1859     // _SHUF_00BA
1860     entry2 = __ pc();
1861     __ emit_data64(0x0b0a090803020100, relocInfo::none);
1862     __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1863     __ emit_data64(0x0b0a090803020100, relocInfo::none);
1864     __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1865     // _SHUF_DC00
1866     entry3 = __ pc();
1867     __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1868     __ emit_data64(0x0b0a090803020100, relocInfo::none);
1869     __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1870     __ emit_data64(0x0b0a090803020100, relocInfo::none);
1871   }
1872   // have to track the 2nd and 3rd entries even if they are null
1873   entry_00ba = entry2;
1874   entries.push(entry_00ba);
1875   entry_dc00 = entry3;
1876   entries.push(entry_dc00);
1877 
1878   // record the stub entry and end plus all the auxiliary entries
1879   store_archive_data(stub_id, start, __ pc(), &entries);
1880 
1881   return start;
1882 }
1883 
1884 //Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
1885 address StubGenerator::generate_pshuffle_byte_flip_mask_sha512(address& entry_ymm_lo) {
1886   StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_sha512_id;
1887   GrowableArray<address> entries;
1888   int entry_count = StubInfo::entry_count(stub_id);
1889   assert(entry_count == 2, "sanity check");
1890   address start = load_archive_data(stub_id, &entries);
1891   if (start != nullptr) {
1892     assert(entries.length() == entry_count - 1,
1893            "unexpected extra entry count %d", entries.length());
1894     entry_ymm_lo = entries.at(0);
1895     assert(VM_Version::supports_avx2() == (entry_ymm_lo != nullptr),
1896            "entry cannot be null when avx2 is enabled");
1897     return start;
1898   }
1899   __ align32();
1900   StubCodeMark mark(this, stub_id);
1901   start = __ pc();
1902   address entry2 = nullptr;
1903   if (VM_Version::supports_avx2()) {
1904     __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
1905     __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
1906     __ emit_data64(0x1011121314151617, relocInfo::none);
1907     __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
1908     // capture 2nd entry
1909     entry2 = __ pc();
1910     __ emit_data64(0x0000000000000000, relocInfo::none); //MASK_YMM_LO
1911     __ emit_data64(0x0000000000000000, relocInfo::none);
1912     __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1913     __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1914   }
1915   // have to track the 2nd entry even if it is null
1916   entry_ymm_lo = entry2;
1917   entries.push(entry2);
1918   // record the stub entry and end
1919   store_archive_data(stub_id, start, __ pc(), &entries);
1920 
1921   return start;
1922 }
1923 
1924 // ofs and limit are use for multi-block byte array.
1925 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
1926 address StubGenerator::generate_sha256_implCompress(StubId stub_id) {
1927   bool multi_block;
1928   switch (stub_id) {
1929   case StubId::stubgen_sha256_implCompress_id:
1930     multi_block = false;
1931     break;
1932   case StubId::stubgen_sha256_implCompressMB_id:
1933     multi_block = true;
1934     break;
1935   default:
1936     ShouldNotReachHere();
1937   }
1938   assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
1939   int entry_count = StubInfo::entry_count(stub_id);
1940   assert(entry_count == 1, "sanity check");
1941   address start = load_archive_data(stub_id);
1942   if (start != nullptr) {
1943     return start;
1944   }
1945   __ align(CodeEntryAlignment);
1946   StubCodeMark mark(this, stub_id);
1947   start = __ pc();
1948 
1949   Register buf = c_rarg0;
1950   Register state = c_rarg1;
1951   Register ofs = c_rarg2;
1952   Register limit = c_rarg3;
1953 
1954   const XMMRegister msg = xmm0;
1955   const XMMRegister state0 = xmm1;
1956   const XMMRegister state1 = xmm2;
1957   const XMMRegister msgtmp0 = xmm3;
1958 
1959   const XMMRegister msgtmp1 = xmm4;
1960   const XMMRegister msgtmp2 = xmm5;
1961   const XMMRegister msgtmp3 = xmm6;
1962   const XMMRegister msgtmp4 = xmm7;
1963 
1964   const XMMRegister shuf_mask = xmm8;
1965 
1966   __ enter();
1967 
1968   __ subptr(rsp, 4 * wordSize);
1969 
1970   if (VM_Version::supports_sha()) {
1971     __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
1972       buf, state, ofs, limit, rsp, multi_block, shuf_mask);
1973   } else if (VM_Version::supports_avx2()) {
1974     __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
1975       buf, state, ofs, limit, rsp, multi_block, shuf_mask);
1976   }
1977   __ addptr(rsp, 4 * wordSize);
1978   __ vzeroupper();
1979   __ leave();
1980   __ ret(0);
1981 
1982   // record the stub entry and end
1983   store_archive_data(stub_id, start, __ pc());
1984 
1985   return start;
1986 }
1987 
1988 address StubGenerator::generate_sha512_implCompress(StubId stub_id) {
1989   bool multi_block;
1990   switch (stub_id) {
1991   case StubId::stubgen_sha512_implCompress_id:
1992     multi_block = false;
1993     break;
1994   case StubId::stubgen_sha512_implCompressMB_id:
1995     multi_block = true;
1996     break;
1997   default:
1998     ShouldNotReachHere();
1999   }
2000   assert(VM_Version::supports_avx2(), "");
2001   assert(VM_Version::supports_bmi2() || VM_Version::supports_sha512(), "");
2002   int entry_count = StubInfo::entry_count(stub_id);
2003   assert(entry_count == 1, "sanity check");
2004   address start = load_archive_data(stub_id);
2005   if (start != nullptr) {
2006     return start;
2007   }
2008   __ align(CodeEntryAlignment);
2009   StubCodeMark mark(this, stub_id);
2010   start = __ pc();
2011 
2012   Register buf = c_rarg0;
2013   Register state = c_rarg1;
2014   Register ofs = c_rarg2;
2015   Register limit = c_rarg3;
2016 
2017   __ enter();
2018 
2019   if (VM_Version::supports_sha512()) {
2020       __ sha512_update_ni_x1(state, buf, ofs, limit, multi_block);
2021   } else {
2022     const XMMRegister msg = xmm0;
2023     const XMMRegister state0 = xmm1;
2024     const XMMRegister state1 = xmm2;
2025     const XMMRegister msgtmp0 = xmm3;
2026     const XMMRegister msgtmp1 = xmm4;
2027     const XMMRegister msgtmp2 = xmm5;
2028     const XMMRegister msgtmp3 = xmm6;
2029     const XMMRegister msgtmp4 = xmm7;
2030 
2031     const XMMRegister shuf_mask = xmm8;
2032     __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
2033       buf, state, ofs, limit, rsp, multi_block, shuf_mask);
2034   }
2035   __ vzeroupper();
2036   __ leave();
2037   __ ret(0);
2038 
2039   // record the stub entry and end
2040   store_archive_data(stub_id, start, __ pc());
2041 
2042   return start;
2043 }
2044 
2045 address StubGenerator::base64_shuffle_addr() {
2046   StubId stub_id = StubId::stubgen_shuffle_base64_id;
2047   int entry_count = StubInfo::entry_count(stub_id);
2048   assert(entry_count == 1, "sanity check");
2049   address start = load_archive_data(stub_id);
2050   if (start != nullptr) {
2051     return start;
2052   }
2053   __ align64();
2054   StubCodeMark mark(this, stub_id);
2055   start = __ pc();
2056 
2057   assert(((unsigned long long)start & 0x3f) == 0,
2058          "Alignment problem (0x%08llx)", (unsigned long long)start);
2059   __ emit_data64(0x0405030401020001, relocInfo::none);
2060   __ emit_data64(0x0a0b090a07080607, relocInfo::none);
2061   __ emit_data64(0x10110f100d0e0c0d, relocInfo::none);
2062   __ emit_data64(0x1617151613141213, relocInfo::none);
2063   __ emit_data64(0x1c1d1b1c191a1819, relocInfo::none);
2064   __ emit_data64(0x222321221f201e1f, relocInfo::none);
2065   __ emit_data64(0x2829272825262425, relocInfo::none);
2066   __ emit_data64(0x2e2f2d2e2b2c2a2b, relocInfo::none);
2067 
2068   // record the stub entry and end
2069   store_archive_data(stub_id, start, __ pc());
2070 
2071   return start;
2072 }
2073 
2074 address StubGenerator::base64_avx2_shuffle_addr() {
2075   StubId stub_id = StubId::stubgen_avx2_shuffle_base64_id;
2076   int entry_count = StubInfo::entry_count(stub_id);
2077   assert(entry_count == 1, "sanity check");
2078   address start = load_archive_data(stub_id);
2079   if (start != nullptr) {
2080     return start;
2081   }
2082   __ align32();
2083   StubCodeMark mark(this, stub_id);
2084   start = __ pc();
2085 
2086   __ emit_data64(0x0809070805060405, relocInfo::none);
2087   __ emit_data64(0x0e0f0d0e0b0c0a0b, relocInfo::none);
2088   __ emit_data64(0x0405030401020001, relocInfo::none);
2089   __ emit_data64(0x0a0b090a07080607, relocInfo::none);
2090 
2091   // record the stub entry and end
2092   store_archive_data(stub_id, start, __ pc());
2093 
2094   return start;
2095 }
2096 
2097 address StubGenerator::base64_avx2_input_mask_addr() {
2098   StubId stub_id = StubId::stubgen_avx2_input_mask_base64_id;
2099   int entry_count = StubInfo::entry_count(stub_id);
2100   assert(entry_count == 1, "sanity check");
2101   address start = load_archive_data(stub_id);
2102   if (start != nullptr) {
2103     return start;
2104   }
2105   __ align32();
2106   StubCodeMark mark(this, stub_id);
2107   start = __ pc();
2108 
2109   __ emit_data64(0x8000000000000000, relocInfo::none);
2110   __ emit_data64(0x8000000080000000, relocInfo::none);
2111   __ emit_data64(0x8000000080000000, relocInfo::none);
2112   __ emit_data64(0x8000000080000000, relocInfo::none);
2113 
2114   // record the stub entry and end
2115   store_archive_data(stub_id, start, __ pc());
2116 
2117   return start;
2118 }
2119 
2120 address StubGenerator::base64_avx2_lut_addr() {
2121   StubId stub_id = StubId::stubgen_avx2_lut_base64_id;
2122   int entry_count = StubInfo::entry_count(stub_id);
2123   assert(entry_count == 1, "sanity check");
2124   address start = load_archive_data(stub_id);
2125   if (start != nullptr) {
2126     return start;
2127   }
2128   __ align32();
2129   StubCodeMark mark(this, stub_id);
2130   start = __ pc();
2131 
2132   __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2133   __ emit_data64(0x0000f0edfcfcfcfc, relocInfo::none);
2134   __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2135   __ emit_data64(0x0000f0edfcfcfcfc, relocInfo::none);
2136 
2137   // URL LUT
2138   __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2139   __ emit_data64(0x000020effcfcfcfc, relocInfo::none);
2140   __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2141   __ emit_data64(0x000020effcfcfcfc, relocInfo::none);
2142 
2143   // record the stub entry and end
2144   store_archive_data(stub_id, start, __ pc());
2145 
2146   return start;
2147 }
2148 
2149 address StubGenerator::base64_encoding_table_addr() {
2150   StubId stub_id = StubId::stubgen_encoding_table_base64_id;
2151   int entry_count = StubInfo::entry_count(stub_id);
2152   assert(entry_count == 1, "sanity check");
2153   address start = load_archive_data(stub_id);
2154   if (start != nullptr) {
2155     return start;
2156   }
2157   __ align64();
2158   StubCodeMark mark(this, stub_id);
2159   start = __ pc();
2160 
2161   assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start);
2162   __ emit_data64(0x4847464544434241, relocInfo::none);
2163   __ emit_data64(0x504f4e4d4c4b4a49, relocInfo::none);
2164   __ emit_data64(0x5857565554535251, relocInfo::none);
2165   __ emit_data64(0x6665646362615a59, relocInfo::none);
2166   __ emit_data64(0x6e6d6c6b6a696867, relocInfo::none);
2167   __ emit_data64(0x767574737271706f, relocInfo::none);
2168   __ emit_data64(0x333231307a797877, relocInfo::none);
2169   __ emit_data64(0x2f2b393837363534, relocInfo::none);
2170 
2171   // URL table
2172   __ emit_data64(0x4847464544434241, relocInfo::none);
2173   __ emit_data64(0x504f4e4d4c4b4a49, relocInfo::none);
2174   __ emit_data64(0x5857565554535251, relocInfo::none);
2175   __ emit_data64(0x6665646362615a59, relocInfo::none);
2176   __ emit_data64(0x6e6d6c6b6a696867, relocInfo::none);
2177   __ emit_data64(0x767574737271706f, relocInfo::none);
2178   __ emit_data64(0x333231307a797877, relocInfo::none);
2179   __ emit_data64(0x5f2d393837363534, relocInfo::none);
2180 
2181   // record the stub entry and end
2182   store_archive_data(stub_id, start, __ pc());
2183 
2184   return start;
2185 }
2186 
2187 // Code for generating Base64 encoding.
2188 // Intrinsic function prototype in Base64.java:
2189 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp,
2190 // boolean isURL) {
2191 address StubGenerator::generate_base64_encodeBlock()
2192 {
2193   StubId stub_id = StubId::stubgen_base64_encodeBlock_id;
2194   int entry_count = StubInfo::entry_count(stub_id);
2195   assert(entry_count == 1, "sanity check");
2196   address start = load_archive_data(stub_id);
2197   if (start != nullptr) {
2198     return start;
2199   }
2200   __ align(CodeEntryAlignment);
2201   StubCodeMark mark(this, stub_id);
2202   start = __ pc();
2203 
2204   __ enter();
2205 
2206   // Save callee-saved registers before using them
2207   __ push_ppx(r12);
2208   __ push_ppx(r13);
2209   __ push_ppx(r14);
2210   __ push_ppx(r15);
2211 
2212   // arguments
2213   const Register source = c_rarg0;       // Source Array
2214   const Register start_offset = c_rarg1; // start offset
2215   const Register end_offset = c_rarg2;   // end offset
2216   const Register dest = c_rarg3;   // destination array
2217 
2218 #ifndef _WIN64
2219   const Register dp = c_rarg4;    // Position for writing to dest array
2220   const Register isURL = c_rarg5; // Base64 or URL character set
2221 #else
2222   const Address dp_mem(rbp, 6 * wordSize); // length is on stack on Win64
2223   const Address isURL_mem(rbp, 7 * wordSize);
2224   const Register isURL = r10; // pick the volatile windows register
2225   const Register dp = r12;
2226   __ movl(dp, dp_mem);
2227   __ movl(isURL, isURL_mem);
2228 #endif
2229 
2230   const Register length = r14;
2231   const Register encode_table = r13;
2232   Label L_process3, L_exit, L_processdata, L_vbmiLoop, L_not512, L_32byteLoop;
2233 
2234   // calculate length from offsets
2235   __ movl(length, end_offset);
2236   __ subl(length, start_offset);
2237   __ jcc(Assembler::lessEqual, L_exit);
2238 
2239   // Code for 512-bit VBMI encoding.  Encodes 48 input bytes into 64
2240   // output bytes. We read 64 input bytes and ignore the last 16, so be
2241   // sure not to read past the end of the input buffer.
2242   if (VM_Version::supports_avx512_vbmi()) {
2243     __ cmpl(length, 64); // Do not overrun input buffer.
2244     __ jcc(Assembler::below, L_not512);
2245 
2246     __ shll(isURL, 6); // index into decode table based on isURL
2247     __ lea(encode_table, ExternalAddress(StubRoutines::x86::base64_encoding_table_addr()));
2248     __ addptr(encode_table, isURL);
2249     __ shrl(isURL, 6); // restore isURL
2250 
2251     __ mov64(rax, 0x3036242a1016040aull); // Shifts
2252     __ evmovdquq(xmm3, ExternalAddress(StubRoutines::x86::base64_shuffle_addr()), Assembler::AVX_512bit, r15);
2253     __ evmovdquq(xmm2, Address(encode_table, 0), Assembler::AVX_512bit);
2254     __ evpbroadcastq(xmm1, rax, Assembler::AVX_512bit);
2255 
2256     __ align32();
2257     __ BIND(L_vbmiLoop);
2258 
2259     __ vpermb(xmm0, xmm3, Address(source, start_offset), Assembler::AVX_512bit);
2260     __ subl(length, 48);
2261 
2262     // Put the input bytes into the proper lanes for writing, then
2263     // encode them.
2264     __ evpmultishiftqb(xmm0, xmm1, xmm0, Assembler::AVX_512bit);
2265     __ vpermb(xmm0, xmm0, xmm2, Assembler::AVX_512bit);
2266 
2267     // Write to destination
2268     __ evmovdquq(Address(dest, dp), xmm0, Assembler::AVX_512bit);
2269 
2270     __ addptr(dest, 64);
2271     __ addptr(source, 48);
2272     __ cmpl(length, 64);
2273     __ jcc(Assembler::aboveEqual, L_vbmiLoop);
2274 
2275     __ vzeroupper();
2276   }
2277 
2278   __ BIND(L_not512);
2279   if (VM_Version::supports_avx2()) {
2280     /*
2281     ** This AVX2 encoder is based off the paper at:
2282     **      https://dl.acm.org/doi/10.1145/3132709
2283     **
2284     ** We use AVX2 SIMD instructions to encode 24 bytes into 32
2285     ** output bytes.
2286     **
2287     */
2288     // Lengths under 32 bytes are done with scalar routine
2289     __ cmpl(length, 31);
2290     __ jcc(Assembler::belowEqual, L_process3);
2291 
2292     // Set up supporting constant table data
2293     __ vmovdqu(xmm9, ExternalAddress(StubRoutines::x86::base64_avx2_shuffle_addr()), rax);
2294     // 6-bit mask for 2nd and 4th (and multiples) 6-bit values
2295     __ movl(rax, 0x0fc0fc00);
2296     __ movdl(xmm8, rax);
2297     __ vmovdqu(xmm1, ExternalAddress(StubRoutines::x86::base64_avx2_input_mask_addr()), rax);
2298     __ vpbroadcastd(xmm8, xmm8, Assembler::AVX_256bit);
2299 
2300     // Multiplication constant for "shifting" right by 6 and 10
2301     // bits
2302     __ movl(rax, 0x04000040);
2303 
2304     __ subl(length, 24);
2305     __ movdl(xmm7, rax);
2306     __ vpbroadcastd(xmm7, xmm7, Assembler::AVX_256bit);
2307 
2308     // For the first load, we mask off reading of the first 4
2309     // bytes into the register. This is so we can get 4 3-byte
2310     // chunks into each lane of the register, avoiding having to
2311     // handle end conditions.  We then shuffle these bytes into a
2312     // specific order so that manipulation is easier.
2313     //
2314     // The initial read loads the XMM register like this:
2315     //
2316     // Lower 128-bit lane:
2317     // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2318     // | XX | XX | XX | XX | A0 | A1 | A2 | B0 | B1 | B2 | C0 | C1
2319     // | C2 | D0 | D1 | D2 |
2320     // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2321     //
2322     // Upper 128-bit lane:
2323     // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2324     // | E0 | E1 | E2 | F0 | F1 | F2 | G0 | G1 | G2 | H0 | H1 | H2
2325     // | XX | XX | XX | XX |
2326     // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2327     //
2328     // Where A0 is the first input byte, B0 is the fourth, etc.
2329     // The alphabetical significance denotes the 3 bytes to be
2330     // consumed and encoded into 4 bytes.
2331     //
2332     // We then shuffle the register so each 32-bit word contains
2333     // the sequence:
2334     //    A1 A0 A2 A1, B1, B0, B2, B1, etc.
2335     // Each of these byte sequences are then manipulated into 4
2336     // 6-bit values ready for encoding.
2337     //
2338     // If we focus on one set of 3-byte chunks, changing the
2339     // nomenclature such that A0 => a, A1 => b, and A2 => c, we
2340     // shuffle such that each 24-bit chunk contains:
2341     //
2342     // b7 b6 b5 b4 b3 b2 b1 b0 | a7 a6 a5 a4 a3 a2 a1 a0 | c7 c6
2343     // c5 c4 c3 c2 c1 c0 | b7 b6 b5 b4 b3 b2 b1 b0
2344     // Explain this step.
2345     // b3 b2 b1 b0 c5 c4 c3 c2 | c1 c0 d5 d4 d3 d2 d1 d0 | a5 a4
2346     // a3 a2 a1 a0 b5 b4 | b3 b2 b1 b0 c5 c4 c3 c2
2347     //
2348     // W first and off all but bits 4-9 and 16-21 (c5..c0 and
2349     // a5..a0) and shift them using a vector multiplication
2350     // operation (vpmulhuw) which effectively shifts c right by 6
2351     // bits and a right by 10 bits.  We similarly mask bits 10-15
2352     // (d5..d0) and 22-27 (b5..b0) and shift them left by 8 and 4
2353     // bits respectively.  This is done using vpmullw.  We end up
2354     // with 4 6-bit values, thus splitting the 3 input bytes,
2355     // ready for encoding:
2356     //    0 0 d5..d0 0 0 c5..c0 0 0 b5..b0 0 0 a5..a0
2357     //
2358     // For translation, we recognize that there are 5 distinct
2359     // ranges of legal Base64 characters as below:
2360     //
2361     //   +-------------+-------------+------------+
2362     //   | 6-bit value | ASCII range |   offset   |
2363     //   +-------------+-------------+------------+
2364     //   |    0..25    |    A..Z     |     65     |
2365     //   |   26..51    |    a..z     |     71     |
2366     //   |   52..61    |    0..9     |     -4     |
2367     //   |     62      |   + or -    | -19 or -17 |
2368     //   |     63      |   / or _    | -16 or 32  |
2369     //   +-------------+-------------+------------+
2370     //
2371     // We note that vpshufb does a parallel lookup in a
2372     // destination register using the lower 4 bits of bytes from a
2373     // source register.  If we use a saturated subtraction and
2374     // subtract 51 from each 6-bit value, bytes from [0,51]
2375     // saturate to 0, and [52,63] map to a range of [1,12].  We
2376     // distinguish the [0,25] and [26,51] ranges by assigning a
2377     // value of 13 for all 6-bit values less than 26.  We end up
2378     // with:
2379     //
2380     //   +-------------+-------------+------------+
2381     //   | 6-bit value |   Reduced   |   offset   |
2382     //   +-------------+-------------+------------+
2383     //   |    0..25    |     13      |     65     |
2384     //   |   26..51    |      0      |     71     |
2385     //   |   52..61    |    0..9     |     -4     |
2386     //   |     62      |     11      | -19 or -17 |
2387     //   |     63      |     12      | -16 or 32  |
2388     //   +-------------+-------------+------------+
2389     //
2390     // We then use a final vpshufb to add the appropriate offset,
2391     // translating the bytes.
2392     //
2393     // Load input bytes - only 28 bytes.  Mask the first load to
2394     // not load into the full register.
2395     __ vpmaskmovd(xmm1, xmm1, Address(source, start_offset, Address::times_1, -4), Assembler::AVX_256bit);
2396 
2397     // Move 3-byte chunks of input (12 bytes) into 16 bytes,
2398     // ordering by:
2399     //   1, 0, 2, 1; 4, 3, 5, 4; etc.  This groups 6-bit chunks
2400     //   for easy masking
2401     __ vpshufb(xmm1, xmm1, xmm9, Assembler::AVX_256bit);
2402 
2403     __ addl(start_offset, 24);
2404 
2405     // Load masking register for first and third (and multiples)
2406     // 6-bit values.
2407     __ movl(rax, 0x003f03f0);
2408     __ movdl(xmm6, rax);
2409     __ vpbroadcastd(xmm6, xmm6, Assembler::AVX_256bit);
2410     // Multiplication constant for "shifting" left by 4 and 8 bits
2411     __ movl(rax, 0x01000010);
2412     __ movdl(xmm5, rax);
2413     __ vpbroadcastd(xmm5, xmm5, Assembler::AVX_256bit);
2414 
2415     // Isolate 6-bit chunks of interest
2416     __ vpand(xmm0, xmm8, xmm1, Assembler::AVX_256bit);
2417 
2418     // Load constants for encoding
2419     __ movl(rax, 0x19191919);
2420     __ movdl(xmm3, rax);
2421     __ vpbroadcastd(xmm3, xmm3, Assembler::AVX_256bit);
2422     __ movl(rax, 0x33333333);
2423     __ movdl(xmm4, rax);
2424     __ vpbroadcastd(xmm4, xmm4, Assembler::AVX_256bit);
2425 
2426     // Shift output bytes 0 and 2 into proper lanes
2427     __ vpmulhuw(xmm2, xmm0, xmm7, Assembler::AVX_256bit);
2428 
2429     // Mask and shift output bytes 1 and 3 into proper lanes and
2430     // combine
2431     __ vpand(xmm0, xmm6, xmm1, Assembler::AVX_256bit);
2432     __ vpmullw(xmm0, xmm5, xmm0, Assembler::AVX_256bit);
2433     __ vpor(xmm0, xmm0, xmm2, Assembler::AVX_256bit);
2434 
2435     // Find out which are 0..25.  This indicates which input
2436     // values fall in the range of 'A'-'Z', which require an
2437     // additional offset (see comments above)
2438     __ vpcmpgtb(xmm2, xmm0, xmm3, Assembler::AVX_256bit);
2439     __ vpsubusb(xmm1, xmm0, xmm4, Assembler::AVX_256bit);
2440     __ vpsubb(xmm1, xmm1, xmm2, Assembler::AVX_256bit);
2441 
2442     // Load the proper lookup table
2443     __ lea(r11, ExternalAddress(StubRoutines::x86::base64_avx2_lut_addr()));
2444     __ movl(r15, isURL);
2445     __ shll(r15, 5);
2446     __ vmovdqu(xmm2, Address(r11, r15));
2447 
2448     // Shuffle the offsets based on the range calculation done
2449     // above. This allows us to add the correct offset to the
2450     // 6-bit value corresponding to the range documented above.
2451     __ vpshufb(xmm1, xmm2, xmm1, Assembler::AVX_256bit);
2452     __ vpaddb(xmm0, xmm1, xmm0, Assembler::AVX_256bit);
2453 
2454     // Store the encoded bytes
2455     __ vmovdqu(Address(dest, dp), xmm0);
2456     __ addl(dp, 32);
2457 
2458     __ cmpl(length, 31);
2459     __ jcc(Assembler::belowEqual, L_process3);
2460 
2461     __ align32();
2462     __ BIND(L_32byteLoop);
2463 
2464     // Get next 32 bytes
2465     __ vmovdqu(xmm1, Address(source, start_offset, Address::times_1, -4));
2466 
2467     __ subl(length, 24);
2468     __ addl(start_offset, 24);
2469 
2470     // This logic is identical to the above, with only constant
2471     // register loads removed.  Shuffle the input, mask off 6-bit
2472     // chunks, shift them into place, then add the offset to
2473     // encode.
2474     __ vpshufb(xmm1, xmm1, xmm9, Assembler::AVX_256bit);
2475 
2476     __ vpand(xmm0, xmm8, xmm1, Assembler::AVX_256bit);
2477     __ vpmulhuw(xmm10, xmm0, xmm7, Assembler::AVX_256bit);
2478     __ vpand(xmm0, xmm6, xmm1, Assembler::AVX_256bit);
2479     __ vpmullw(xmm0, xmm5, xmm0, Assembler::AVX_256bit);
2480     __ vpor(xmm0, xmm0, xmm10, Assembler::AVX_256bit);
2481     __ vpcmpgtb(xmm10, xmm0, xmm3, Assembler::AVX_256bit);
2482     __ vpsubusb(xmm1, xmm0, xmm4, Assembler::AVX_256bit);
2483     __ vpsubb(xmm1, xmm1, xmm10, Assembler::AVX_256bit);
2484     __ vpshufb(xmm1, xmm2, xmm1, Assembler::AVX_256bit);
2485     __ vpaddb(xmm0, xmm1, xmm0, Assembler::AVX_256bit);
2486 
2487     // Store the encoded bytes
2488     __ vmovdqu(Address(dest, dp), xmm0);
2489     __ addl(dp, 32);
2490 
2491     __ cmpl(length, 31);
2492     __ jcc(Assembler::above, L_32byteLoop);
2493 
2494     __ BIND(L_process3);
2495     __ vzeroupper();
2496   } else {
2497     __ BIND(L_process3);
2498   }
2499 
2500   __ cmpl(length, 3);
2501   __ jcc(Assembler::below, L_exit);
2502 
2503   // Load the encoding table based on isURL
2504   __ lea(r11, ExternalAddress(StubRoutines::x86::base64_encoding_table_addr()));
2505   __ movl(r15, isURL);
2506   __ shll(r15, 6);
2507   __ addptr(r11, r15);
2508 
2509   __ BIND(L_processdata);
2510 
2511   // Load 3 bytes
2512   __ load_unsigned_byte(r15, Address(source, start_offset));
2513   __ load_unsigned_byte(r10, Address(source, start_offset, Address::times_1, 1));
2514   __ load_unsigned_byte(r13, Address(source, start_offset, Address::times_1, 2));
2515 
2516   // Build a 32-bit word with bytes 1, 2, 0, 1
2517   __ movl(rax, r10);
2518   __ shll(r10, 24);
2519   __ orl(rax, r10);
2520 
2521   __ subl(length, 3);
2522 
2523   __ shll(r15, 8);
2524   __ shll(r13, 16);
2525   __ orl(rax, r15);
2526 
2527   __ addl(start_offset, 3);
2528 
2529   __ orl(rax, r13);
2530   // At this point, rax contains | byte1 | byte2 | byte0 | byte1
2531   // r13 has byte2 << 16 - need low-order 6 bits to translate.
2532   // This translated byte is the fourth output byte.
2533   __ shrl(r13, 16);
2534   __ andl(r13, 0x3f);
2535 
2536   // The high-order 6 bits of r15 (byte0) is translated.
2537   // The translated byte is the first output byte.
2538   __ shrl(r15, 10);
2539 
2540   __ load_unsigned_byte(r13, Address(r11, r13));
2541   __ load_unsigned_byte(r15, Address(r11, r15));
2542 
2543   __ movb(Address(dest, dp, Address::times_1, 3), r13);
2544 
2545   // Extract high-order 4 bits of byte1 and low-order 2 bits of byte0.
2546   // This translated byte is the second output byte.
2547   __ shrl(rax, 4);
2548   __ movl(r10, rax);
2549   __ andl(rax, 0x3f);
2550 
2551   __ movb(Address(dest, dp, Address::times_1, 0), r15);
2552 
2553   __ load_unsigned_byte(rax, Address(r11, rax));
2554 
2555   // Extract low-order 2 bits of byte1 and high-order 4 bits of byte2.
2556   // This translated byte is the third output byte.
2557   __ shrl(r10, 18);
2558   __ andl(r10, 0x3f);
2559 
2560   __ load_unsigned_byte(r10, Address(r11, r10));
2561 
2562   __ movb(Address(dest, dp, Address::times_1, 1), rax);
2563   __ movb(Address(dest, dp, Address::times_1, 2), r10);
2564 
2565   __ addl(dp, 4);
2566   __ cmpl(length, 3);
2567   __ jcc(Assembler::aboveEqual, L_processdata);
2568 
2569   __ BIND(L_exit);
2570   __ pop_ppx(r15);
2571   __ pop_ppx(r14);
2572   __ pop_ppx(r13);
2573   __ pop_ppx(r12);
2574   __ leave();
2575   __ ret(0);
2576 
2577   // record the stub entry and end
2578   store_archive_data(stub_id, start, __ pc());
2579 
2580   return start;
2581 }
2582 
2583 // base64 AVX512vbmi tables
2584 address StubGenerator::base64_vbmi_lookup_lo_addr() {
2585   StubId stub_id = StubId::stubgen_lookup_lo_base64_id;
2586   int entry_count = StubInfo::entry_count(stub_id);
2587   assert(entry_count == 1, "sanity check");
2588   address start = load_archive_data(stub_id);
2589   if (start != nullptr) {
2590     return start;
2591   }
2592   __ align64();
2593   StubCodeMark mark(this, stub_id);
2594   start = __ pc();
2595 
2596   assert(((unsigned long long)start & 0x3f) == 0,
2597          "Alignment problem (0x%08llx)", (unsigned long long)start);
2598   __ emit_data64(0x8080808080808080, relocInfo::none);
2599   __ emit_data64(0x8080808080808080, relocInfo::none);
2600   __ emit_data64(0x8080808080808080, relocInfo::none);
2601   __ emit_data64(0x8080808080808080, relocInfo::none);
2602   __ emit_data64(0x8080808080808080, relocInfo::none);
2603   __ emit_data64(0x3f8080803e808080, relocInfo::none);
2604   __ emit_data64(0x3b3a393837363534, relocInfo::none);
2605   __ emit_data64(0x8080808080803d3c, relocInfo::none);
2606 
2607   // record the stub entry and end
2608   store_archive_data(stub_id, start, __ pc());
2609 
2610   return start;
2611 }
2612 
2613 address StubGenerator::base64_vbmi_lookup_hi_addr() {
2614   StubId stub_id = StubId::stubgen_lookup_hi_base64_id;
2615   int entry_count = StubInfo::entry_count(stub_id);
2616   assert(entry_count == 1, "sanity check");
2617   address start = load_archive_data(stub_id);
2618   if (start != nullptr) {
2619     return start;
2620   }
2621   __ align64();
2622   StubCodeMark mark(this, stub_id);
2623   start = __ pc();
2624 
2625   assert(((unsigned long long)start & 0x3f) == 0,
2626          "Alignment problem (0x%08llx)", (unsigned long long)start);
2627   __ emit_data64(0x0605040302010080, relocInfo::none);
2628   __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2629   __ emit_data64(0x161514131211100f, relocInfo::none);
2630   __ emit_data64(0x8080808080191817, relocInfo::none);
2631   __ emit_data64(0x201f1e1d1c1b1a80, relocInfo::none);
2632   __ emit_data64(0x2827262524232221, relocInfo::none);
2633   __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2634   __ emit_data64(0x8080808080333231, relocInfo::none);
2635 
2636   // record the stub entry and end
2637   store_archive_data(stub_id, start, __ pc());
2638 
2639   return start;
2640 }
2641 address StubGenerator::base64_vbmi_lookup_lo_url_addr() {
2642   StubId stub_id = StubId::stubgen_lookup_lo_base64url_id;
2643   int entry_count = StubInfo::entry_count(stub_id);
2644   assert(entry_count == 1, "sanity check");
2645   address start = load_archive_data(stub_id);
2646   if (start != nullptr) {
2647     return start;
2648   }
2649   __ align64();
2650   StubCodeMark mark(this, stub_id);
2651   start = __ pc();
2652 
2653   assert(((unsigned long long)start & 0x3f) == 0,
2654          "Alignment problem (0x%08llx)", (unsigned long long)start);
2655   __ emit_data64(0x8080808080808080, relocInfo::none);
2656   __ emit_data64(0x8080808080808080, relocInfo::none);
2657   __ emit_data64(0x8080808080808080, relocInfo::none);
2658   __ emit_data64(0x8080808080808080, relocInfo::none);
2659   __ emit_data64(0x8080808080808080, relocInfo::none);
2660   __ emit_data64(0x80803e8080808080, relocInfo::none);
2661   __ emit_data64(0x3b3a393837363534, relocInfo::none);
2662   __ emit_data64(0x8080808080803d3c, relocInfo::none);
2663 
2664   // record the stub entry and end
2665   store_archive_data(stub_id, start, __ pc());
2666 
2667   return start;
2668 }
2669 
2670 address StubGenerator::base64_vbmi_lookup_hi_url_addr() {
2671   StubId stub_id = StubId::stubgen_lookup_hi_base64url_id;
2672   int entry_count = StubInfo::entry_count(stub_id);
2673   assert(entry_count == 1, "sanity check");
2674   address start = load_archive_data(stub_id);
2675   if (start != nullptr) {
2676     return start;
2677   }
2678   __ align64();
2679   StubCodeMark mark(this, stub_id);
2680   start = __ pc();
2681 
2682   assert(((unsigned long long)start & 0x3f) == 0,
2683          "Alignment problem (0x%08llx)", (unsigned long long)start);
2684   __ emit_data64(0x0605040302010080, relocInfo::none);
2685   __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2686   __ emit_data64(0x161514131211100f, relocInfo::none);
2687   __ emit_data64(0x3f80808080191817, relocInfo::none);
2688   __ emit_data64(0x201f1e1d1c1b1a80, relocInfo::none);
2689   __ emit_data64(0x2827262524232221, relocInfo::none);
2690   __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2691   __ emit_data64(0x8080808080333231, relocInfo::none);
2692 
2693   // record the stub entry and end
2694   store_archive_data(stub_id, start, __ pc());
2695 
2696   return start;
2697 }
2698 
2699 address StubGenerator::base64_vbmi_pack_vec_addr() {
2700   StubId stub_id = StubId::stubgen_pack_vec_base64_id;
2701   int entry_count = StubInfo::entry_count(stub_id);
2702   assert(entry_count == 1, "sanity check");
2703   address start = load_archive_data(stub_id);
2704   if (start != nullptr) {
2705     return start;
2706   }
2707   __ align64();
2708   StubCodeMark mark(this, stub_id);
2709   start = __ pc();
2710 
2711   assert(((unsigned long long)start & 0x3f) == 0,
2712          "Alignment problem (0x%08llx)", (unsigned long long)start);
2713   __ emit_data64(0x090a040506000102, relocInfo::none);
2714   __ emit_data64(0x161011120c0d0e08, relocInfo::none);
2715   __ emit_data64(0x1c1d1e18191a1415, relocInfo::none);
2716   __ emit_data64(0x292a242526202122, relocInfo::none);
2717   __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2718   __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2719   __ emit_data64(0x0000000000000000, relocInfo::none);
2720   __ emit_data64(0x0000000000000000, relocInfo::none);
2721 
2722   // record the stub entry and end
2723   store_archive_data(stub_id, start, __ pc());
2724 
2725   return start;
2726 }
2727 
2728 address StubGenerator::base64_vbmi_join_0_1_addr() {
2729   StubId stub_id = StubId::stubgen_join_0_1_base64_id;
2730   int entry_count = StubInfo::entry_count(stub_id);
2731   assert(entry_count == 1, "sanity check");
2732   address start = load_archive_data(stub_id);
2733   if (start != nullptr) {
2734     return start;
2735   }
2736   __ align64();
2737   StubCodeMark mark(this, stub_id);
2738   start = __ pc();
2739 
2740   assert(((unsigned long long)start & 0x3f) == 0,
2741          "Alignment problem (0x%08llx)", (unsigned long long)start);
2742   __ emit_data64(0x090a040506000102, relocInfo::none);
2743   __ emit_data64(0x161011120c0d0e08, relocInfo::none);
2744   __ emit_data64(0x1c1d1e18191a1415, relocInfo::none);
2745   __ emit_data64(0x292a242526202122, relocInfo::none);
2746   __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2747   __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2748   __ emit_data64(0x494a444546404142, relocInfo::none);
2749   __ emit_data64(0x565051524c4d4e48, relocInfo::none);
2750 
2751   // record the stub entry and end
2752   store_archive_data(stub_id, start, __ pc());
2753 
2754   return start;
2755 }
2756 
2757 address StubGenerator::base64_vbmi_join_1_2_addr() {
2758   StubId stub_id = StubId::stubgen_join_1_2_base64_id;
2759   int entry_count = StubInfo::entry_count(stub_id);
2760   assert(entry_count == 1, "sanity check");
2761   address start = load_archive_data(stub_id);
2762   if (start != nullptr) {
2763     return start;
2764   }
2765   __ align64();
2766   StubCodeMark mark(this, stub_id);
2767   start = __ pc();
2768 
2769   assert(((unsigned long long)start & 0x3f) == 0,
2770          "Alignment problem (0x%08llx)", (unsigned long long)start);
2771   __ emit_data64(0x1c1d1e18191a1415, relocInfo::none);
2772   __ emit_data64(0x292a242526202122, relocInfo::none);
2773   __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2774   __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2775   __ emit_data64(0x494a444546404142, relocInfo::none);
2776   __ emit_data64(0x565051524c4d4e48, relocInfo::none);
2777   __ emit_data64(0x5c5d5e58595a5455, relocInfo::none);
2778   __ emit_data64(0x696a646566606162, relocInfo::none);
2779 
2780   // record the stub entry and end
2781   store_archive_data(stub_id, start, __ pc());
2782 
2783   return start;
2784 }
2785 
2786 address StubGenerator::base64_vbmi_join_2_3_addr() {
2787   StubId stub_id = StubId::stubgen_join_2_3_base64_id;
2788   int entry_count = StubInfo::entry_count(stub_id);
2789   assert(entry_count == 1, "sanity check");
2790   address start = load_archive_data(stub_id);
2791   if (start != nullptr) {
2792     return start;
2793   }
2794   __ align64();
2795   StubCodeMark mark(this, stub_id);
2796   start = __ pc();
2797 
2798   assert(((unsigned long long)start & 0x3f) == 0,
2799          "Alignment problem (0x%08llx)", (unsigned long long)start);
2800   __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2801   __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2802   __ emit_data64(0x494a444546404142, relocInfo::none);
2803   __ emit_data64(0x565051524c4d4e48, relocInfo::none);
2804   __ emit_data64(0x5c5d5e58595a5455, relocInfo::none);
2805   __ emit_data64(0x696a646566606162, relocInfo::none);
2806   __ emit_data64(0x767071726c6d6e68, relocInfo::none);
2807   __ emit_data64(0x7c7d7e78797a7475, relocInfo::none);
2808 
2809   // record the stub entry and end
2810   store_archive_data(stub_id, start, __ pc());
2811 
2812   return start;
2813 }
2814 
2815 address StubGenerator::base64_AVX2_decode_tables_addr() {
2816   StubId stub_id = StubId::stubgen_avx2_decode_tables_base64_id;
2817   int entry_count = StubInfo::entry_count(stub_id);
2818   assert(entry_count == 1, "sanity check");
2819   address start = load_archive_data(stub_id);
2820   if (start != nullptr) {
2821     return start;
2822   }
2823   __ align64();
2824   StubCodeMark mark(this, stub_id);
2825   start = __ pc();
2826 
2827   assert(((unsigned long long)start & 0x3f) == 0,
2828          "Alignment problem (0x%08llx)", (unsigned long long)start);
2829   __ emit_data(0x2f2f2f2f, relocInfo::none, 0);
2830   __ emit_data(0x5f5f5f5f, relocInfo::none, 0);  // for URL
2831 
2832   __ emit_data(0xffffffff, relocInfo::none, 0);
2833   __ emit_data(0xfcfcfcfc, relocInfo::none, 0);  // for URL
2834 
2835   // Permute table
2836   __ emit_data64(0x0000000100000000, relocInfo::none);
2837   __ emit_data64(0x0000000400000002, relocInfo::none);
2838   __ emit_data64(0x0000000600000005, relocInfo::none);
2839   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2840 
2841   // Shuffle table
2842   __ emit_data64(0x090a040506000102, relocInfo::none);
2843   __ emit_data64(0xffffffff0c0d0e08, relocInfo::none);
2844   __ emit_data64(0x090a040506000102, relocInfo::none);
2845   __ emit_data64(0xffffffff0c0d0e08, relocInfo::none);
2846 
2847   // merge table
2848   __ emit_data(0x01400140, relocInfo::none, 0);
2849 
2850   // merge multiplier
2851   __ emit_data(0x00011000, relocInfo::none, 0);
2852 
2853   // record the stub entry and end
2854   store_archive_data(stub_id, start, __ pc());
2855 
2856   return start;
2857 }
2858 
2859 address StubGenerator::base64_AVX2_decode_LUT_tables_addr() {
2860   StubId stub_id = StubId::stubgen_avx2_decode_lut_tables_base64_id;
2861   int entry_count = StubInfo::entry_count(stub_id);
2862   assert(entry_count == 1, "sanity check");
2863   address start = load_archive_data(stub_id);
2864   if (start != nullptr) {
2865     return start;
2866   }
2867   __ align64();
2868   StubCodeMark mark(this, stub_id);
2869   start = __ pc();
2870 
2871   assert(((unsigned long long)start & 0x3f) == 0,
2872          "Alignment problem (0x%08llx)", (unsigned long long)start);
2873   // lut_lo
2874   __ emit_data64(0x1111111111111115, relocInfo::none);
2875   __ emit_data64(0x1a1b1b1b1a131111, relocInfo::none);
2876   __ emit_data64(0x1111111111111115, relocInfo::none);
2877   __ emit_data64(0x1a1b1b1b1a131111, relocInfo::none);
2878 
2879   // lut_roll
2880   __ emit_data64(0xb9b9bfbf04131000, relocInfo::none);
2881   __ emit_data64(0x0000000000000000, relocInfo::none);
2882   __ emit_data64(0xb9b9bfbf04131000, relocInfo::none);
2883   __ emit_data64(0x0000000000000000, relocInfo::none);
2884 
2885   // lut_lo URL
2886   __ emit_data64(0x1111111111111115, relocInfo::none);
2887   __ emit_data64(0x1b1b1a1b1b131111, relocInfo::none);
2888   __ emit_data64(0x1111111111111115, relocInfo::none);
2889   __ emit_data64(0x1b1b1a1b1b131111, relocInfo::none);
2890 
2891   // lut_roll URL
2892   __ emit_data64(0xb9b9bfbf0411e000, relocInfo::none);
2893   __ emit_data64(0x0000000000000000, relocInfo::none);
2894   __ emit_data64(0xb9b9bfbf0411e000, relocInfo::none);
2895   __ emit_data64(0x0000000000000000, relocInfo::none);
2896 
2897   // lut_hi
2898   __ emit_data64(0x0804080402011010, relocInfo::none);
2899   __ emit_data64(0x1010101010101010, relocInfo::none);
2900   __ emit_data64(0x0804080402011010, relocInfo::none);
2901   __ emit_data64(0x1010101010101010, relocInfo::none);
2902 
2903   // record the stub entry and end
2904   store_archive_data(stub_id, start, __ pc());
2905 
2906   return start;
2907 }
2908 
2909 address StubGenerator::base64_decoding_table_addr() {
2910   StubId stub_id = StubId::stubgen_decoding_table_base64_id;
2911   int entry_count = StubInfo::entry_count(stub_id);
2912   assert(entry_count == 1, "sanity check");
2913   address start = load_archive_data(stub_id);
2914   if (start != nullptr) {
2915     return start;
2916   }
2917   StubCodeMark mark(this, stub_id);
2918   start = __ pc();
2919 
2920   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2921   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2922   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2923   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2924   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2925   __ emit_data64(0x3fffffff3effffff, relocInfo::none);
2926   __ emit_data64(0x3b3a393837363534, relocInfo::none);
2927   __ emit_data64(0xffffffffffff3d3c, relocInfo::none);
2928   __ emit_data64(0x06050403020100ff, relocInfo::none);
2929   __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2930   __ emit_data64(0x161514131211100f, relocInfo::none);
2931   __ emit_data64(0xffffffffff191817, relocInfo::none);
2932   __ emit_data64(0x201f1e1d1c1b1aff, relocInfo::none);
2933   __ emit_data64(0x2827262524232221, relocInfo::none);
2934   __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2935   __ emit_data64(0xffffffffff333231, relocInfo::none);
2936   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2937   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2938   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2939   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2940   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2941   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2942   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2943   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2944   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2945   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2946   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2947   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2948   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2949   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2950   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2951   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2952 
2953   // URL table
2954   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2955   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2956   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2957   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2958   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2959   __ emit_data64(0xffff3effffffffff, relocInfo::none);
2960   __ emit_data64(0x3b3a393837363534, relocInfo::none);
2961   __ emit_data64(0xffffffffffff3d3c, relocInfo::none);
2962   __ emit_data64(0x06050403020100ff, relocInfo::none);
2963   __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2964   __ emit_data64(0x161514131211100f, relocInfo::none);
2965   __ emit_data64(0x3fffffffff191817, relocInfo::none);
2966   __ emit_data64(0x201f1e1d1c1b1aff, relocInfo::none);
2967   __ emit_data64(0x2827262524232221, relocInfo::none);
2968   __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2969   __ emit_data64(0xffffffffff333231, relocInfo::none);
2970   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2971   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2972   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2973   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2974   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2975   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2976   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2977   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2978   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2979   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2980   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2981   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2982   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2983   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2984   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2985   __ emit_data64(0xffffffffffffffff, relocInfo::none);
2986 
2987   // record the stub entry and end
2988   store_archive_data(stub_id, start, __ pc());
2989 
2990   return start;
2991 }
2992 
2993 
2994 // Code for generating Base64 decoding.
2995 //
2996 // Based on the article (and associated code) from https://arxiv.org/abs/1910.05109.
2997 //
2998 // Intrinsic function prototype in Base64.java:
2999 // private void decodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL, isMIME) {
3000 address StubGenerator::generate_base64_decodeBlock() {
3001   StubId stub_id = StubId::stubgen_base64_decodeBlock_id;
3002   int entry_count = StubInfo::entry_count(stub_id);
3003   assert(entry_count == 1, "sanity check");
3004   address start = load_archive_data(stub_id);
3005   if (start != nullptr) {
3006     return start;
3007   }
3008   __ align(CodeEntryAlignment);
3009   StubCodeMark mark(this, stub_id);
3010   start = __ pc();
3011 
3012   __ enter();
3013 
3014   // Save callee-saved registers before using them
3015   __ push_ppx(r12);
3016   __ push_ppx(r13);
3017   __ push_ppx(r14);
3018   __ push_ppx(r15);
3019   __ push_ppx(rbx);
3020 
3021   // arguments
3022   const Register source = c_rarg0; // Source Array
3023   const Register start_offset = c_rarg1; // start offset
3024   const Register end_offset = c_rarg2; // end offset
3025   const Register dest = c_rarg3; // destination array
3026   const Register isMIME = rbx;
3027 
3028 #ifndef _WIN64
3029   const Register dp = c_rarg4;  // Position for writing to dest array
3030   const Register isURL = c_rarg5;// Base64 or URL character set
3031   __ movl(isMIME, Address(rbp, 2 * wordSize));
3032 #else
3033   const Address dp_mem(rbp, 6 * wordSize);  // length is on stack on Win64
3034   const Address isURL_mem(rbp, 7 * wordSize);
3035   const Register isURL = r10;      // pick the volatile windows register
3036   const Register dp = r12;
3037   __ movl(dp, dp_mem);
3038   __ movl(isURL, isURL_mem);
3039   __ movl(isMIME, Address(rbp, 8 * wordSize));
3040 #endif
3041 
3042   const XMMRegister lookup_lo = xmm5;
3043   const XMMRegister lookup_hi = xmm6;
3044   const XMMRegister errorvec = xmm7;
3045   const XMMRegister pack16_op = xmm9;
3046   const XMMRegister pack32_op = xmm8;
3047   const XMMRegister input0 = xmm3;
3048   const XMMRegister input1 = xmm20;
3049   const XMMRegister input2 = xmm21;
3050   const XMMRegister input3 = xmm19;
3051   const XMMRegister join01 = xmm12;
3052   const XMMRegister join12 = xmm11;
3053   const XMMRegister join23 = xmm10;
3054   const XMMRegister translated0 = xmm2;
3055   const XMMRegister translated1 = xmm1;
3056   const XMMRegister translated2 = xmm0;
3057   const XMMRegister translated3 = xmm4;
3058 
3059   const XMMRegister merged0 = xmm2;
3060   const XMMRegister merged1 = xmm1;
3061   const XMMRegister merged2 = xmm0;
3062   const XMMRegister merged3 = xmm4;
3063   const XMMRegister merge_ab_bc0 = xmm2;
3064   const XMMRegister merge_ab_bc1 = xmm1;
3065   const XMMRegister merge_ab_bc2 = xmm0;
3066   const XMMRegister merge_ab_bc3 = xmm4;
3067 
3068   const XMMRegister pack24bits = xmm4;
3069 
3070   const Register length = r14;
3071   const Register output_size = r13;
3072   const Register output_mask = r15;
3073   const KRegister input_mask = k1;
3074 
3075   const XMMRegister input_initial_valid_b64 = xmm0;
3076   const XMMRegister tmp = xmm10;
3077   const XMMRegister mask = xmm0;
3078   const XMMRegister invalid_b64 = xmm1;
3079 
3080   Label L_process256, L_process64, L_process64Loop, L_exit, L_processdata, L_loadURL;
3081   Label L_continue, L_finalBit, L_padding, L_donePadding, L_bruteForce;
3082   Label L_forceLoop, L_bottomLoop, L_checkMIME, L_exit_no_vzero, L_lastChunk;
3083 
3084   // calculate length from offsets
3085   __ movl(length, end_offset);
3086   __ subl(length, start_offset);
3087   __ push_ppx(dest);          // Save for return value calc
3088 
3089   // If AVX512 VBMI not supported, just compile non-AVX code
3090   if(VM_Version::supports_avx512_vbmi() &&
3091      VM_Version::supports_avx512bw()) {
3092     __ cmpl(length, 31);     // 32-bytes is break-even for AVX-512
3093     __ jcc(Assembler::lessEqual, L_lastChunk);
3094 
3095     __ cmpl(isMIME, 0);
3096     __ jcc(Assembler::notEqual, L_lastChunk);
3097 
3098     // Load lookup tables based on isURL
3099     __ cmpl(isURL, 0);
3100     __ jcc(Assembler::notZero, L_loadURL);
3101 
3102     __ evmovdquq(lookup_lo, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_lo_addr()), Assembler::AVX_512bit, r13);
3103     __ evmovdquq(lookup_hi, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_hi_addr()), Assembler::AVX_512bit, r13);
3104 
3105     __ BIND(L_continue);
3106 
3107     __ movl(r15, 0x01400140);
3108     __ evpbroadcastd(pack16_op, r15, Assembler::AVX_512bit);
3109 
3110     __ movl(r15, 0x00011000);
3111     __ evpbroadcastd(pack32_op, r15, Assembler::AVX_512bit);
3112 
3113     __ cmpl(length, 0xff);
3114     __ jcc(Assembler::lessEqual, L_process64);
3115 
3116     // load masks required for decoding data
3117     __ BIND(L_processdata);
3118     __ evmovdquq(join01, ExternalAddress(StubRoutines::x86::base64_vbmi_join_0_1_addr()), Assembler::AVX_512bit,r13);
3119     __ evmovdquq(join12, ExternalAddress(StubRoutines::x86::base64_vbmi_join_1_2_addr()), Assembler::AVX_512bit, r13);
3120     __ evmovdquq(join23, ExternalAddress(StubRoutines::x86::base64_vbmi_join_2_3_addr()), Assembler::AVX_512bit, r13);
3121 
3122     __ align32();
3123     __ BIND(L_process256);
3124     // Grab input data
3125     __ evmovdquq(input0, Address(source, start_offset, Address::times_1, 0x00), Assembler::AVX_512bit);
3126     __ evmovdquq(input1, Address(source, start_offset, Address::times_1, 0x40), Assembler::AVX_512bit);
3127     __ evmovdquq(input2, Address(source, start_offset, Address::times_1, 0x80), Assembler::AVX_512bit);
3128     __ evmovdquq(input3, Address(source, start_offset, Address::times_1, 0xc0), Assembler::AVX_512bit);
3129 
3130     // Copy the low part of the lookup table into the destination of the permutation
3131     __ evmovdquq(translated0, lookup_lo, Assembler::AVX_512bit);
3132     __ evmovdquq(translated1, lookup_lo, Assembler::AVX_512bit);
3133     __ evmovdquq(translated2, lookup_lo, Assembler::AVX_512bit);
3134     __ evmovdquq(translated3, lookup_lo, Assembler::AVX_512bit);
3135 
3136     // Translate the base64 input into "decoded" bytes
3137     __ evpermt2b(translated0, input0, lookup_hi, Assembler::AVX_512bit);
3138     __ evpermt2b(translated1, input1, lookup_hi, Assembler::AVX_512bit);
3139     __ evpermt2b(translated2, input2, lookup_hi, Assembler::AVX_512bit);
3140     __ evpermt2b(translated3, input3, lookup_hi, Assembler::AVX_512bit);
3141 
3142     // OR all of the translations together to check for errors (high-order bit of byte set)
3143     __ vpternlogd(input0, 0xfe, input1, input2, Assembler::AVX_512bit);
3144 
3145     __ vpternlogd(input3, 0xfe, translated0, translated1, Assembler::AVX_512bit);
3146     __ vpternlogd(input0, 0xfe, translated2, translated3, Assembler::AVX_512bit);
3147     __ vpor(errorvec, input3, input0, Assembler::AVX_512bit);
3148 
3149     // Check if there was an error - if so, try 64-byte chunks
3150     __ evpmovb2m(k3, errorvec, Assembler::AVX_512bit);
3151     __ kortestql(k3, k3);
3152     __ jcc(Assembler::notZero, L_process64);
3153 
3154     // The merging and shuffling happens here
3155     // We multiply each byte pair [00dddddd | 00cccccc | 00bbbbbb | 00aaaaaa]
3156     // Multiply [00cccccc] by 2^6 added to [00dddddd] to get [0000cccc | ccdddddd]
3157     // The pack16_op is a vector of 0x01400140, so multiply D by 1 and C by 0x40
3158     __ vpmaddubsw(merge_ab_bc0, translated0, pack16_op, Assembler::AVX_512bit);
3159     __ vpmaddubsw(merge_ab_bc1, translated1, pack16_op, Assembler::AVX_512bit);
3160     __ vpmaddubsw(merge_ab_bc2, translated2, pack16_op, Assembler::AVX_512bit);
3161     __ vpmaddubsw(merge_ab_bc3, translated3, pack16_op, Assembler::AVX_512bit);
3162 
3163     // Now do the same with packed 16-bit values.
3164     // We start with [0000cccc | ccdddddd | 0000aaaa | aabbbbbb]
3165     // pack32_op is 0x00011000 (2^12, 1), so this multiplies [0000aaaa | aabbbbbb] by 2^12
3166     // and adds [0000cccc | ccdddddd] to yield [00000000 | aaaaaabb | bbbbcccc | ccdddddd]
3167     __ vpmaddwd(merged0, merge_ab_bc0, pack32_op, Assembler::AVX_512bit);
3168     __ vpmaddwd(merged1, merge_ab_bc1, pack32_op, Assembler::AVX_512bit);
3169     __ vpmaddwd(merged2, merge_ab_bc2, pack32_op, Assembler::AVX_512bit);
3170     __ vpmaddwd(merged3, merge_ab_bc3, pack32_op, Assembler::AVX_512bit);
3171 
3172     // The join vectors specify which byte from which vector goes into the outputs
3173     // One of every 4 bytes in the extended vector is zero, so we pack them into their
3174     // final positions in the register for storing (256 bytes in, 192 bytes out)
3175     __ evpermt2b(merged0, join01, merged1, Assembler::AVX_512bit);
3176     __ evpermt2b(merged1, join12, merged2, Assembler::AVX_512bit);
3177     __ evpermt2b(merged2, join23, merged3, Assembler::AVX_512bit);
3178 
3179     // Store result
3180     __ evmovdquq(Address(dest, dp, Address::times_1, 0x00), merged0, Assembler::AVX_512bit);
3181     __ evmovdquq(Address(dest, dp, Address::times_1, 0x40), merged1, Assembler::AVX_512bit);
3182     __ evmovdquq(Address(dest, dp, Address::times_1, 0x80), merged2, Assembler::AVX_512bit);
3183 
3184     __ addptr(source, 0x100);
3185     __ addptr(dest, 0xc0);
3186     __ subl(length, 0x100);
3187     __ cmpl(length, 64 * 4);
3188     __ jcc(Assembler::greaterEqual, L_process256);
3189 
3190     // At this point, we've decoded 64 * 4 * n bytes.
3191     // The remaining length will be <= 64 * 4 - 1.
3192     // UNLESS there was an error decoding the first 256-byte chunk.  In this
3193     // case, the length will be arbitrarily long.
3194     //
3195     // Note that this will be the path for MIME-encoded strings.
3196 
3197     __ BIND(L_process64);
3198 
3199     __ evmovdquq(pack24bits, ExternalAddress(StubRoutines::x86::base64_vbmi_pack_vec_addr()), Assembler::AVX_512bit, r13);
3200 
3201     __ cmpl(length, 63);
3202     __ jcc(Assembler::lessEqual, L_finalBit);
3203 
3204     __ mov64(rax, 0x0000ffffffffffff);
3205     __ kmovql(k2, rax);
3206 
3207     __ align32();
3208     __ BIND(L_process64Loop);
3209 
3210     // Handle first 64-byte block
3211 
3212     __ evmovdquq(input0, Address(source, start_offset), Assembler::AVX_512bit);
3213     __ evmovdquq(translated0, lookup_lo, Assembler::AVX_512bit);
3214     __ evpermt2b(translated0, input0, lookup_hi, Assembler::AVX_512bit);
3215 
3216     __ vpor(errorvec, translated0, input0, Assembler::AVX_512bit);
3217 
3218     // Check for error and bomb out before updating dest
3219     __ evpmovb2m(k3, errorvec, Assembler::AVX_512bit);
3220     __ kortestql(k3, k3);
3221     __ jcc(Assembler::notZero, L_exit);
3222 
3223     // Pack output register, selecting correct byte ordering
3224     __ vpmaddubsw(merge_ab_bc0, translated0, pack16_op, Assembler::AVX_512bit);
3225     __ vpmaddwd(merged0, merge_ab_bc0, pack32_op, Assembler::AVX_512bit);
3226     __ vpermb(merged0, pack24bits, merged0, Assembler::AVX_512bit);
3227 
3228     __ evmovdqub(Address(dest, dp), k2, merged0, true, Assembler::AVX_512bit);
3229 
3230     __ subl(length, 64);
3231     __ addptr(source, 64);
3232     __ addptr(dest, 48);
3233 
3234     __ cmpl(length, 64);
3235     __ jcc(Assembler::greaterEqual, L_process64Loop);
3236 
3237     __ cmpl(length, 0);
3238     __ jcc(Assembler::lessEqual, L_exit);
3239 
3240     __ BIND(L_finalBit);
3241     // Now have 1 to 63 bytes left to decode
3242 
3243     // I was going to let Java take care of the final fragment
3244     // however it will repeatedly call this routine for every 4 bytes
3245     // of input data, so handle the rest here.
3246     __ movq(rax, -1);
3247     __ bzhiq(rax, rax, length);    // Input mask in rax
3248 
3249     __ movl(output_size, length);
3250     __ shrl(output_size, 2);   // Find (len / 4) * 3 (output length)
3251     __ lea(output_size, Address(output_size, output_size, Address::times_2, 0));
3252     // output_size in r13
3253 
3254     // Strip pad characters, if any, and adjust length and mask
3255     __ addq(length, start_offset);
3256     __ cmpb(Address(source, length, Address::times_1, -1), '=');
3257     __ jcc(Assembler::equal, L_padding);
3258 
3259     __ BIND(L_donePadding);
3260     __ subq(length, start_offset);
3261 
3262     // Output size is (64 - output_size), output mask is (all 1s >> output_size).
3263     __ kmovql(input_mask, rax);
3264     __ movq(output_mask, -1);
3265     __ bzhiq(output_mask, output_mask, output_size);
3266 
3267     // Load initial input with all valid base64 characters.  Will be used
3268     // in merging source bytes to avoid masking when determining if an error occurred.
3269     __ movl(rax, 0x61616161);
3270     __ evpbroadcastd(input_initial_valid_b64, rax, Assembler::AVX_512bit);
3271 
3272     // A register containing all invalid base64 decoded values
3273     __ movl(rax, 0x80808080);
3274     __ evpbroadcastd(invalid_b64, rax, Assembler::AVX_512bit);
3275 
3276     // input_mask is in k1
3277     // output_size is in r13
3278     // output_mask is in r15
3279     // zmm0 - free
3280     // zmm1 - 0x00011000
3281     // zmm2 - 0x01400140
3282     // zmm3 - errorvec
3283     // zmm4 - pack vector
3284     // zmm5 - lookup_lo
3285     // zmm6 - lookup_hi
3286     // zmm7 - errorvec
3287     // zmm8 - 0x61616161
3288     // zmm9 - 0x80808080
3289 
3290     // Load only the bytes from source, merging into our "fully-valid" register
3291     __ evmovdqub(input_initial_valid_b64, input_mask, Address(source, start_offset, Address::times_1, 0x0), true, Assembler::AVX_512bit);
3292 
3293     // Decode all bytes within our merged input
3294     __ evmovdquq(tmp, lookup_lo, Assembler::AVX_512bit);
3295     __ evpermt2b(tmp, input_initial_valid_b64, lookup_hi, Assembler::AVX_512bit);
3296     __ evporq(mask, tmp, input_initial_valid_b64, Assembler::AVX_512bit);
3297 
3298     // Check for error.  Compare (decoded | initial) to all invalid.
3299     // If any bytes have their high-order bit set, then we have an error.
3300     __ evptestmb(k2, mask, invalid_b64, Assembler::AVX_512bit);
3301     __ kortestql(k2, k2);
3302 
3303     // If we have an error, use the brute force loop to decode what we can (4-byte chunks).
3304     __ jcc(Assembler::notZero, L_bruteForce);
3305 
3306     // Shuffle output bytes
3307     __ vpmaddubsw(tmp, tmp, pack16_op, Assembler::AVX_512bit);
3308     __ vpmaddwd(tmp, tmp, pack32_op, Assembler::AVX_512bit);
3309 
3310     __ vpermb(tmp, pack24bits, tmp, Assembler::AVX_512bit);
3311     __ kmovql(k1, output_mask);
3312     __ evmovdqub(Address(dest, dp), k1, tmp, true, Assembler::AVX_512bit);
3313 
3314     __ addptr(dest, output_size);
3315 
3316     __ BIND(L_exit);
3317     __ vzeroupper();
3318     __ pop_ppx(rax);             // Get original dest value
3319     __ subptr(dest, rax);      // Number of bytes converted
3320     __ movptr(rax, dest);
3321     __ pop_ppx(rbx);
3322     __ pop_ppx(r15);
3323     __ pop_ppx(r14);
3324     __ pop_ppx(r13);
3325     __ pop_ppx(r12);
3326     __ leave();
3327     __ ret(0);
3328 
3329     __ BIND(L_loadURL);
3330     __ evmovdquq(lookup_lo, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_lo_url_addr()), Assembler::AVX_512bit, r13);
3331     __ evmovdquq(lookup_hi, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_hi_url_addr()), Assembler::AVX_512bit, r13);
3332     __ jmp(L_continue);
3333 
3334     __ BIND(L_padding);
3335     __ decrementq(output_size, 1);
3336     __ shrq(rax, 1);
3337 
3338     __ cmpb(Address(source, length, Address::times_1, -2), '=');
3339     __ jcc(Assembler::notEqual, L_donePadding);
3340 
3341     __ decrementq(output_size, 1);
3342     __ shrq(rax, 1);
3343     __ jmp(L_donePadding);
3344 
3345     __ align32();
3346     __ BIND(L_bruteForce);
3347   }   // End of if(avx512_vbmi)
3348 
3349   if (VM_Version::supports_avx2()) {
3350     Label L_tailProc, L_topLoop, L_enterLoop;
3351 
3352     __ cmpl(isMIME, 0);
3353     __ jcc(Assembler::notEqual, L_lastChunk);
3354 
3355     // Check for buffer too small (for algorithm)
3356     __ subl(length, 0x2c);
3357     __ jcc(Assembler::less, L_tailProc);
3358 
3359     __ shll(isURL, 2);
3360 
3361     // Algorithm adapted from https://arxiv.org/abs/1704.00605, "Faster Base64
3362     // Encoding and Decoding using AVX2 Instructions".  URL modifications added.
3363 
3364     // Set up constants
3365     __ lea(r13, ExternalAddress(StubRoutines::x86::base64_AVX2_decode_tables_addr()));
3366     __ vpbroadcastd(xmm4, Address(r13, isURL, Address::times_1), Assembler::AVX_256bit);  // 2F or 5F
3367     __ vpbroadcastd(xmm10, Address(r13, isURL, Address::times_1, 0x08), Assembler::AVX_256bit);  // -1 or -4
3368     __ vmovdqu(xmm12, Address(r13, 0x10));  // permute
3369     __ vmovdqu(xmm13, Address(r13, 0x30)); // shuffle
3370     __ vpbroadcastd(xmm7, Address(r13, 0x50), Assembler::AVX_256bit);  // merge
3371     __ vpbroadcastd(xmm6, Address(r13, 0x54), Assembler::AVX_256bit);  // merge mult
3372 
3373     __ lea(r13, ExternalAddress(StubRoutines::x86::base64_AVX2_decode_LUT_tables_addr()));
3374     __ shll(isURL, 4);
3375     __ vmovdqu(xmm11, Address(r13, isURL, Address::times_1, 0x00));  // lut_lo
3376     __ vmovdqu(xmm8, Address(r13, isURL, Address::times_1, 0x20)); // lut_roll
3377     __ shrl(isURL, 6);  // restore isURL
3378     __ vmovdqu(xmm9, Address(r13, 0x80));  // lut_hi
3379     __ jmp(L_enterLoop);
3380 
3381     __ align32();
3382     __ bind(L_topLoop);
3383     // Add in the offset value (roll) to get 6-bit out values
3384     __ vpaddb(xmm0, xmm0, xmm2, Assembler::AVX_256bit);
3385     // Merge and permute the output bits into appropriate output byte lanes
3386     __ vpmaddubsw(xmm0, xmm0, xmm7, Assembler::AVX_256bit);
3387     __ vpmaddwd(xmm0, xmm0, xmm6, Assembler::AVX_256bit);
3388     __ vpshufb(xmm0, xmm0, xmm13, Assembler::AVX_256bit);
3389     __ vpermd(xmm0, xmm12, xmm0, Assembler::AVX_256bit);
3390     // Store the output bytes
3391     __ vmovdqu(Address(dest, dp, Address::times_1, 0), xmm0);
3392     __ addptr(source, 0x20);
3393     __ addptr(dest, 0x18);
3394     __ subl(length, 0x20);
3395     __ jcc(Assembler::less, L_tailProc);
3396 
3397     __ bind(L_enterLoop);
3398 
3399     // Load in encoded string (32 bytes)
3400     __ vmovdqu(xmm2, Address(source, start_offset, Address::times_1, 0x0));
3401     // Extract the high nibble for indexing into the lut tables.  High 4 bits are don't care.
3402     __ vpsrld(xmm1, xmm2, 0x4, Assembler::AVX_256bit);
3403     __ vpand(xmm1, xmm4, xmm1, Assembler::AVX_256bit);
3404     // Extract the low nibble. 5F/2F will isolate the low-order 4 bits.  High 4 bits are don't care.
3405     __ vpand(xmm3, xmm2, xmm4, Assembler::AVX_256bit);
3406     // Check for special-case (0x2F or 0x5F (URL))
3407     __ vpcmpeqb(xmm0, xmm4, xmm2, Assembler::AVX_256bit);
3408     // Get the bitset based on the low nibble.  vpshufb uses low-order 4 bits only.
3409     __ vpshufb(xmm3, xmm11, xmm3, Assembler::AVX_256bit);
3410     // Get the bit value of the high nibble
3411     __ vpshufb(xmm5, xmm9, xmm1, Assembler::AVX_256bit);
3412     // Make sure 2F / 5F shows as valid
3413     __ vpandn(xmm3, xmm0, xmm3, Assembler::AVX_256bit);
3414     // Make adjustment for roll index.  For non-URL, this is a no-op,
3415     // for URL, this adjusts by -4.  This is to properly index the
3416     // roll value for 2F / 5F.
3417     __ vpand(xmm0, xmm0, xmm10, Assembler::AVX_256bit);
3418     // If the and of the two is non-zero, we have an invalid input character
3419     __ vptest(xmm3, xmm5);
3420     // Extract the "roll" value - value to add to the input to get 6-bit out value
3421     __ vpaddb(xmm0, xmm0, xmm1, Assembler::AVX_256bit); // Handle 2F / 5F
3422     __ vpshufb(xmm0, xmm8, xmm0, Assembler::AVX_256bit);
3423     __ jcc(Assembler::equal, L_topLoop);  // Fall through on error
3424 
3425     __ bind(L_tailProc);
3426 
3427     __ addl(length, 0x2c);
3428 
3429     __ vzeroupper();
3430   }
3431 
3432   // Use non-AVX code to decode 4-byte chunks into 3 bytes of output
3433 
3434   // Register state (Linux):
3435   // r12-15 - saved on stack
3436   // rdi - src
3437   // rsi - sp
3438   // rdx - sl
3439   // rcx - dst
3440   // r8 - dp
3441   // r9 - isURL
3442 
3443   // Register state (Windows):
3444   // r12-15 - saved on stack
3445   // rcx - src
3446   // rdx - sp
3447   // r8 - sl
3448   // r9 - dst
3449   // r12 - dp
3450   // r10 - isURL
3451 
3452   // Registers (common):
3453   // length (r14) - bytes in src
3454 
3455   const Register decode_table = r11;
3456   const Register out_byte_count = rbx;
3457   const Register byte1 = r13;
3458   const Register byte2 = r15;
3459   const Register byte3 = WIN64_ONLY(r8) NOT_WIN64(rdx);
3460   const Register byte4 = WIN64_ONLY(r10) NOT_WIN64(r9);
3461 
3462   __ bind(L_lastChunk);
3463 
3464   __ shrl(length, 2);    // Multiple of 4 bytes only - length is # 4-byte chunks
3465   __ cmpl(length, 0);
3466   __ jcc(Assembler::lessEqual, L_exit_no_vzero);
3467 
3468   __ shll(isURL, 8);    // index into decode table based on isURL
3469   __ lea(decode_table, ExternalAddress(StubRoutines::x86::base64_decoding_table_addr()));
3470   __ addptr(decode_table, isURL);
3471 
3472   __ jmp(L_bottomLoop);
3473 
3474   __ align32();
3475   __ BIND(L_forceLoop);
3476   __ shll(byte1, 18);
3477   __ shll(byte2, 12);
3478   __ shll(byte3, 6);
3479   __ orl(byte1, byte2);
3480   __ orl(byte1, byte3);
3481   __ orl(byte1, byte4);
3482 
3483   __ addptr(source, 4);
3484 
3485   __ movb(Address(dest, dp, Address::times_1, 2), byte1);
3486   __ shrl(byte1, 8);
3487   __ movb(Address(dest, dp, Address::times_1, 1), byte1);
3488   __ shrl(byte1, 8);
3489   __ movb(Address(dest, dp, Address::times_1, 0), byte1);
3490 
3491   __ addptr(dest, 3);
3492   __ decrementl(length, 1);
3493   __ jcc(Assembler::zero, L_exit_no_vzero);
3494 
3495   __ BIND(L_bottomLoop);
3496   __ load_unsigned_byte(byte1, Address(source, start_offset, Address::times_1, 0x00));
3497   __ load_unsigned_byte(byte2, Address(source, start_offset, Address::times_1, 0x01));
3498   __ load_signed_byte(byte1, Address(decode_table, byte1));
3499   __ load_signed_byte(byte2, Address(decode_table, byte2));
3500   __ load_unsigned_byte(byte3, Address(source, start_offset, Address::times_1, 0x02));
3501   __ load_unsigned_byte(byte4, Address(source, start_offset, Address::times_1, 0x03));
3502   __ load_signed_byte(byte3, Address(decode_table, byte3));
3503   __ load_signed_byte(byte4, Address(decode_table, byte4));
3504 
3505   __ mov(rax, byte1);
3506   __ orl(rax, byte2);
3507   __ orl(rax, byte3);
3508   __ orl(rax, byte4);
3509   __ jcc(Assembler::positive, L_forceLoop);
3510 
3511   __ BIND(L_exit_no_vzero);
3512   __ pop_ppx(rax);             // Get original dest value
3513   __ subptr(dest, rax);                      // Number of bytes converted
3514   __ movptr(rax, dest);
3515   __ pop_ppx(rbx);
3516   __ pop_ppx(r15);
3517   __ pop_ppx(r14);
3518   __ pop_ppx(r13);
3519   __ pop_ppx(r12);
3520   __ leave();
3521   __ ret(0);
3522 
3523   // record the stub entry and end
3524   store_archive_data(stub_id, start, __ pc());
3525 
3526   return start;
3527 }
3528 
3529 
3530 /**
3531  *  Arguments:
3532  *
3533  * Inputs:
3534  *   c_rarg0   - int crc
3535  *   c_rarg1   - byte* buf
3536  *   c_rarg2   - int length
3537  *
3538  * Output:
3539  *       rax   - int crc result
3540  */
3541 address StubGenerator::generate_updateBytesCRC32() {
3542   assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
3543 
3544   StubId stub_id = StubId::stubgen_updateBytesCRC32_id;
3545   int entry_count = StubInfo::entry_count(stub_id);
3546   assert(entry_count == 1, "sanity check");
3547   address start = load_archive_data(stub_id);
3548   if (start != nullptr) {
3549     return start;
3550   }
3551   __ align(CodeEntryAlignment);
3552   StubCodeMark mark(this, stub_id);
3553 
3554   start = __ pc();
3555 
3556   // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3557   // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3558   // rscratch1: r10
3559   const Register crc   = c_rarg0;  // crc
3560   const Register buf   = c_rarg1;  // source java byte array address
3561   const Register len   = c_rarg2;  // length
3562   const Register table = c_rarg3;  // crc_table address (reuse register)
3563   const Register tmp1   = r11;
3564   const Register tmp2   = r10;
3565   assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax);
3566 
3567   BLOCK_COMMENT("Entry:");
3568   __ enter(); // required for proper stackwalking of RuntimeStub frame
3569 
3570   if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() &&
3571       VM_Version::supports_avx512bw() &&
3572       VM_Version::supports_avx512vl()) {
3573       // The constants used in the CRC32 algorithm requires the 1's compliment of the initial crc value.
3574       // However, the constant table for CRC32-C assumes the original crc value.  Account for this
3575       // difference before calling and after returning.
3576     __ lea(table, ExternalAddress(StubRoutines::x86::crc_table_avx512_addr()));
3577     __ notl(crc);
3578     __ kernel_crc32_avx512(crc, buf, len, table, tmp1, tmp2);
3579     __ notl(crc);
3580   } else {
3581     __ kernel_crc32(crc, buf, len, table, tmp1);
3582   }
3583 
3584   __ movl(rax, crc);
3585   __ vzeroupper();
3586   __ leave(); // required for proper stackwalking of RuntimeStub frame
3587   __ ret(0);
3588 
3589   // record the stub entry and end
3590   store_archive_data(stub_id, start, __ pc());
3591 
3592   return start;
3593 }
3594 
3595 /**
3596 *  Arguments:
3597 *
3598 * Inputs:
3599 *   c_rarg0   - int crc
3600 *   c_rarg1   - byte* buf
3601 *   c_rarg2   - long length
3602 *   c_rarg3   - table_start - optional (present only when doing a library_call,
3603 *              not used by x86 algorithm)
3604 *
3605 * Output:
3606 *       rax   - int crc result
3607 */
3608 address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
3609   assert(UseCRC32CIntrinsics, "need SSE4_2");
3610   StubId stub_id = StubId::stubgen_updateBytesCRC32C_id;
3611   int entry_count = StubInfo::entry_count(stub_id);
3612   assert(entry_count == 1, "sanity check");
3613   address start = load_archive_data(stub_id);
3614   if (start != nullptr) {
3615     return start;
3616   }
3617   __ align(CodeEntryAlignment);
3618   StubCodeMark mark(this, stub_id);
3619   start = __ pc();
3620 
3621   //reg.arg        int#0        int#1        int#2        int#3        int#4        int#5        float regs
3622   //Windows        RCX          RDX          R8           R9           none         none         XMM0..XMM3
3623   //Lin / Sol      RDI          RSI          RDX          RCX          R8           R9           XMM0..XMM7
3624   const Register crc = c_rarg0;  // crc
3625   const Register buf = c_rarg1;  // source java byte array address
3626   const Register len = c_rarg2;  // length
3627   const Register a = rax;
3628   const Register j = r9;
3629   const Register k = r10;
3630   const Register l = r11;
3631 #ifdef _WIN64
3632   const Register y = rdi;
3633   const Register z = rsi;
3634 #else
3635   const Register y = rcx;
3636   const Register z = r8;
3637 #endif
3638   assert_different_registers(crc, buf, len, a, j, k, l, y, z);
3639 
3640   BLOCK_COMMENT("Entry:");
3641   __ enter(); // required for proper stackwalking of RuntimeStub frame
3642   Label L_continue;
3643 
3644   if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() &&
3645       VM_Version::supports_avx512bw() &&
3646       VM_Version::supports_avx512vl()) {
3647     Label L_doSmall;
3648 
3649     __ cmpl(len, 384);
3650     __ jcc(Assembler::lessEqual, L_doSmall);
3651 
3652     __ lea(j, ExternalAddress(StubRoutines::x86::crc32c_table_avx512_addr()));
3653     __ kernel_crc32_avx512(crc, buf, len, j, l, k);
3654 
3655     __ jmp(L_continue);
3656 
3657     __ bind(L_doSmall);
3658   }
3659 #ifdef _WIN64
3660   __ push_ppx(y);
3661   __ push_ppx(z);
3662 #endif
3663   __ crc32c_ipl_alg2_alt2(crc, buf, len,
3664                           a, j, k,
3665                           l, y, z,
3666                           c_farg0, c_farg1, c_farg2,
3667                           is_pclmulqdq_supported);
3668 #ifdef _WIN64
3669   __ pop_ppx(z);
3670   __ pop_ppx(y);
3671 #endif
3672 
3673   __ bind(L_continue);
3674   __ movl(rax, crc);
3675   __ vzeroupper();
3676   __ leave(); // required for proper stackwalking of RuntimeStub frame
3677   __ ret(0);
3678 
3679   // record the stub entry and end
3680   store_archive_data(stub_id, start, __ pc());
3681 
3682   return start;
3683 }
3684 
3685 
3686 /**
3687  *  Arguments:
3688  *
3689  *  Input:
3690  *    c_rarg0   - x address
3691  *    c_rarg1   - x length
3692  *    c_rarg2   - y address
3693  *    c_rarg3   - y length
3694  * not Win64
3695  *    c_rarg4   - z address
3696  * Win64
3697  *    rsp+40    - z address
3698  */
3699 address StubGenerator::generate_multiplyToLen() {
3700   StubId stub_id = StubId::stubgen_multiplyToLen_id;
3701   int entry_count = StubInfo::entry_count(stub_id);
3702   assert(entry_count == 1, "sanity check");
3703   address start = load_archive_data(stub_id);
3704   if (start != nullptr) {
3705     return start;
3706   }
3707   __ align(CodeEntryAlignment);
3708   StubCodeMark mark(this, stub_id);
3709   start = __ pc();
3710 
3711   // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3712   // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3713   const Register x     = rdi;
3714   const Register xlen  = rax;
3715   const Register y     = rsi;
3716   const Register ylen  = rcx;
3717   const Register z     = r8;
3718 
3719   // Next registers will be saved on stack in multiply_to_len().
3720   const Register tmp0  = r11;
3721   const Register tmp1  = r12;
3722   const Register tmp2  = r13;
3723   const Register tmp3  = r14;
3724   const Register tmp4  = r15;
3725   const Register tmp5  = rbx;
3726 
3727   BLOCK_COMMENT("Entry:");
3728   __ enter(); // required for proper stackwalking of RuntimeStub frame
3729 
3730   setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
3731                      // ylen => rcx, z => r8
3732                      // r9 and r10 may be used to save non-volatile registers
3733 #ifdef _WIN64
3734   // last argument (#4) is on stack on Win64
3735   __ movptr(z, Address(rsp, 6 * wordSize));
3736 #endif
3737 
3738   __ movptr(xlen, rsi);
3739   __ movptr(y,    rdx);
3740   __ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
3741 
3742   restore_arg_regs();
3743 
3744   __ leave(); // required for proper stackwalking of RuntimeStub frame
3745   __ ret(0);
3746 
3747   // record the stub entry and end
3748   store_archive_data(stub_id, start, __ pc());
3749 
3750   return start;
3751 }
3752 
3753 /**
3754 *  Arguments:
3755 *
3756 *  Input:
3757 *    c_rarg0   - obja     address
3758 *    c_rarg1   - objb     address
3759 *    c_rarg3   - length   length
3760 *    c_rarg4   - scale    log2_array_indxscale
3761 *
3762 *  Output:
3763 *        rax   - int >= mismatched index, < 0 bitwise complement of tail
3764 */
3765 address StubGenerator::generate_vectorizedMismatch() {
3766   StubId stub_id = StubId::stubgen_vectorizedMismatch_id;
3767   int entry_count = StubInfo::entry_count(stub_id);
3768   assert(entry_count == 1, "sanity check");
3769   address start = load_archive_data(stub_id);
3770   if (start != nullptr) {
3771     return start;
3772   }
3773   __ align(CodeEntryAlignment);
3774   StubCodeMark mark(this, stub_id);
3775   start = __ pc();
3776 
3777   BLOCK_COMMENT("Entry:");
3778   __ enter();
3779 
3780 #ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3781   const Register scale = c_rarg0;  //rcx, will exchange with r9
3782   const Register objb = c_rarg1;   //rdx
3783   const Register length = c_rarg2; //r8
3784   const Register obja = c_rarg3;   //r9
3785   __ xchgq(obja, scale);  //now obja and scale contains the correct contents
3786 
3787   const Register tmp1 = r10;
3788   const Register tmp2 = r11;
3789 #endif
3790 #ifndef _WIN64 // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3791   const Register obja = c_rarg0;   //U:rdi
3792   const Register objb = c_rarg1;   //U:rsi
3793   const Register length = c_rarg2; //U:rdx
3794   const Register scale = c_rarg3;  //U:rcx
3795   const Register tmp1 = r8;
3796   const Register tmp2 = r9;
3797 #endif
3798   const Register result = rax; //return value
3799   const XMMRegister vec0 = xmm0;
3800   const XMMRegister vec1 = xmm1;
3801   const XMMRegister vec2 = xmm2;
3802 
3803   __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
3804 
3805   __ vzeroupper();
3806   __ leave();
3807   __ ret(0);
3808 
3809   // record the stub entry and end
3810   store_archive_data(stub_id, start, __ pc());
3811 
3812   return start;
3813 }
3814 
3815 /**
3816  *  Arguments:
3817  *
3818 //  Input:
3819 //    c_rarg0   - x address
3820 //    c_rarg1   - x length
3821 //    c_rarg2   - z address
3822 //    c_rarg3   - z length
3823  *
3824  */
3825 address StubGenerator::generate_squareToLen() {
3826 
3827   StubId stub_id = StubId::stubgen_squareToLen_id;
3828   int entry_count = StubInfo::entry_count(stub_id);
3829   assert(entry_count == 1, "sanity check");
3830   address start = load_archive_data(stub_id);
3831   if (start != nullptr) {
3832     return start;
3833   }
3834   __ align(CodeEntryAlignment);
3835   StubCodeMark mark(this, stub_id);
3836   start = __ pc();
3837 
3838   // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3839   // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
3840   const Register x      = rdi;
3841   const Register len    = rsi;
3842   const Register z      = r8;
3843   const Register zlen   = rcx;
3844 
3845  const Register tmp1      = r12;
3846  const Register tmp2      = r13;
3847  const Register tmp3      = r14;
3848  const Register tmp4      = r15;
3849  const Register tmp5      = rbx;
3850 
3851   BLOCK_COMMENT("Entry:");
3852   __ enter(); // required for proper stackwalking of RuntimeStub frame
3853 
3854   setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
3855                      // zlen => rcx
3856                      // r9 and r10 may be used to save non-volatile registers
3857   __ movptr(r8, rdx);
3858   __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
3859 
3860   restore_arg_regs();
3861 
3862   __ leave(); // required for proper stackwalking of RuntimeStub frame
3863   __ ret(0);
3864 
3865   // record the stub entry and end
3866   store_archive_data(stub_id, start, __ pc());
3867 
3868   return start;
3869 }
3870 
3871 address StubGenerator::generate_method_entry_barrier() {
3872   StubId stub_id = StubId::stubgen_method_entry_barrier_id;
3873   int entry_count = StubInfo::entry_count(stub_id);
3874   assert(entry_count == 1, "sanity check");
3875   address start = load_archive_data(stub_id);
3876   if (start != nullptr) {
3877     return start;
3878   }
3879   __ align(CodeEntryAlignment);
3880   StubCodeMark mark(this, stub_id);
3881   start = __ pc();
3882 
3883   Label deoptimize_label;
3884 
3885   __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing
3886 
3887   BLOCK_COMMENT("Entry:");
3888   __ enter(); // save rbp
3889 
3890   // save c_rarg0, because we want to use that value.
3891   // We could do without it but then we depend on the number of slots used by pusha
3892   __ push_ppx(c_rarg0);
3893 
3894   __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address
3895 
3896   __ pusha();
3897 
3898   // The method may have floats as arguments, and we must spill them before calling
3899   // the VM runtime.
3900   assert(Argument::n_float_register_parameters_j == 8, "Assumption");
3901   const int xmm_size = wordSize * 2;
3902   const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
3903   __ subptr(rsp, xmm_spill_size);
3904   __ movdqu(Address(rsp, xmm_size * 7), xmm7);
3905   __ movdqu(Address(rsp, xmm_size * 6), xmm6);
3906   __ movdqu(Address(rsp, xmm_size * 5), xmm5);
3907   __ movdqu(Address(rsp, xmm_size * 4), xmm4);
3908   __ movdqu(Address(rsp, xmm_size * 3), xmm3);
3909   __ movdqu(Address(rsp, xmm_size * 2), xmm2);
3910   __ movdqu(Address(rsp, xmm_size * 1), xmm1);
3911   __ movdqu(Address(rsp, xmm_size * 0), xmm0);
3912 
3913   __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);
3914 
3915   __ movdqu(xmm0, Address(rsp, xmm_size * 0));
3916   __ movdqu(xmm1, Address(rsp, xmm_size * 1));
3917   __ movdqu(xmm2, Address(rsp, xmm_size * 2));
3918   __ movdqu(xmm3, Address(rsp, xmm_size * 3));
3919   __ movdqu(xmm4, Address(rsp, xmm_size * 4));
3920   __ movdqu(xmm5, Address(rsp, xmm_size * 5));
3921   __ movdqu(xmm6, Address(rsp, xmm_size * 6));
3922   __ movdqu(xmm7, Address(rsp, xmm_size * 7));
3923   __ addptr(rsp, xmm_spill_size);
3924 
3925   __ cmpl(rax, 1); // 1 means deoptimize
3926   __ jcc(Assembler::equal, deoptimize_label);
3927 
3928   __ popa();
3929   __ pop_ppx(c_rarg0);
3930 
3931   __ leave();
3932 
3933   __ addptr(rsp, 1 * wordSize); // cookie
3934   __ ret(0);
3935 
3936 
3937   __ BIND(deoptimize_label);
3938 
3939   __ popa();
3940   __ pop_ppx(c_rarg0);
3941 
3942   __ leave();
3943 
3944   // this can be taken out, but is good for verification purposes. getting a SIGSEGV
3945   // here while still having a correct stack is valuable
3946   __ testptr(rsp, Address(rsp, 0));
3947 
3948   __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
3949   __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be callers verified_entry_point
3950 
3951   // record the stub entry and end
3952   store_archive_data(stub_id, start, __ pc());
3953 
3954   return start;
3955 }
3956 
3957  /**
3958  *  Arguments:
3959  *
3960  *  Input:
3961  *    c_rarg0   - out address
3962  *    c_rarg1   - in address
3963  *    c_rarg2   - offset
3964  *    c_rarg3   - len
3965  * not Win64
3966  *    c_rarg4   - k
3967  * Win64
3968  *    rsp+40    - k
3969  */
3970 address StubGenerator::generate_mulAdd() {
3971   StubId stub_id = StubId::stubgen_mulAdd_id;
3972   int entry_count = StubInfo::entry_count(stub_id);
3973   assert(entry_count == 1, "sanity check");
3974   address start = load_archive_data(stub_id);
3975   if (start != nullptr) {
3976     return start;
3977   }
3978   __ align(CodeEntryAlignment);
3979   StubCodeMark mark(this, stub_id);
3980   start = __ pc();
3981 
3982   // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3983   // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3984   const Register out     = rdi;
3985   const Register in      = rsi;
3986   const Register offset  = r11;
3987   const Register len     = rcx;
3988   const Register k       = r8;
3989 
3990   // Next registers will be saved on stack in mul_add().
3991   const Register tmp1  = r12;
3992   const Register tmp2  = r13;
3993   const Register tmp3  = r14;
3994   const Register tmp4  = r15;
3995   const Register tmp5  = rbx;
3996 
3997   BLOCK_COMMENT("Entry:");
3998   __ enter(); // required for proper stackwalking of RuntimeStub frame
3999 
4000   setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
4001                      // len => rcx, k => r8
4002                      // r9 and r10 may be used to save non-volatile registers
4003 #ifdef _WIN64
4004   // last argument is on stack on Win64
4005   __ movl(k, Address(rsp, 6 * wordSize));
4006 #endif
4007   __ movptr(r11, rdx);  // move offset in rdx to offset(r11)
4008   __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
4009 
4010   restore_arg_regs();
4011 
4012   __ leave(); // required for proper stackwalking of RuntimeStub frame
4013   __ ret(0);
4014 
4015   // record the stub entry and end
4016   store_archive_data(stub_id, start, __ pc());
4017 
4018   return start;
4019 }
4020 
4021 address StubGenerator::generate_bigIntegerRightShift() {
4022   StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id;
4023   int entry_count = StubInfo::entry_count(stub_id);
4024   assert(entry_count == 1, "sanity check");
4025   address start = load_archive_data(stub_id);
4026   if (start != nullptr) {
4027     return start;
4028   }
4029   __ align(CodeEntryAlignment);
4030   StubCodeMark mark(this, stub_id);
4031   start = __ pc();
4032 
4033   Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit;
4034   // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8.
4035   const Register newArr = rdi;
4036   const Register oldArr = rsi;
4037   const Register newIdx = rdx;
4038   const Register shiftCount = rcx;  // It was intentional to have shiftCount in rcx since it is used implicitly for shift.
4039   const Register totalNumIter = r8;
4040 
4041   // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps.
4042   // For everything else, we prefer using r9 and r10 since we do not have to save them before use.
4043   const Register tmp1 = r11;                    // Caller save.
4044   const Register tmp2 = rax;                    // Caller save.
4045   const Register tmp3 = WIN64_ONLY(r12) NOT_WIN64(r9);   // Windows: Callee save. Linux: Caller save.
4046   const Register tmp4 = WIN64_ONLY(r13) NOT_WIN64(r10);  // Windows: Callee save. Linux: Caller save.
4047   const Register tmp5 = r14;                    // Callee save.
4048   const Register tmp6 = r15;
4049 
4050   const XMMRegister x0 = xmm0;
4051   const XMMRegister x1 = xmm1;
4052   const XMMRegister x2 = xmm2;
4053 
4054   BLOCK_COMMENT("Entry:");
4055   __ enter(); // required for proper stackwalking of RuntimeStub frame
4056 
4057 #ifdef _WIN64
4058   setup_arg_regs(4);
4059   // For windows, since last argument is on stack, we need to move it to the appropriate register.
4060   __ movl(totalNumIter, Address(rsp, 6 * wordSize));
4061   // Save callee save registers.
4062   __ push_ppx(tmp3);
4063   __ push_ppx(tmp4);
4064 #endif
4065   __ push_ppx(tmp5);
4066 
4067   // Rename temps used throughout the code.
4068   const Register idx = tmp1;
4069   const Register nIdx = tmp2;
4070 
4071   __ xorl(idx, idx);
4072 
4073   // Start right shift from end of the array.
4074   // For example, if #iteration = 4 and newIdx = 1
4075   // then dest[4] = src[4] >> shiftCount  | src[3] <<< (shiftCount - 32)
4076   // if #iteration = 4 and newIdx = 0
4077   // then dest[3] = src[4] >> shiftCount  | src[3] <<< (shiftCount - 32)
4078   __ movl(idx, totalNumIter);
4079   __ movl(nIdx, idx);
4080   __ addl(nIdx, newIdx);
4081 
4082   // If vectorization is enabled, check if the number of iterations is at least 64
4083   // If not, then go to ShifTwo processing 2 iterations
4084   if (VM_Version::supports_avx512_vbmi2()) {
4085     __ cmpptr(totalNumIter, (AVX3Threshold/64));
4086     __ jcc(Assembler::less, ShiftTwo);
4087 
4088     if (AVX3Threshold < 16 * 64) {
4089       __ cmpl(totalNumIter, 16);
4090       __ jcc(Assembler::less, ShiftTwo);
4091     }
4092     __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit);
4093     __ subl(idx, 16);
4094     __ subl(nIdx, 16);
4095     __ BIND(Shift512Loop);
4096     __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 4), Assembler::AVX_512bit);
4097     __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit);
4098     __ vpshrdvd(x2, x1, x0, Assembler::AVX_512bit);
4099     __ evmovdqul(Address(newArr, nIdx, Address::times_4), x2, Assembler::AVX_512bit);
4100     __ subl(nIdx, 16);
4101     __ subl(idx, 16);
4102     __ jcc(Assembler::greaterEqual, Shift512Loop);
4103     __ addl(idx, 16);
4104     __ addl(nIdx, 16);
4105   }
4106   __ BIND(ShiftTwo);
4107   __ cmpl(idx, 2);
4108   __ jcc(Assembler::less, ShiftOne);
4109   __ subl(idx, 2);
4110   __ subl(nIdx, 2);
4111   __ BIND(ShiftTwoLoop);
4112   __ movl(tmp5, Address(oldArr, idx, Address::times_4, 8));
4113   __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4));
4114   __ movl(tmp3, Address(oldArr, idx, Address::times_4));
4115   __ shrdl(tmp5, tmp4);
4116   __ shrdl(tmp4, tmp3);
4117   __ movl(Address(newArr, nIdx, Address::times_4, 4), tmp5);
4118   __ movl(Address(newArr, nIdx, Address::times_4), tmp4);
4119   __ subl(nIdx, 2);
4120   __ subl(idx, 2);
4121   __ jcc(Assembler::greaterEqual, ShiftTwoLoop);
4122   __ addl(idx, 2);
4123   __ addl(nIdx, 2);
4124 
4125   // Do the last iteration
4126   __ BIND(ShiftOne);
4127   __ cmpl(idx, 1);
4128   __ jcc(Assembler::less, Exit);
4129   __ subl(idx, 1);
4130   __ subl(nIdx, 1);
4131   __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4));
4132   __ movl(tmp3, Address(oldArr, idx, Address::times_4));
4133   __ shrdl(tmp4, tmp3);
4134   __ movl(Address(newArr, nIdx, Address::times_4), tmp4);
4135   __ BIND(Exit);
4136   __ vzeroupper();
4137   // Restore callee save registers.
4138   __ pop_ppx(tmp5);
4139 #ifdef _WIN64
4140   __ pop_ppx(tmp4);
4141   __ pop_ppx(tmp3);
4142   restore_arg_regs();
4143 #endif
4144   __ leave(); // required for proper stackwalking of RuntimeStub frame
4145   __ ret(0);
4146 
4147   // record the stub entry and end
4148   store_archive_data(stub_id, start, __ pc());
4149 
4150   return start;
4151 }
4152 
4153  /**
4154  *  Arguments:
4155  *
4156  *  Input:
4157  *    c_rarg0   - newArr address
4158  *    c_rarg1   - oldArr address
4159  *    c_rarg2   - newIdx
4160  *    c_rarg3   - shiftCount
4161  * not Win64
4162  *    c_rarg4   - numIter
4163  * Win64
4164  *    rsp40    - numIter
4165  */
4166 address StubGenerator::generate_bigIntegerLeftShift() {
4167   StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id;
4168   int entry_count = StubInfo::entry_count(stub_id);
4169   assert(entry_count == 1, "sanity check");
4170   address start = load_archive_data(stub_id);
4171   if (start != nullptr) {
4172     return start;
4173   }
4174   __ align(CodeEntryAlignment);
4175   StubCodeMark mark(this, stub_id);
4176   start = __ pc();
4177 
4178   Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit;
4179   // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8.
4180   const Register newArr = rdi;
4181   const Register oldArr = rsi;
4182   const Register newIdx = rdx;
4183   const Register shiftCount = rcx;  // It was intentional to have shiftCount in rcx since it is used implicitly for shift.
4184   const Register totalNumIter = r8;
4185   // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps.
4186   // For everything else, we prefer using r9 and r10 since we do not have to save them before use.
4187   const Register tmp1 = r11;                    // Caller save.
4188   const Register tmp2 = rax;                    // Caller save.
4189   const Register tmp3 = WIN64_ONLY(r12) NOT_WIN64(r9);   // Windows: Callee save. Linux: Caller save.
4190   const Register tmp4 = WIN64_ONLY(r13) NOT_WIN64(r10);  // Windows: Callee save. Linux: Caller save.
4191   const Register tmp5 = r14;                    // Callee save.
4192 
4193   const XMMRegister x0 = xmm0;
4194   const XMMRegister x1 = xmm1;
4195   const XMMRegister x2 = xmm2;
4196   BLOCK_COMMENT("Entry:");
4197   __ enter(); // required for proper stackwalking of RuntimeStub frame
4198 
4199 #ifdef _WIN64
4200   setup_arg_regs(4);
4201   // For windows, since last argument is on stack, we need to move it to the appropriate register.
4202   __ movl(totalNumIter, Address(rsp, 6 * wordSize));
4203   // Save callee save registers.
4204   __ push_ppx(tmp3);
4205   __ push_ppx(tmp4);
4206 #endif
4207   __ push_ppx(tmp5);
4208 
4209   // Rename temps used throughout the code
4210   const Register idx = tmp1;
4211   const Register numIterTmp = tmp2;
4212 
4213   // Start idx from zero.
4214   __ xorl(idx, idx);
4215   // Compute interior pointer for new array. We do this so that we can use same index for both old and new arrays.
4216   __ lea(newArr, Address(newArr, newIdx, Address::times_4));
4217   __ movl(numIterTmp, totalNumIter);
4218 
4219   // If vectorization is enabled, check if the number of iterations is at least 64
4220   // If not, then go to ShiftTwo shifting two numbers at a time
4221   if (VM_Version::supports_avx512_vbmi2()) {
4222     __ cmpl(totalNumIter, (AVX3Threshold/64));
4223     __ jcc(Assembler::less, ShiftTwo);
4224 
4225     if (AVX3Threshold < 16 * 64) {
4226       __ cmpl(totalNumIter, 16);
4227       __ jcc(Assembler::less, ShiftTwo);
4228     }
4229     __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit);
4230     __ subl(numIterTmp, 16);
4231     __ BIND(Shift512Loop);
4232     __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit);
4233     __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 0x4), Assembler::AVX_512bit);
4234     __ vpshldvd(x1, x2, x0, Assembler::AVX_512bit);
4235     __ evmovdqul(Address(newArr, idx, Address::times_4), x1, Assembler::AVX_512bit);
4236     __ addl(idx, 16);
4237     __ subl(numIterTmp, 16);
4238     __ jcc(Assembler::greaterEqual, Shift512Loop);
4239     __ addl(numIterTmp, 16);
4240   }
4241   __ BIND(ShiftTwo);
4242   __ cmpl(totalNumIter, 1);
4243   __ jcc(Assembler::less, Exit);
4244   __ movl(tmp3, Address(oldArr, idx, Address::times_4));
4245   __ subl(numIterTmp, 2);
4246   __ jcc(Assembler::less, ShiftOne);
4247 
4248   __ BIND(ShiftTwoLoop);
4249   __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4));
4250   __ movl(tmp5, Address(oldArr, idx, Address::times_4, 0x8));
4251   __ shldl(tmp3, tmp4);
4252   __ shldl(tmp4, tmp5);
4253   __ movl(Address(newArr, idx, Address::times_4), tmp3);
4254   __ movl(Address(newArr, idx, Address::times_4, 0x4), tmp4);
4255   __ movl(tmp3, tmp5);
4256   __ addl(idx, 2);
4257   __ subl(numIterTmp, 2);
4258   __ jcc(Assembler::greaterEqual, ShiftTwoLoop);
4259 
4260   // Do the last iteration
4261   __ BIND(ShiftOne);
4262   __ addl(numIterTmp, 2);
4263   __ cmpl(numIterTmp, 1);
4264   __ jcc(Assembler::less, Exit);
4265   __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4));
4266   __ shldl(tmp3, tmp4);
4267   __ movl(Address(newArr, idx, Address::times_4), tmp3);
4268 
4269   __ BIND(Exit);
4270   __ vzeroupper();
4271   // Restore callee save registers.
4272   __ pop_ppx(tmp5);
4273 #ifdef _WIN64
4274   __ pop_ppx(tmp4);
4275   __ pop_ppx(tmp3);
4276   restore_arg_regs();
4277 #endif
4278   __ leave(); // required for proper stackwalking of RuntimeStub frame
4279   __ ret(0);
4280 
4281   // record the stub entry and end
4282   store_archive_data(stub_id, start, __ pc());
4283 
4284   return start;
4285 }
4286 
4287 void StubGenerator::generate_libm_stubs() {
4288   if (UseLibmIntrinsic && InlineIntrinsics) {
4289     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
4290       StubRoutines::_dsin = generate_libmSin(); // from stubGenerator_x86_64_sin.cpp
4291     }
4292     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
4293       StubRoutines::_dcos = generate_libmCos(); // from stubGenerator_x86_64_cos.cpp
4294     }
4295     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
4296       StubRoutines::_dtan = generate_libmTan(); // from stubGenerator_x86_64_tan.cpp
4297     }
4298     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsinh)) {
4299       StubRoutines::_dsinh = generate_libmSinh(); // from stubGenerator_x86_64_sinh.cpp
4300     }
4301     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtanh)) {
4302       StubRoutines::_dtanh = generate_libmTanh(); // from stubGenerator_x86_64_tanh.cpp
4303     }
4304     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcbrt)) {
4305       StubRoutines::_dcbrt = generate_libmCbrt(); // from stubGenerator_x86_64_cbrt.cpp
4306     }
4307     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
4308       StubRoutines::_dexp = generate_libmExp(); // from stubGenerator_x86_64_exp.cpp
4309     }
4310     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
4311       StubRoutines::_dpow = generate_libmPow(); // from stubGenerator_x86_64_pow.cpp
4312     }
4313     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
4314       StubRoutines::_dlog = generate_libmLog(); // from stubGenerator_x86_64_log.cpp
4315     }
4316     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
4317       StubRoutines::_dlog10 = generate_libmLog10(); // from stubGenerator_x86_64_log.cpp
4318     }
4319   }
4320 }
4321 
4322 /**
4323 *  Arguments:
4324 *
4325 *  Input:
4326 *    c_rarg0   - float16  jshort
4327 *
4328 *  Output:
4329 *       xmm0   - float
4330 */
4331 address StubGenerator::generate_float16ToFloat() {
4332   StubId stub_id = StubId::stubgen_hf2f_id;
4333   int entry_count = StubInfo::entry_count(stub_id);
4334   assert(entry_count == 1, "sanity check");
4335   address start = load_archive_data(stub_id);
4336   if (start != nullptr) {
4337     return start;
4338   }
4339   StubCodeMark mark(this, stub_id);
4340 
4341   start = __ pc();
4342 
4343   BLOCK_COMMENT("Entry:");
4344   // No need for RuntimeStub frame since it is called only during JIT compilation
4345 
4346   // Load value into xmm0 and convert
4347   __ flt16_to_flt(xmm0, c_rarg0);
4348 
4349   __ ret(0);
4350 
4351   // record the stub entry and end
4352   store_archive_data(stub_id, start, __ pc());
4353 
4354   return start;
4355 }
4356 
4357 /**
4358 *  Arguments:
4359 *
4360 *  Input:
4361 *       xmm0   - float
4362 *
4363 *  Output:
4364 *        rax   - float16  jshort
4365 */
4366 address StubGenerator::generate_floatToFloat16() {
4367   StubId stub_id = StubId::stubgen_f2hf_id;
4368   int entry_count = StubInfo::entry_count(stub_id);
4369   assert(entry_count == 1, "sanity check");
4370   address start = load_archive_data(stub_id);
4371   if (start != nullptr) {
4372     return start;
4373   }
4374   StubCodeMark mark(this, stub_id);
4375 
4376   start = __ pc();
4377 
4378   BLOCK_COMMENT("Entry:");
4379   // No need for RuntimeStub frame since it is called only during JIT compilation
4380 
4381   // Convert and put result into rax
4382   __ flt_to_flt16(rax, xmm0, xmm1);
4383 
4384   __ ret(0);
4385 
4386   // record the stub entry and end
4387   store_archive_data(stub_id, start, __ pc());
4388 
4389   return start;
4390 }
4391 
4392 static void save_return_registers(MacroAssembler* masm) {
4393   masm->push_ppx(rax);
4394   if (InlineTypeReturnedAsFields) {
4395     masm->push(rdi);
4396     masm->push(rsi);
4397     masm->push(rdx);
4398     masm->push(rcx);
4399     masm->push(r8);
4400     masm->push(r9);
4401   }
4402   masm->push_d(xmm0);
4403   if (InlineTypeReturnedAsFields) {
4404     masm->push_d(xmm1);
4405     masm->push_d(xmm2);
4406     masm->push_d(xmm3);
4407     masm->push_d(xmm4);
4408     masm->push_d(xmm5);
4409     masm->push_d(xmm6);
4410     masm->push_d(xmm7);
4411   }
4412 #ifdef ASSERT
4413   masm->movq(rax, 0xBADC0FFE);
4414   masm->movq(rdi, rax);
4415   masm->movq(rsi, rax);
4416   masm->movq(rdx, rax);
4417   masm->movq(rcx, rax);
4418   masm->movq(r8, rax);
4419   masm->movq(r9, rax);
4420   masm->movq(xmm0, rax);
4421   masm->movq(xmm1, rax);
4422   masm->movq(xmm2, rax);
4423   masm->movq(xmm3, rax);
4424   masm->movq(xmm4, rax);
4425   masm->movq(xmm5, rax);
4426   masm->movq(xmm6, rax);
4427   masm->movq(xmm7, rax);
4428 #endif
4429 }
4430 
4431 static void restore_return_registers(MacroAssembler* masm) {
4432   if (InlineTypeReturnedAsFields) {
4433     masm->pop_d(xmm7);
4434     masm->pop_d(xmm6);
4435     masm->pop_d(xmm5);
4436     masm->pop_d(xmm4);
4437     masm->pop_d(xmm3);
4438     masm->pop_d(xmm2);
4439     masm->pop_d(xmm1);
4440   }
4441   masm->pop_d(xmm0);
4442   if (InlineTypeReturnedAsFields) {
4443     masm->pop(r9);
4444     masm->pop(r8);
4445     masm->pop(rcx);
4446     masm->pop(rdx);
4447     masm->pop(rsi);
4448     masm->pop(rdi);
4449   }
4450   masm->pop_ppx(rax);
4451 }
4452 
4453 address StubGenerator::generate_cont_thaw(StubId stub_id) {
4454   if (!Continuations::enabled()) return nullptr;
4455 
4456   bool return_barrier;
4457   bool return_barrier_exception;
4458   Continuation::thaw_kind kind;
4459 
4460   switch (stub_id) {
4461   case StubId::stubgen_cont_thaw_id:
4462     return_barrier = false;
4463     return_barrier_exception = false;
4464     kind = Continuation::thaw_top;
4465     break;
4466   case StubId::stubgen_cont_returnBarrier_id:
4467     return_barrier = true;
4468     return_barrier_exception = false;
4469     kind = Continuation::thaw_return_barrier;
4470     break;
4471   case StubId::stubgen_cont_returnBarrierExc_id:
4472     return_barrier = true;
4473     return_barrier_exception = true;
4474     kind = Continuation::thaw_return_barrier_exception;
4475     break;
4476   default:
4477     ShouldNotReachHere();
4478   }
4479   int entry_count = StubInfo::entry_count(stub_id);
4480   assert(entry_count == 1, "sanity check");
4481   address start = load_archive_data(stub_id);
4482   if (start != nullptr) {
4483     return start;
4484   }
4485   StubCodeMark mark(this, stub_id);
4486   start = __ pc();
4487 
4488   // TODO: Handle Valhalla return types. May require generating different return barriers.
4489 
4490   if (!return_barrier) {
4491     // Pop return address. If we don't do this, we get a drift,
4492     // where the bottom-most frozen frame continuously grows.
4493     __ pop(c_rarg3);
4494   } else {
4495     __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4496   }
4497 
4498 #ifdef ASSERT
4499   {
4500     Label L_good_sp;
4501     __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4502     __ jcc(Assembler::equal, L_good_sp);
4503     __ stop("Incorrect rsp at thaw entry");
4504     __ BIND(L_good_sp);
4505   }
4506 #endif // ASSERT
4507 
4508   if (return_barrier) {
4509     // Preserve possible return value from a method returning to the return barrier.
4510     save_return_registers(_masm);
4511   }
4512 
4513   __ movptr(c_rarg0, r15_thread);
4514   __ movptr(c_rarg1, (return_barrier ? 1 : 0));
4515   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), 2);
4516   __ movptr(rbx, rax);
4517 
4518   if (return_barrier) {
4519     // Restore return value from a method returning to the return barrier.
4520     // No safepoint in the call to thaw, so even an oop return value should be OK.
4521     restore_return_registers(_masm);
4522   }
4523 
4524 #ifdef ASSERT
4525   {
4526     Label L_good_sp;
4527     __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4528     __ jcc(Assembler::equal, L_good_sp);
4529     __ stop("Incorrect rsp after prepare thaw");
4530     __ BIND(L_good_sp);
4531   }
4532 #endif // ASSERT
4533 
4534   // rbx contains the size of the frames to thaw, 0 if overflow or no more frames
4535   Label L_thaw_success;
4536   __ testptr(rbx, rbx);
4537   __ jccb(Assembler::notZero, L_thaw_success);
4538   __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
4539   __ bind(L_thaw_success);
4540 
4541   // Make room for the thawed frames and align the stack.
4542   __ subptr(rsp, rbx);
4543   __ andptr(rsp, -StackAlignmentInBytes);
4544 
4545   if (return_barrier) {
4546     // Preserve possible return value from a method returning to the return barrier. (Again.)
4547     save_return_registers(_masm);
4548   }
4549 
4550   // If we want, we can templatize thaw by kind, and have three different entries.
4551   __ movptr(c_rarg0, r15_thread);
4552   __ movptr(c_rarg1, kind);
4553   __ call_VM_leaf(Continuation::thaw_entry(), 2);
4554   __ movptr(rbx, rax);
4555 
4556   if (return_barrier) {
4557     // Restore return value from a method returning to the return barrier. (Again.)
4558     // No safepoint in the call to thaw, so even an oop return value should be OK.
4559     restore_return_registers(_masm);
4560   } else {
4561     // Return 0 (success) from doYield.
4562     __ xorptr(rax, rax);
4563   }
4564 
4565   // After thawing, rbx is the SP of the yielding frame.
4566   // Move there, and then to saved RBP slot.
4567   __ movptr(rsp, rbx);
4568   __ subptr(rsp, 2*wordSize);
4569 
4570   if (return_barrier_exception) {
4571     __ movptr(c_rarg0, r15_thread);
4572     __ movptr(c_rarg1, Address(rsp, wordSize)); // return address
4573 
4574     // rax still holds the original exception oop, save it before the call
4575     __ push_ppx(rax);
4576 
4577     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
4578     __ movptr(rbx, rax);
4579 
4580     // Continue at exception handler:
4581     //   rax: exception oop
4582     //   rbx: exception handler
4583     //   rdx: exception pc
4584     __ pop_ppx(rax);
4585     __ verify_oop(rax);
4586     __ pop(rbp); // pop out RBP here too
4587     __ pop(rdx);
4588     __ jmp(rbx);
4589   } else {
4590     // We are "returning" into the topmost thawed frame; see Thaw::push_return_frame
4591     __ pop(rbp);
4592     __ ret(0);
4593   }
4594 
4595   // record the stub entry and end
4596   store_archive_data(stub_id, start, __ pc());
4597 
4598   return start;
4599 }
4600 
4601 address StubGenerator::generate_cont_thaw() {
4602   return generate_cont_thaw(StubId::stubgen_cont_thaw_id);
4603 }
4604 
4605 // TODO: will probably need multiple return barriers depending on return type
4606 
4607 address StubGenerator::generate_cont_returnBarrier() {
4608   return generate_cont_thaw(StubId::stubgen_cont_returnBarrier_id);
4609 }
4610 
4611 address StubGenerator::generate_cont_returnBarrier_exception() {
4612   return generate_cont_thaw(StubId::stubgen_cont_returnBarrierExc_id);
4613 }
4614 
4615 address StubGenerator::generate_cont_preempt_stub() {
4616   if (!Continuations::enabled()) return nullptr;
4617   StubId stub_id = StubId::stubgen_cont_preempt_id;
4618   int entry_count = StubInfo::entry_count(stub_id);
4619   assert(entry_count == 1, "sanity check");
4620   address start = load_archive_data(stub_id);
4621   if (start != nullptr) {
4622     return start;
4623   }
4624   StubCodeMark mark(this, stub_id);
4625   start = __ pc();
4626 
4627   __ reset_last_Java_frame(true);
4628 
4629   // Set rsp to enterSpecial frame, i.e. remove all frames copied into the heap.
4630   __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4631 
4632   Label preemption_cancelled;
4633   __ movbool(rscratch1, Address(r15_thread, JavaThread::preemption_cancelled_offset()));
4634   __ testbool(rscratch1);
4635   __ jcc(Assembler::notZero, preemption_cancelled);
4636 
4637   // Remove enterSpecial frame from the stack and return to Continuation.run() to unmount.
4638   SharedRuntime::continuation_enter_cleanup(_masm);
4639   __ pop(rbp);
4640   __ ret(0);
4641 
4642   // We acquired the monitor after freezing the frames so call thaw to continue execution.
4643   __ bind(preemption_cancelled);
4644   __ movbool(Address(r15_thread, JavaThread::preemption_cancelled_offset()), false);
4645   __ lea(rbp, Address(rsp, checked_cast<int32_t>(ContinuationEntry::size())));
4646   __ movptr(rscratch1, ExternalAddress(ContinuationEntry::thaw_call_pc_address()));
4647   __ jmp(rscratch1);
4648 
4649   // record the stub entry and end
4650   store_archive_data(stub_id, start, __ pc());
4651 
4652   return start;
4653 }
4654 
4655 // exception handler for upcall stubs
4656 address StubGenerator::generate_upcall_stub_exception_handler() {
4657   StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id;
4658   int entry_count = StubInfo::entry_count(stub_id);
4659   assert(entry_count == 1, "sanity check");
4660   address start = load_archive_data(stub_id);
4661   if (start != nullptr) {
4662     return start;
4663   }
4664   StubCodeMark mark(this, stub_id);
4665   start = __ pc();
4666 
4667   // native caller has no idea how to handle exceptions
4668   // we just crash here. Up to callee to catch exceptions.
4669   __ verify_oop(rax);
4670   __ vzeroupper();
4671   __ mov(c_rarg0, rax);
4672   __ andptr(rsp, -StackAlignmentInBytes); // align stack as required by ABI
4673   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
4674   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, UpcallLinker::handle_uncaught_exception)));
4675   __ should_not_reach_here();
4676 
4677   // record the stub entry and end
4678   store_archive_data(stub_id, start, __ pc());
4679 
4680   return start;
4681 }
4682 
4683 // load Method* target of MethodHandle
4684 // j_rarg0 = jobject receiver
4685 // rbx = result
4686 address StubGenerator::generate_upcall_stub_load_target() {
4687   StubId stub_id = StubId::stubgen_upcall_stub_load_target_id;
4688   int entry_count = StubInfo::entry_count(stub_id);
4689   assert(entry_count == 1, "sanity check");
4690   address start = load_archive_data(stub_id);
4691   if (start != nullptr) {
4692     return start;
4693   }
4694   StubCodeMark mark(this, stub_id);
4695   start = __ pc();
4696 
4697   __ resolve_global_jobject(j_rarg0, rscratch1);
4698     // Load target method from receiver
4699   __ load_heap_oop(rbx, Address(j_rarg0, java_lang_invoke_MethodHandle::form_offset()), rscratch1);
4700   __ load_heap_oop(rbx, Address(rbx, java_lang_invoke_LambdaForm::vmentry_offset()), rscratch1);
4701   __ load_heap_oop(rbx, Address(rbx, java_lang_invoke_MemberName::method_offset()), rscratch1);
4702   __ access_load_at(T_ADDRESS, IN_HEAP, rbx,
4703                     Address(rbx, java_lang_invoke_ResolvedMethodName::vmtarget_offset()),
4704                     noreg);
4705   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // just in case callee is deoptimized
4706 
4707   __ ret(0);
4708 
4709   // record the stub entry and end
4710   store_archive_data(stub_id, start, __ pc());
4711 
4712   return start;
4713 }
4714 
4715 void StubGenerator::generate_lookup_secondary_supers_table_stub() {
4716   StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id;
4717   GrowableArray<address> entries;
4718   int entry_count = StubInfo::entry_count(stub_id);
4719   assert(entry_count == Klass::SECONDARY_SUPERS_TABLE_SIZE, "sanity check");
4720   address start = load_archive_data(stub_id, &entries);
4721   if (start != nullptr) {
4722     assert(entries.length() == Klass::SECONDARY_SUPERS_TABLE_SIZE - 1,
4723            "unexpected extra entry count %d", entries.length());
4724     StubRoutines::_lookup_secondary_supers_table_stubs[0] = start;
4725     for (int slot = 1; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
4726       StubRoutines::_lookup_secondary_supers_table_stubs[slot] = entries.at(slot - 1);
4727     }
4728     return;
4729   }
4730   StubCodeMark mark(this, stub_id);
4731 
4732   const Register
4733       r_super_klass = rax,
4734       r_sub_klass   = rsi,
4735       result        = rdi;
4736 
4737   for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
4738     address next_entry = __ pc();
4739     if (slot == 0) {
4740       start = next_entry;
4741     } else {
4742       entries.append(next_entry);
4743     }
4744     StubRoutines::_lookup_secondary_supers_table_stubs[slot] = next_entry;
4745     __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass,
4746                                            rdx, rcx, rbx, r11, // temps
4747                                            result,
4748                                            slot);
4749     __ ret(0);
4750   }
4751 
4752   // record the stub entry and end plus all the auxiliary entries
4753   store_archive_data(stub_id, start, __ pc(), &entries);
4754 }
4755 
4756 // Slow path implementation for UseSecondarySupersTable.
4757 address StubGenerator::generate_lookup_secondary_supers_table_slow_path_stub() {
4758   StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id;
4759   int entry_count = StubInfo::entry_count(stub_id);
4760   assert(entry_count == 1, "sanity check");
4761   address start = load_archive_data(stub_id);
4762   if (start != nullptr) {
4763     return start;
4764   }
4765   StubCodeMark mark(this, stub_id);
4766   start = __ pc();
4767 
4768   const Register
4769       r_super_klass  = rax,
4770       r_array_base   = rbx,
4771       r_array_index  = rdx,
4772       r_sub_klass    = rsi,
4773       r_bitmap       = r11,
4774       result         = rdi;
4775 
4776   Label L_success;
4777   __ lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index, r_bitmap,
4778                                              rcx, rdi, // temps
4779                                              &L_success);
4780   // bind(L_failure);
4781   __ movl(result, 1);
4782   __ ret(0);
4783 
4784   __ bind(L_success);
4785   __ movl(result, 0);
4786   __ ret(0);
4787 
4788   // record the stub entry and end
4789   store_archive_data(stub_id, start, __ pc());
4790 
4791   return start;
4792 }
4793 
4794 void StubGenerator::create_control_words() {
4795   // Round to nearest, 64-bit mode, exceptions masked, flags specialized
4796   StubRoutines::x86::_mxcsr_std = EnableX86ECoreOpts ? 0x1FBF : 0x1F80;
4797   // Round to zero, 64-bit mode, exceptions masked, flags specialized
4798   StubRoutines::x86::_mxcsr_rz = EnableX86ECoreOpts ? 0x7FBF : 0x7F80;
4799 }
4800 
4801 // Initialization
4802 void StubGenerator::generate_preuniverse_stubs() {
4803   // atomic calls
4804   StubRoutines::_fence_entry                = generate_orderaccess_fence();
4805 }
4806 
4807 void StubGenerator::generate_initial_stubs() {
4808   // Generates all stubs and initializes the entry points
4809 
4810   // This platform-specific settings are needed by generate_call_stub()
4811   create_control_words();
4812 
4813   // Initialize table for unsafe copy memeory check.
4814   if (UnsafeMemoryAccess::_table == nullptr) {
4815     UnsafeMemoryAccess::create_table(16 + 4); // 16 for copyMemory; 4 for setMemory
4816   }
4817 
4818   // entry points that exist in all platforms Note: This is code
4819   // that could be shared among different platforms - however the
4820   // benefit seems to be smaller than the disadvantage of having a
4821   // much more complicated generator structure. See also comment in
4822   // stubRoutines.hpp.
4823 
4824   StubRoutines::_forward_exception_entry = generate_forward_exception();
4825 
4826   // Generate these first because they are called from other stubs
4827   if (InlineTypeReturnedAsFields) {
4828     StubRoutines::_load_inline_type_fields_in_regs =
4829       generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs),
4830                                  "load_inline_type_fields_in_regs", false);
4831     StubRoutines::_store_inline_type_fields_to_buf =
4832       generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf),
4833                                  "store_inline_type_fields_to_buf", true);
4834   }
4835 
4836   StubRoutines::_call_stub_entry =
4837     generate_call_stub(StubRoutines::_call_stub_return_address);
4838 
4839   // is referenced by megamorphic call
4840   StubRoutines::_catch_exception_entry = generate_catch_exception();
4841 
4842   // platform dependent
4843   StubRoutines::x86::_verify_mxcsr_entry    = generate_verify_mxcsr();
4844 
4845   StubRoutines::x86::_f2i_fixup             = generate_f2i_fixup();
4846   StubRoutines::x86::_f2l_fixup             = generate_f2l_fixup();
4847   StubRoutines::x86::_d2i_fixup             = generate_d2i_fixup();
4848   StubRoutines::x86::_d2l_fixup             = generate_d2l_fixup();
4849 
4850   StubRoutines::x86::_float_sign_mask       = generate_fp_mask(StubId::stubgen_float_sign_mask_id,  0x7FFFFFFF7FFFFFFF);
4851   StubRoutines::x86::_float_sign_flip       = generate_fp_mask(StubId::stubgen_float_sign_flip_id,  0x8000000080000000);
4852   StubRoutines::x86::_double_sign_mask      = generate_fp_mask(StubId::stubgen_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4853   StubRoutines::x86::_double_sign_flip      = generate_fp_mask(StubId::stubgen_double_sign_flip_id, 0x8000000000000000);
4854 
4855   if (UseCRC32Intrinsics) {
4856     StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
4857   }
4858 
4859   if (UseCRC32CIntrinsics) {
4860     bool supports_clmul = VM_Version::supports_clmul();
4861     StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
4862   }
4863 
4864   if (VM_Version::supports_float16()) {
4865     // For results consistency both intrinsics should be enabled.
4866     // vmIntrinsics checks InlineIntrinsics flag, no need to check it here.
4867     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
4868         vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
4869       StubRoutines::_hf2f = generate_float16ToFloat();
4870       StubRoutines::_f2hf = generate_floatToFloat16();
4871     }
4872   }
4873 
4874   generate_libm_stubs();
4875 
4876   StubRoutines::_fmod = generate_libmFmod(); // from stubGenerator_x86_64_fmod.cpp
4877 }
4878 
4879 // Call here from the interpreter or compiled code to either load
4880 // multiple returned values from the inline type instance being
4881 // returned to registers or to store returned values to a newly
4882 // allocated inline type instance.
4883 // Register is a class, but it would be assigned numerical value.
4884 // "0" is assigned for xmm0. Thus we need to ignore -Wnonnull.
4885 PRAGMA_DIAG_PUSH
4886 PRAGMA_NONNULL_IGNORED
4887 address StubGenerator::generate_return_value_stub(address destination, const char* name, bool has_res) {
4888   // We need to save all registers the calling convention may use so
4889   // the runtime calls read or update those registers. This needs to
4890   // be in sync with SharedRuntime::java_return_convention().
4891   enum layout {
4892     pad_off = frame::arg_reg_save_area_bytes/BytesPerInt, pad_off_2,
4893     rax_off, rax_off_2,
4894     j_rarg5_off, j_rarg5_2,
4895     j_rarg4_off, j_rarg4_2,
4896     j_rarg3_off, j_rarg3_2,
4897     j_rarg2_off, j_rarg2_2,
4898     j_rarg1_off, j_rarg1_2,
4899     j_rarg0_off, j_rarg0_2,
4900     j_farg0_off, j_farg0_2,
4901     j_farg1_off, j_farg1_2,
4902     j_farg2_off, j_farg2_2,
4903     j_farg3_off, j_farg3_2,
4904     j_farg4_off, j_farg4_2,
4905     j_farg5_off, j_farg5_2,
4906     j_farg6_off, j_farg6_2,
4907     j_farg7_off, j_farg7_2,
4908     rbp_off, rbp_off_2,
4909     return_off, return_off_2,
4910 
4911     framesize
4912   };
4913 
4914   CodeBuffer buffer(name, 1000, 512);
4915   MacroAssembler* _masm = new MacroAssembler(&buffer);
4916 
4917   int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
4918   assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
4919   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
4920   int frame_size_in_words = frame_size_in_bytes / wordSize;
4921 
4922   OopMapSet *oop_maps = new OopMapSet();
4923   OopMap* map = new OopMap(frame_size_in_slots, 0);
4924 
4925   map->set_callee_saved(VMRegImpl::stack2reg(rax_off), rax->as_VMReg());
4926   map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
4927   map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
4928   map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
4929   map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
4930   map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
4931   map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
4932   map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
4933   map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
4934   map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
4935   map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
4936   map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
4937   map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
4938   map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
4939   map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
4940 
4941   int start = __ offset();
4942 
4943   __ subptr(rsp, frame_size_in_bytes - 8 /* return address*/);
4944 
4945   __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp);
4946   __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7);
4947   __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6);
4948   __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5);
4949   __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4);
4950   __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3);
4951   __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2);
4952   __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1);
4953   __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0);
4954 
4955   __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0);
4956   __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1);
4957   __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2);
4958   __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3);
4959   __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4);
4960   __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5);
4961   __ movptr(Address(rsp, rax_off * BytesPerInt), rax);
4962 
4963   int frame_complete = __ offset();
4964 
4965   __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
4966 
4967   __ mov(c_rarg0, r15_thread);
4968   __ mov(c_rarg1, rax);
4969 
4970   __ call(RuntimeAddress(destination));
4971 
4972   // Set an oopmap for the call site.
4973 
4974   oop_maps->add_gc_map( __ offset() - start, map);
4975 
4976   // clear last_Java_sp
4977   __ reset_last_Java_frame(false);
4978 
4979   __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt));
4980   __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt));
4981   __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt));
4982   __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt));
4983   __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt));
4984   __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt));
4985   __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt));
4986   __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt));
4987   __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt));
4988 
4989   __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt));
4990   __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt));
4991   __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt));
4992   __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt));
4993   __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt));
4994   __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt));
4995   __ movptr(rax, Address(rsp, rax_off * BytesPerInt));
4996 
4997   __ addptr(rsp, frame_size_in_bytes-8);
4998 
4999   // check for pending exceptions
5000   Label pending;
5001   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
5002   __ jcc(Assembler::notEqual, pending);
5003 
5004   if (has_res) {
5005     __ get_vm_result_oop(rax);
5006   }
5007 
5008   __ ret(0);
5009 
5010   __ bind(pending);
5011 
5012   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
5013   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
5014 
5015   // -------------
5016   // make sure all code is generated
5017   _masm->flush();
5018 
5019   RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false);
5020   return stub->entry_point();
5021 }
5022 
5023 void StubGenerator::generate_continuation_stubs() {
5024   // Continuation stubs:
5025   StubRoutines::_cont_thaw          = generate_cont_thaw();
5026   StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
5027   StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
5028   StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
5029 }
5030 
5031 void StubGenerator::generate_final_stubs() {
5032   // Generates the rest of stubs and initializes the entry points
5033 
5034   // support for verify_oop (must happen after universe_init)
5035   if (VerifyOops) {
5036     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
5037   }
5038 
5039   // arraycopy stubs used by compilers
5040   generate_arraycopy_stubs();
5041 
5042   StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
5043 
5044 #ifdef COMPILER2
5045   if (UseSecondarySupersTable) {
5046     StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
5047     if (! InlineSecondarySupersTest) {
5048       generate_lookup_secondary_supers_table_stub();
5049     }
5050   }
5051 #endif // COMPILER2
5052 
5053   if (UseVectorizedMismatchIntrinsic) {
5054     StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
5055   }
5056 
5057   StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
5058   StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
5059 }
5060 
5061 void StubGenerator::generate_compiler_stubs() {
5062 #if COMPILER2_OR_JVMCI
5063 
5064   // Entry points that are C2 compiler specific.
5065 
5066   StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask(StubId::stubgen_vector_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
5067   StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask(StubId::stubgen_vector_float_sign_flip_id, 0x8000000080000000);
5068   StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask(StubId::stubgen_vector_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
5069   StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask(StubId::stubgen_vector_double_sign_flip_id, 0x8000000000000000);
5070   StubRoutines::x86::_vector_all_bits_set = generate_vector_mask(StubId::stubgen_vector_all_bits_set_id, 0xFFFFFFFFFFFFFFFF);
5071   StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask(StubId::stubgen_vector_int_mask_cmp_bits_id, 0x0000000100000001);
5072   StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask(StubId::stubgen_vector_short_to_byte_mask_id, 0x00ff00ff00ff00ff);
5073   StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask();
5074   StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask(StubId::stubgen_vector_int_to_byte_mask_id, 0x000000ff000000ff);
5075   StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask(StubId::stubgen_vector_int_to_short_mask_id, 0x0000ffff0000ffff);
5076   StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32(StubId::stubgen_vector_32_bit_mask_id, Assembler::AVX_512bit,
5077                                                                       0xFFFFFFFF, 0, 0, 0);
5078   StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32(StubId::stubgen_vector_64_bit_mask_id, Assembler::AVX_512bit,
5079                                                                       0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
5080   StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_int_shuffle_mask_id, 0x0302010003020100);
5081   StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask();
5082   StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_short_shuffle_mask_id, 0x0100010001000100);
5083   StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_long_shuffle_mask_id, 0x0000000100000000);
5084   StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask(StubId::stubgen_vector_long_sign_mask_id, 0x8000000000000000);
5085   generate_iota_indices();
5086   StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut();
5087   StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut();
5088   StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long();
5089   StubRoutines::x86::_vector_reverse_byte_perm_mask_int = generate_vector_reverse_byte_perm_mask_int();
5090   StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short();
5091 
5092   if (VM_Version::supports_avx2() && !VM_Version::supports_avx512vl()) {
5093     StubRoutines::x86::_compress_perm_table32 = generate_compress_perm_table(StubId::stubgen_compress_perm_table32_id);
5094     StubRoutines::x86::_compress_perm_table64 = generate_compress_perm_table(StubId::stubgen_compress_perm_table64_id);
5095     StubRoutines::x86::_expand_perm_table32 = generate_expand_perm_table(StubId::stubgen_expand_perm_table32_id);
5096     StubRoutines::x86::_expand_perm_table64 = generate_expand_perm_table(StubId::stubgen_expand_perm_table64_id);
5097   }
5098 
5099   if (VM_Version::supports_avx2() && !VM_Version::supports_avx512_vpopcntdq()) {
5100     // lut implementation influenced by counting 1s algorithm from section 5-1 of Hackers' Delight.
5101     StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut();
5102   }
5103 
5104   generate_aes_stubs();
5105 
5106   generate_ghash_stubs();
5107 
5108   generate_chacha_stubs();
5109 
5110   generate_kyber_stubs();
5111 
5112   generate_dilithium_stubs();
5113 
5114   generate_sha3_stubs();
5115 
5116   // data cache line writeback
5117   StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
5118   StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
5119 
5120 #ifdef COMPILER2
5121   if ((UseAVX == 2) && EnableX86ECoreOpts) {
5122     generate_string_indexof(StubRoutines::_string_indexof_array);
5123   }
5124 #endif
5125 
5126   if (UseAdler32Intrinsics) {
5127      StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5128   }
5129 
5130   if (UsePoly1305Intrinsics) {
5131     StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks();
5132   }
5133 
5134   if (UseIntPolyIntrinsics) {
5135     StubRoutines::_intpoly_montgomeryMult_P256 = generate_intpoly_montgomeryMult_P256();
5136     StubRoutines::_intpoly_assign = generate_intpoly_assign();
5137   }
5138 
5139   if (UseMD5Intrinsics) {
5140     StubRoutines::_md5_implCompress = generate_md5_implCompress(StubId::stubgen_md5_implCompress_id);
5141     StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubId::stubgen_md5_implCompressMB_id);
5142   }
5143 
5144   if (UseSHA1Intrinsics) {
5145     StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
5146     StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
5147     StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubId::stubgen_sha1_implCompress_id);
5148     StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubId::stubgen_sha1_implCompressMB_id);
5149   }
5150 
5151   if (UseSHA256Intrinsics) {
5152     address entry2 = nullptr;
5153     address entry3 = nullptr;
5154     StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
5155     char* dst = (char*)StubRoutines::x86::_k256_W;
5156     char* src = (char*)StubRoutines::x86::_k256;
5157     for (int ii = 0; ii < 16; ++ii) {
5158       memcpy(dst + 32 * ii,      src + 16 * ii, 16);
5159       memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
5160     }
5161     StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
5162     StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(entry2, entry3);
5163     StubRoutines::x86::_pshuffle_byte_flip_mask_00ba_addr = entry2;
5164     StubRoutines::x86::_pshuffle_byte_flip_mask_dc00_addr = entry3;
5165     StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id);
5166     StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id);
5167   }
5168 
5169   if (UseSHA512Intrinsics) {
5170     address entry2 = nullptr;
5171     StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
5172     StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(entry2);
5173     StubRoutines::x86::_pshuffle_byte_flip_mask_ymm_lo_addr_sha512 = entry2;
5174     StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id);
5175     StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id);
5176   }
5177 
5178   if (UseBASE64Intrinsics) {
5179     if(VM_Version::supports_avx2()) {
5180       StubRoutines::x86::_avx2_shuffle_base64 = base64_avx2_shuffle_addr();
5181       StubRoutines::x86::_avx2_input_mask_base64 = base64_avx2_input_mask_addr();
5182       StubRoutines::x86::_avx2_lut_base64 = base64_avx2_lut_addr();
5183       StubRoutines::x86::_avx2_decode_tables_base64 = base64_AVX2_decode_tables_addr();
5184       StubRoutines::x86::_avx2_decode_lut_tables_base64 = base64_AVX2_decode_LUT_tables_addr();
5185     }
5186     StubRoutines::x86::_encoding_table_base64 = base64_encoding_table_addr();
5187     if (VM_Version::supports_avx512_vbmi()) {
5188       StubRoutines::x86::_shuffle_base64 = base64_shuffle_addr();
5189       StubRoutines::x86::_lookup_lo_base64 = base64_vbmi_lookup_lo_addr();
5190       StubRoutines::x86::_lookup_hi_base64 = base64_vbmi_lookup_hi_addr();
5191       StubRoutines::x86::_lookup_lo_base64url = base64_vbmi_lookup_lo_url_addr();
5192       StubRoutines::x86::_lookup_hi_base64url = base64_vbmi_lookup_hi_url_addr();
5193       StubRoutines::x86::_pack_vec_base64 = base64_vbmi_pack_vec_addr();
5194       StubRoutines::x86::_join_0_1_base64 = base64_vbmi_join_0_1_addr();
5195       StubRoutines::x86::_join_1_2_base64 = base64_vbmi_join_1_2_addr();
5196       StubRoutines::x86::_join_2_3_base64 = base64_vbmi_join_2_3_addr();
5197     }
5198     StubRoutines::x86::_decoding_table_base64 = base64_decoding_table_addr();
5199     StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
5200     StubRoutines::_base64_decodeBlock = generate_base64_decodeBlock();
5201   }
5202 
5203 #ifdef COMPILER2
5204   if (UseMultiplyToLenIntrinsic) {
5205     StubRoutines::_multiplyToLen = generate_multiplyToLen();
5206   }
5207   if (UseSquareToLenIntrinsic) {
5208     StubRoutines::_squareToLen = generate_squareToLen();
5209   }
5210   if (UseMulAddIntrinsic) {
5211     StubRoutines::_mulAdd = generate_mulAdd();
5212   }
5213   if (VM_Version::supports_avx512_vbmi2()) {
5214     StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
5215     StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift();
5216   }
5217   if (UseMontgomeryMultiplyIntrinsic) {
5218     StubRoutines::_montgomeryMultiply
5219       = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
5220   }
5221   if (UseMontgomerySquareIntrinsic) {
5222     StubRoutines::_montgomerySquare
5223       = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
5224   }
5225 
5226   // Load x86_64_sort library on supported hardware to enable SIMD sort and partition intrinsics
5227 
5228   if (VM_Version::supports_avx512dq() || VM_Version::supports_avx2()) {
5229     void *libsimdsort = nullptr;
5230     char ebuf_[1024];
5231     char dll_name_simd_sort[JVM_MAXPATHLEN];
5232     if (os::dll_locate_lib(dll_name_simd_sort, sizeof(dll_name_simd_sort), Arguments::get_dll_dir(), "simdsort")) {
5233       libsimdsort = os::dll_load(dll_name_simd_sort, ebuf_, sizeof ebuf_);
5234     }
5235     // Get addresses for SIMD sort and partition routines
5236     if (libsimdsort != nullptr) {
5237       log_info(library)("Loaded library %s, handle " INTPTR_FORMAT, JNI_LIB_PREFIX "simdsort" JNI_LIB_SUFFIX, p2i(libsimdsort));
5238 
5239       os::snprintf_checked(ebuf_, sizeof(ebuf_), VM_Version::supports_avx512_simd_sort() ? "avx512_sort" : "avx2_sort");
5240       StubRoutines::_array_sort = (address)os::dll_lookup(libsimdsort, ebuf_);
5241 
5242       os::snprintf_checked(ebuf_, sizeof(ebuf_), VM_Version::supports_avx512_simd_sort() ? "avx512_partition" : "avx2_partition");
5243       StubRoutines::_array_partition = (address)os::dll_lookup(libsimdsort, ebuf_);
5244     }
5245   }
5246 
5247 #endif // COMPILER2
5248 #endif // COMPILER2_OR_JVMCI
5249 }
5250 
5251 StubGenerator::StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) {
5252   switch(blob_id) {
5253   case BlobId::stubgen_preuniverse_id:
5254     generate_preuniverse_stubs();
5255     break;
5256   case BlobId::stubgen_initial_id:
5257     generate_initial_stubs();
5258     break;
5259   case BlobId::stubgen_continuation_id:
5260     generate_continuation_stubs();
5261     break;
5262   case BlobId::stubgen_compiler_id:
5263     generate_compiler_stubs();
5264     break;
5265   case BlobId::stubgen_final_id:
5266     generate_final_stubs();
5267     break;
5268   default:
5269     fatal("unexpected blob id: %s", StubInfo::name(blob_id));
5270     break;
5271   };
5272 }
5273 
5274 #if INCLUDE_CDS
5275 // publish addresses of static data defined in this file and in other
5276 // stubgen stub generator files
5277 void StubGenerator::init_AOTAddressTable(GrowableArray<address>& external_addresses) {
5278   init_AOTAddressTable_adler(external_addresses);
5279   init_AOTAddressTable_aes(external_addresses);
5280   init_AOTAddressTable_cbrt(external_addresses);
5281   init_AOTAddressTable_chacha(external_addresses);
5282   // constants publishes for all of address use by cos and almost all of sin
5283   init_AOTAddressTable_constants(external_addresses);
5284   init_AOTAddressTable_dilithium(external_addresses);
5285   init_AOTAddressTable_exp(external_addresses);
5286   init_AOTAddressTable_fmod(external_addresses);
5287   init_AOTAddressTable_ghash(external_addresses);
5288   init_AOTAddressTable_kyber(external_addresses);
5289   init_AOTAddressTable_log(external_addresses);
5290   init_AOTAddressTable_poly1305(external_addresses);
5291   init_AOTAddressTable_poly_mont(external_addresses);
5292   init_AOTAddressTable_pow(external_addresses);
5293   init_AOTAddressTable_sha3(external_addresses);
5294   init_AOTAddressTable_sin(external_addresses);
5295   init_AOTAddressTable_sinh(external_addresses);
5296   init_AOTAddressTable_tan(external_addresses);
5297   init_AOTAddressTable_tanh(external_addresses);
5298 }
5299 #endif // INCLUDE_CDS
5300 
5301 void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) {
5302   StubGenerator g(code, blob_id, stub_data);
5303 }
5304 
5305 #undef __