1 /*
2 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/vmIntrinsics.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "gc/shared/barrierSetNMethod.hpp"
32 #include "gc/shared/gc_globals.hpp"
33 #include "memory/universe.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "prims/upcallLinker.hpp"
36 #include "runtime/arguments.hpp"
37 #include "runtime/continuationEntry.hpp"
38 #include "runtime/javaThread.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "stubGenerator_x86_64.hpp"
42 #ifdef COMPILER2
43 #include "opto/runtime.hpp"
44 #include "opto/c2_globals.hpp"
45 #endif
46 #if INCLUDE_JVMCI
47 #include "jvmci/jvmci_globals.hpp"
48 #endif
49
50 // For a more detailed description of the stub routine structure
51 // see the comment in stubRoutines.hpp
52
53 #define __ _masm->
54 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
55
56 #ifdef PRODUCT
57 #define BLOCK_COMMENT(str) /* nothing */
58 #else
59 #define BLOCK_COMMENT(str) __ block_comment(str)
60 #endif // PRODUCT
61
62 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
63
64 //
65 // Linux Arguments:
66 // c_rarg0: call wrapper address address
67 // c_rarg1: result address
68 // c_rarg2: result type BasicType
69 // c_rarg3: method Method*
70 // c_rarg4: (interpreter) entry point address
71 // c_rarg5: parameters intptr_t*
72 // 16(rbp): parameter size (in words) int
73 // 24(rbp): thread Thread*
74 //
75 // [ return_from_Java ] <--- rsp
76 // [ argument word n ]
77 // ...
78 // -12 [ argument word 1 ]
79 // -11 [ saved r15 ] <--- rsp_after_call
80 // -10 [ saved r14 ]
81 // -9 [ saved r13 ]
82 // -8 [ saved r12 ]
83 // -7 [ saved rbx ]
84 // -6 [ call wrapper ]
85 // -5 [ result ]
86 // -4 [ result type ]
87 // -3 [ method ]
88 // -2 [ entry point ]
89 // -1 [ parameters ]
90 // 0 [ saved rbp ] <--- rbp
91 // 1 [ return address ]
92 // 2 [ parameter size ]
93 // 3 [ thread ]
94 //
95 // Windows Arguments:
96 // c_rarg0: call wrapper address address
97 // c_rarg1: result address
98 // c_rarg2: result type BasicType
99 // c_rarg3: method Method*
100 // 48(rbp): (interpreter) entry point address
101 // 56(rbp): parameters intptr_t*
102 // 64(rbp): parameter size (in words) int
103 // 72(rbp): thread Thread*
104 //
105 // [ return_from_Java ] <--- rsp
106 // [ argument word n ]
107 // ...
108 // -28 [ argument word 1 ]
109 // -27 [ saved xmm15 ] <--- rsp after_call
110 // [ saved xmm7-xmm14 ]
111 // -9 [ saved xmm6 ] (each xmm register takes 2 slots)
112 // -7 [ saved r15 ]
113 // -6 [ saved r14 ]
114 // -5 [ saved r13 ]
115 // -4 [ saved r12 ]
116 // -3 [ saved rdi ]
117 // -2 [ saved rsi ]
118 // -1 [ saved rbx ]
119 // 0 [ saved rbp ] <--- rbp
120 // 1 [ return address ]
121 // 2 [ call wrapper ]
122 // 3 [ result ]
123 // 4 [ result type ]
124 // 5 [ method ]
125 // 6 [ entry point ]
126 // 7 [ parameters ]
127 // 8 [ parameter size ]
128 // 9 [ thread ]
129 //
130 // Windows reserves the callers stack space for arguments 1-4.
131 // We spill c_rarg0-c_rarg3 to this space.
132
133 // Call stub stack layout word offsets from rbp
134 #ifdef _WIN64
135 enum call_stub_layout {
136 xmm_save_first = 6, // save from xmm6
137 xmm_save_last = 15, // to xmm15
138 xmm_save_base = -9,
139 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
140 r15_off = -7,
141 r14_off = -6,
142 r13_off = -5,
143 r12_off = -4,
144 rdi_off = -3,
145 rsi_off = -2,
146 rbx_off = -1,
147 rbp_off = 0,
148 retaddr_off = 1,
149 call_wrapper_off = 2,
150 result_off = 3,
151 result_type_off = 4,
152 method_off = 5,
153 entry_point_off = 6,
154 parameters_off = 7,
155 parameter_size_off = 8,
156 thread_off = 9
157 };
158
159 static Address xmm_save(int reg) {
160 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
161 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
162 }
163 #else // !_WIN64
164 enum call_stub_layout {
165 rsp_after_call_off = -12,
166 mxcsr_off = rsp_after_call_off,
167 r15_off = -11,
168 r14_off = -10,
169 r13_off = -9,
170 r12_off = -8,
171 rbx_off = -7,
172 call_wrapper_off = -6,
173 result_off = -5,
174 result_type_off = -4,
175 method_off = -3,
176 entry_point_off = -2,
177 parameters_off = -1,
178 rbp_off = 0,
179 retaddr_off = 1,
180 parameter_size_off = 2,
181 thread_off = 3
182 };
183 #endif // _WIN64
184
185 address StubGenerator::generate_call_stub(address& return_address) {
186
187 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
188 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
189 "adjust this code");
190 StubId stub_id = StubId::stubgen_call_stub_id;
191 GrowableArray<address> entries;
192 int entry_count = StubInfo::entry_count(stub_id);
193 assert(entry_count == 2, "sanity check");
194 address start = load_archive_data(stub_id, &entries);
195 if (start != nullptr) {
196 assert(entries.length() == 1, "expected 1 extra entry");
197 return_address = entries.at(0);
198 return start;
199 }
200
201 StubCodeMark mark(this, stub_id);
202 start = __ pc();
203
204 // same as in generate_catch_exception()!
205 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
206
207 const Address call_wrapper (rbp, call_wrapper_off * wordSize);
208 const Address result (rbp, result_off * wordSize);
209 const Address result_type (rbp, result_type_off * wordSize);
210 const Address method (rbp, method_off * wordSize);
211 const Address entry_point (rbp, entry_point_off * wordSize);
212 const Address parameters (rbp, parameters_off * wordSize);
213 const Address parameter_size(rbp, parameter_size_off * wordSize);
214
215 // same as in generate_catch_exception()!
216 const Address thread (rbp, thread_off * wordSize);
217
218 const Address r15_save(rbp, r15_off * wordSize);
219 const Address r14_save(rbp, r14_off * wordSize);
220 const Address r13_save(rbp, r13_off * wordSize);
221 const Address r12_save(rbp, r12_off * wordSize);
222 const Address rbx_save(rbp, rbx_off * wordSize);
223
224 // stub code
225 __ enter();
226 __ subptr(rsp, -rsp_after_call_off * wordSize);
227
228 // save register parameters
229 #ifndef _WIN64
230 __ movptr(parameters, c_rarg5); // parameters
231 __ movptr(entry_point, c_rarg4); // entry_point
232 #endif
233
234 __ movptr(method, c_rarg3); // method
235 __ movl(result_type, c_rarg2); // result type
236 __ movptr(result, c_rarg1); // result
237 __ movptr(call_wrapper, c_rarg0); // call wrapper
238
239 // save regs belonging to calling function
240 __ movptr(rbx_save, rbx);
241 __ movptr(r12_save, r12);
242 __ movptr(r13_save, r13);
243 __ movptr(r14_save, r14);
244 __ movptr(r15_save, r15);
245
246 #ifdef _WIN64
247 int last_reg = 15;
248 for (int i = xmm_save_first; i <= last_reg; i++) {
249 __ movdqu(xmm_save(i), as_XMMRegister(i));
250 }
251
252 const Address rdi_save(rbp, rdi_off * wordSize);
253 const Address rsi_save(rbp, rsi_off * wordSize);
254
255 __ movptr(rsi_save, rsi);
256 __ movptr(rdi_save, rdi);
257 #else
258 const Address mxcsr_save(rbp, mxcsr_off * wordSize);
259 {
260 Label skip_ldmx;
261 __ cmp32_mxcsr_std(mxcsr_save, rax, rscratch1);
262 __ jcc(Assembler::equal, skip_ldmx);
263 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
264 __ ldmxcsr(mxcsr_std, rscratch1);
265 __ bind(skip_ldmx);
266 }
267 #endif
268
269 // Load up thread register
270 __ movptr(r15_thread, thread);
271 __ reinit_heapbase();
272
273 #ifdef ASSERT
274 // make sure we have no pending exceptions
275 {
276 Label L;
277 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
278 __ jcc(Assembler::equal, L);
279 __ stop("StubRoutines::call_stub: entered with pending exception");
280 __ bind(L);
281 }
282 #endif
283
284 // pass parameters if any
285 BLOCK_COMMENT("pass parameters if any");
286 Label parameters_done;
287 __ movl(c_rarg3, parameter_size);
288 __ testl(c_rarg3, c_rarg3);
289 __ jcc(Assembler::zero, parameters_done);
290
291 Label loop;
292 __ movptr(c_rarg2, parameters); // parameter pointer
293 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
294 __ BIND(loop);
295 __ movptr(rax, Address(c_rarg2, 0));// get parameter
296 __ addptr(c_rarg2, wordSize); // advance to next parameter
297 __ decrementl(c_rarg1); // decrement counter
298 __ push(rax); // pass parameter
299 __ jcc(Assembler::notZero, loop);
300
301 // call Java function
302 __ BIND(parameters_done);
303 __ movptr(rbx, method); // get Method*
304 __ movptr(c_rarg1, entry_point); // get entry_point
305 __ mov(r13, rsp); // set sender sp
306 BLOCK_COMMENT("call Java function");
307 __ call(c_rarg1);
308
309 BLOCK_COMMENT("call_stub_return_address:");
310 return_address = __ pc();
311 entries.append(return_address);
312
313 // store result depending on type (everything that is not
314 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
315 __ movptr(c_rarg0, result);
316 Label is_long, is_float, is_double, exit;
317 __ movl(c_rarg1, result_type);
318 __ cmpl(c_rarg1, T_OBJECT);
319 __ jcc(Assembler::equal, is_long);
320 __ cmpl(c_rarg1, T_LONG);
321 __ jcc(Assembler::equal, is_long);
322 __ cmpl(c_rarg1, T_FLOAT);
323 __ jcc(Assembler::equal, is_float);
324 __ cmpl(c_rarg1, T_DOUBLE);
325 __ jcc(Assembler::equal, is_double);
326 #ifdef ASSERT
327 // make sure the type is INT
328 {
329 Label L;
330 __ cmpl(c_rarg1, T_INT);
331 __ jcc(Assembler::equal, L);
332 __ stop("StubRoutines::call_stub: unexpected result type");
333 __ bind(L);
334 }
335 #endif
336
337 // handle T_INT case
338 __ movl(Address(c_rarg0, 0), rax);
339
340 __ BIND(exit);
341
342 // pop parameters
343 __ lea(rsp, rsp_after_call);
344
345 #ifdef ASSERT
346 // verify that threads correspond
347 {
348 Label L1, L2, L3;
349 __ cmpptr(r15_thread, thread);
350 __ jcc(Assembler::equal, L1);
351 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
352 __ bind(L1);
353 __ get_thread_slow(rbx);
354 __ cmpptr(r15_thread, thread);
355 __ jcc(Assembler::equal, L2);
356 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
357 __ bind(L2);
358 __ cmpptr(r15_thread, rbx);
359 __ jcc(Assembler::equal, L3);
360 __ stop("StubRoutines::call_stub: threads must correspond");
361 __ bind(L3);
362 }
363 #endif
364
365 __ pop_cont_fastpath();
366
367 // restore regs belonging to calling function
368 #ifdef _WIN64
369 // emit the restores for xmm regs
370 for (int i = xmm_save_first; i <= last_reg; i++) {
371 __ movdqu(as_XMMRegister(i), xmm_save(i));
372 }
373 #endif
374 __ movptr(r15, r15_save);
375 __ movptr(r14, r14_save);
376 __ movptr(r13, r13_save);
377 __ movptr(r12, r12_save);
378 __ movptr(rbx, rbx_save);
379
380 #ifdef _WIN64
381 __ movptr(rdi, rdi_save);
382 __ movptr(rsi, rsi_save);
383 #else
384 __ ldmxcsr(mxcsr_save);
385 #endif
386
387 // restore rsp
388 __ addptr(rsp, -rsp_after_call_off * wordSize);
389
390 // return
391 __ vzeroupper();
392 __ pop(rbp);
393 __ ret(0);
394
395 // handle return types different from T_INT
396 __ BIND(is_long);
397 __ movq(Address(c_rarg0, 0), rax);
398 __ jmp(exit);
399
400 __ BIND(is_float);
401 __ movflt(Address(c_rarg0, 0), xmm0);
402 __ jmp(exit);
403
404 __ BIND(is_double);
405 __ movdbl(Address(c_rarg0, 0), xmm0);
406 __ jmp(exit);
407
408 // record the stub entry and end plus the auxiliary entry
409 store_archive_data(stub_id, start, __ pc(), &entries);
410
411 return start;
412 }
413
414 // Return point for a Java call if there's an exception thrown in
415 // Java code. The exception is caught and transformed into a
416 // pending exception stored in JavaThread that can be tested from
417 // within the VM.
418 //
419 // Note: Usually the parameters are removed by the callee. In case
420 // of an exception crossing an activation frame boundary, that is
421 // not the case if the callee is compiled code => need to setup the
422 // rsp.
423 //
424 // rax: exception oop
425
426 address StubGenerator::generate_catch_exception() {
427 StubId stub_id = StubId::stubgen_catch_exception_id;
428 int entry_count = StubInfo::entry_count(stub_id);
429 assert(entry_count == 1, "sanity check");
430 address start = load_archive_data(stub_id);
431 if (start != nullptr) {
432 return start;
433 }
434
435 StubCodeMark mark(this, stub_id);
436 start = __ pc();
437
438 // same as in generate_call_stub():
439 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
440 const Address thread (rbp, thread_off * wordSize);
441
442 #ifdef ASSERT
443 // verify that threads correspond
444 {
445 Label L1, L2, L3;
446 __ cmpptr(r15_thread, thread);
447 __ jcc(Assembler::equal, L1);
448 __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
449 __ bind(L1);
450 __ get_thread_slow(rbx);
451 __ cmpptr(r15_thread, thread);
452 __ jcc(Assembler::equal, L2);
453 __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
454 __ bind(L2);
455 __ cmpptr(r15_thread, rbx);
456 __ jcc(Assembler::equal, L3);
457 __ stop("StubRoutines::catch_exception: threads must correspond");
458 __ bind(L3);
459 }
460 #endif
461
462 // set pending exception
463 __ verify_oop(rax);
464
465 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
466 // special case -- add file name string to AOT address table
467 address file = (address)AOTCodeCache::add_C_string(__FILE__);
468 __ lea(rscratch1, ExternalAddress(file));
469 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
470 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
471
472 // complete return to VM
473 assert(StubRoutines::_call_stub_return_address != nullptr,
474 "_call_stub_return_address must have been generated before");
475 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
476
477 // record the stub entry and end
478 store_archive_data(stub_id, start, __ pc());
479
480 return start;
481 }
482
483 // Continuation point for runtime calls returning with a pending
484 // exception. The pending exception check happened in the runtime
485 // or native call stub. The pending exception in Thread is
486 // converted into a Java-level exception.
487 //
488 // Contract with Java-level exception handlers:
489 // rax: exception
490 // rdx: throwing pc
491 //
492 // NOTE: At entry of this stub, exception-pc must be on stack !!
493
494 address StubGenerator::generate_forward_exception() {
495 StubId stub_id = StubId::stubgen_forward_exception_id;
496 int entry_count = StubInfo::entry_count(stub_id);
497 assert(entry_count == 1, "sanity check");
498 address start = load_archive_data(stub_id);
499 if (start != nullptr) {
500 return start;
501 }
502 StubCodeMark mark(this, stub_id);
503 start = __ pc();
504
505 // Upon entry, the sp points to the return address returning into
506 // Java (interpreted or compiled) code; i.e., the return address
507 // becomes the throwing pc.
508 //
509 // Arguments pushed before the runtime call are still on the stack
510 // but the exception handler will reset the stack pointer ->
511 // ignore them. A potential result in registers can be ignored as
512 // well.
513
514 #ifdef ASSERT
515 // make sure this code is only executed if there is a pending exception
516 {
517 Label L;
518 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
519 __ jcc(Assembler::notEqual, L);
520 __ stop("StubRoutines::forward exception: no pending exception (1)");
521 __ bind(L);
522 }
523 #endif
524
525 // compute exception handler into rbx
526 __ movptr(c_rarg0, Address(rsp, 0));
527 BLOCK_COMMENT("call exception_handler_for_return_address");
528 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
529 SharedRuntime::exception_handler_for_return_address),
530 r15_thread, c_rarg0);
531 __ mov(rbx, rax);
532
533 // setup rax & rdx, remove return address & clear pending exception
534 __ pop(rdx);
535 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
536 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
537
538 #ifdef ASSERT
539 // make sure exception is set
540 {
541 Label L;
542 __ testptr(rax, rax);
543 __ jcc(Assembler::notEqual, L);
544 __ stop("StubRoutines::forward exception: no pending exception (2)");
545 __ bind(L);
546 }
547 #endif
548
549 // continue at exception handler (return address removed)
550 // rax: exception
551 // rbx: exception handler
552 // rdx: throwing pc
553 __ verify_oop(rax);
554 __ jmp(rbx);
555
556 // record the stub entry and end
557 store_archive_data(stub_id, start, __ pc());
558
559 return start;
560 }
561
562 // Support for intptr_t OrderAccess::fence()
563 //
564 // Arguments :
565 //
566 // Result:
567 address StubGenerator::generate_orderaccess_fence() {
568 StubId stub_id = StubId::stubgen_fence_id;
569 int entry_count = StubInfo::entry_count(stub_id);
570 assert(entry_count == 1, "sanity check");
571 address start = load_archive_data(stub_id);
572 if (start != nullptr) {
573 return start;
574 }
575 StubCodeMark mark(this, stub_id);
576 start = __ pc();
577
578 __ membar(Assembler::StoreLoad);
579 __ ret(0);
580
581 // record the stub entry and end
582 store_archive_data(stub_id, start, __ pc());
583
584 return start;
585 }
586
587
588 //----------------------------------------------------------------------------------------------------
589 // Support for void verify_mxcsr()
590 //
591 // This routine is used with -Xcheck:jni to verify that native
592 // JNI code does not return to Java code without restoring the
593 // MXCSR register to our expected state.
594
595 address StubGenerator::generate_verify_mxcsr() {
596 StubId stub_id = StubId::stubgen_verify_mxcsr_id;
597 int entry_count = StubInfo::entry_count(stub_id);
598 assert(entry_count == 1, "sanity check");
599 address start = load_archive_data(stub_id);
600 if (start != nullptr) {
601 return start;
602 }
603 StubCodeMark mark(this, stub_id);
604 start = __ pc();
605
606 const Address mxcsr_save(rsp, 0);
607
608 if (CheckJNICalls) {
609 Label ok_ret;
610 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
611 __ push_ppx(rax);
612 __ subptr(rsp, wordSize); // allocate a temp location
613 __ cmp32_mxcsr_std(mxcsr_save, rax, rscratch1);
614 __ jcc(Assembler::equal, ok_ret);
615
616 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
617
618 __ ldmxcsr(mxcsr_std, rscratch1);
619
620 __ bind(ok_ret);
621 __ addptr(rsp, wordSize);
622 __ pop_ppx(rax);
623 }
624
625 __ ret(0);
626
627 // record the stub entry and end
628 store_archive_data(stub_id, start, __ pc());
629
630 return start;
631 }
632
633 address StubGenerator::generate_f2i_fixup() {
634 StubId stub_id = StubId::stubgen_f2i_fixup_id;
635 int entry_count = StubInfo::entry_count(stub_id);
636 assert(entry_count == 1, "sanity check");
637 address start = load_archive_data(stub_id);
638 if (start != nullptr) {
639 return start;
640 }
641 StubCodeMark mark(this, stub_id);
642 Address inout(rsp, 5 * wordSize); // return address + 4 saves
643
644 start = __ pc();
645
646 Label L;
647
648 __ push_ppx(rax);
649 __ push_ppx(c_rarg3);
650 __ push_ppx(c_rarg2);
651 __ push_ppx(c_rarg1);
652
653 __ movl(rax, 0x7f800000);
654 __ xorl(c_rarg3, c_rarg3);
655 __ movl(c_rarg2, inout);
656 __ movl(c_rarg1, c_rarg2);
657 __ andl(c_rarg1, 0x7fffffff);
658 __ cmpl(rax, c_rarg1); // NaN? -> 0
659 __ jcc(Assembler::negative, L);
660 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
661 __ movl(c_rarg3, 0x80000000);
662 __ movl(rax, 0x7fffffff);
663 __ cmovl(Assembler::positive, c_rarg3, rax);
664
665 __ bind(L);
666 __ movptr(inout, c_rarg3);
667
668 __ pop_ppx(c_rarg1);
669 __ pop_ppx(c_rarg2);
670 __ pop_ppx(c_rarg3);
671 __ pop_ppx(rax);
672
673 __ ret(0);
674
675 // record the stub entry and end
676 store_archive_data(stub_id, start, __ pc());
677
678 return start;
679 }
680
681 address StubGenerator::generate_f2l_fixup() {
682 StubId stub_id = StubId::stubgen_f2l_fixup_id;
683 int entry_count = StubInfo::entry_count(stub_id);
684 assert(entry_count == 1, "sanity check");
685 address start = load_archive_data(stub_id);
686 if (start != nullptr) {
687 return start;
688 }
689 StubCodeMark mark(this, stub_id);
690 Address inout(rsp, 5 * wordSize); // return address + 4 saves
691 start = __ pc();
692
693 Label L;
694
695 __ push_ppx(rax);
696 __ push_ppx(c_rarg3);
697 __ push_ppx(c_rarg2);
698 __ push_ppx(c_rarg1);
699
700 __ movl(rax, 0x7f800000);
701 __ xorl(c_rarg3, c_rarg3);
702 __ movl(c_rarg2, inout);
703 __ movl(c_rarg1, c_rarg2);
704 __ andl(c_rarg1, 0x7fffffff);
705 __ cmpl(rax, c_rarg1); // NaN? -> 0
706 __ jcc(Assembler::negative, L);
707 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
708 __ mov64(c_rarg3, 0x8000000000000000);
709 __ mov64(rax, 0x7fffffffffffffff);
710 __ cmov(Assembler::positive, c_rarg3, rax);
711
712 __ bind(L);
713 __ movptr(inout, c_rarg3);
714
715 __ pop_ppx(c_rarg1);
716 __ pop_ppx(c_rarg2);
717 __ pop_ppx(c_rarg3);
718 __ pop_ppx(rax);
719
720 __ ret(0);
721
722 // record the stub entry and end
723 store_archive_data(stub_id, start, __ pc());
724
725 return start;
726 }
727
728 address StubGenerator::generate_d2i_fixup() {
729 StubId stub_id = StubId::stubgen_d2i_fixup_id;
730 int entry_count = StubInfo::entry_count(stub_id);
731 assert(entry_count == 1, "sanity check");
732 address start = load_archive_data(stub_id);
733 if (start != nullptr) {
734 return start;
735 }
736 StubCodeMark mark(this, stub_id);
737 Address inout(rsp, 6 * wordSize); // return address + 5 saves
738
739 start = __ pc();
740
741 Label L;
742
743 __ push_ppx(rax);
744 __ push_ppx(c_rarg3);
745 __ push_ppx(c_rarg2);
746 __ push_ppx(c_rarg1);
747 __ push_ppx(c_rarg0);
748
749 __ movl(rax, 0x7ff00000);
750 __ movq(c_rarg2, inout);
751 __ movl(c_rarg3, c_rarg2);
752 __ mov(c_rarg1, c_rarg2);
753 __ mov(c_rarg0, c_rarg2);
754 __ negl(c_rarg3);
755 __ shrptr(c_rarg1, 0x20);
756 __ orl(c_rarg3, c_rarg2);
757 __ andl(c_rarg1, 0x7fffffff);
758 __ xorl(c_rarg2, c_rarg2);
759 __ shrl(c_rarg3, 0x1f);
760 __ orl(c_rarg1, c_rarg3);
761 __ cmpl(rax, c_rarg1);
762 __ jcc(Assembler::negative, L); // NaN -> 0
763 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
764 __ movl(c_rarg2, 0x80000000);
765 __ movl(rax, 0x7fffffff);
766 __ cmov(Assembler::positive, c_rarg2, rax);
767
768 __ bind(L);
769 __ movptr(inout, c_rarg2);
770
771 __ pop_ppx(c_rarg0);
772 __ pop_ppx(c_rarg1);
773 __ pop_ppx(c_rarg2);
774 __ pop_ppx(c_rarg3);
775 __ pop_ppx(rax);
776
777 __ ret(0);
778
779 // record the stub entry and end
780 store_archive_data(stub_id, start, __ pc());
781
782 return start;
783 }
784
785 address StubGenerator::generate_d2l_fixup() {
786 StubId stub_id = StubId::stubgen_d2l_fixup_id;
787 int entry_count = StubInfo::entry_count(stub_id);
788 assert(entry_count == 1, "sanity check");
789 address start = load_archive_data(stub_id);
790 if (start != nullptr) {
791 return start;
792 }
793 StubCodeMark mark(this, stub_id);
794 Address inout(rsp, 6 * wordSize); // return address + 5 saves
795
796 start = __ pc();
797
798 Label L;
799
800 __ push_ppx(rax);
801 __ push_ppx(c_rarg3);
802 __ push_ppx(c_rarg2);
803 __ push_ppx(c_rarg1);
804 __ push_ppx(c_rarg0);
805
806 __ movl(rax, 0x7ff00000);
807 __ movq(c_rarg2, inout);
808 __ movl(c_rarg3, c_rarg2);
809 __ mov(c_rarg1, c_rarg2);
810 __ mov(c_rarg0, c_rarg2);
811 __ negl(c_rarg3);
812 __ shrptr(c_rarg1, 0x20);
813 __ orl(c_rarg3, c_rarg2);
814 __ andl(c_rarg1, 0x7fffffff);
815 __ xorl(c_rarg2, c_rarg2);
816 __ shrl(c_rarg3, 0x1f);
817 __ orl(c_rarg1, c_rarg3);
818 __ cmpl(rax, c_rarg1);
819 __ jcc(Assembler::negative, L); // NaN -> 0
820 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
821 __ mov64(c_rarg2, 0x8000000000000000);
822 __ mov64(rax, 0x7fffffffffffffff);
823 __ cmovq(Assembler::positive, c_rarg2, rax);
824
825 __ bind(L);
826 __ movq(inout, c_rarg2);
827
828 __ pop_ppx(c_rarg0);
829 __ pop_ppx(c_rarg1);
830 __ pop_ppx(c_rarg2);
831 __ pop_ppx(c_rarg3);
832 __ pop_ppx(rax);
833
834 __ ret(0);
835
836 // record the stub entry and end
837 store_archive_data(stub_id, start, __ pc());
838
839 return start;
840 }
841
842 address StubGenerator::generate_count_leading_zeros_lut() {
843 StubId stub_id = StubId::stubgen_vector_count_leading_zeros_lut_id;
844 int entry_count = StubInfo::entry_count(stub_id);
845 assert(entry_count == 1, "sanity check");
846 address start = load_archive_data(stub_id);
847 if (start != nullptr) {
848 return start;
849 }
850 __ align64();
851 StubCodeMark mark(this, stub_id);
852 start = __ pc();
853
854 __ emit_data64(0x0101010102020304, relocInfo::none);
855 __ emit_data64(0x0000000000000000, relocInfo::none);
856 __ emit_data64(0x0101010102020304, relocInfo::none);
857 __ emit_data64(0x0000000000000000, relocInfo::none);
858 __ emit_data64(0x0101010102020304, relocInfo::none);
859 __ emit_data64(0x0000000000000000, relocInfo::none);
860 __ emit_data64(0x0101010102020304, relocInfo::none);
861 __ emit_data64(0x0000000000000000, relocInfo::none);
862
863 // record the stub entry and end
864 store_archive_data(stub_id, start, __ pc());
865
866 return start;
867 }
868
869 address StubGenerator::generate_popcount_avx_lut() {
870 StubId stub_id = StubId::stubgen_vector_popcount_lut_id;
871 int entry_count = StubInfo::entry_count(stub_id);
872 assert(entry_count == 1, "sanity check");
873 address start = load_archive_data(stub_id);
874 if (start != nullptr) {
875 return start;
876 }
877 __ align64();
878 StubCodeMark mark(this, stub_id);
879 start = __ pc();
880
881 __ emit_data64(0x0302020102010100, relocInfo::none);
882 __ emit_data64(0x0403030203020201, relocInfo::none);
883 __ emit_data64(0x0302020102010100, relocInfo::none);
884 __ emit_data64(0x0403030203020201, relocInfo::none);
885 __ emit_data64(0x0302020102010100, relocInfo::none);
886 __ emit_data64(0x0403030203020201, relocInfo::none);
887 __ emit_data64(0x0302020102010100, relocInfo::none);
888 __ emit_data64(0x0403030203020201, relocInfo::none);
889
890 // record the stub entry and end
891 store_archive_data(stub_id, start, __ pc());
892
893 return start;
894 }
895
896 void StubGenerator::generate_iota_indices() {
897 StubId stub_id = StubId::stubgen_vector_iota_indices_id;
898 GrowableArray<address> entries;
899 int entry_count = StubInfo::entry_count(stub_id);
900 assert(entry_count == VECTOR_IOTA_COUNT, "sanity check");
901 address start = load_archive_data(stub_id, &entries);
902 if (start != nullptr) {
903 assert(entries.length() == VECTOR_IOTA_COUNT - 1,
904 "unexpected extra entry count %d", entries.length());
905 StubRoutines::x86::_vector_iota_indices[0] = start;
906 for (int i = 1; i < VECTOR_IOTA_COUNT; i++) {
907 StubRoutines::x86::_vector_iota_indices[i] = entries.at(i - 1);
908 }
909 return;
910 }
911 __ align(CodeEntryAlignment);
912 StubCodeMark mark(this, stub_id);
913 start = __ pc();
914 // B
915 __ emit_data64(0x0706050403020100, relocInfo::none);
916 __ emit_data64(0x0F0E0D0C0B0A0908, relocInfo::none);
917 __ emit_data64(0x1716151413121110, relocInfo::none);
918 __ emit_data64(0x1F1E1D1C1B1A1918, relocInfo::none);
919 __ emit_data64(0x2726252423222120, relocInfo::none);
920 __ emit_data64(0x2F2E2D2C2B2A2928, relocInfo::none);
921 __ emit_data64(0x3736353433323130, relocInfo::none);
922 __ emit_data64(0x3F3E3D3C3B3A3938, relocInfo::none);
923 entries.append(__ pc());
924 // W
925 __ emit_data64(0x0003000200010000, relocInfo::none);
926 __ emit_data64(0x0007000600050004, relocInfo::none);
927 __ emit_data64(0x000B000A00090008, relocInfo::none);
928 __ emit_data64(0x000F000E000D000C, relocInfo::none);
929 __ emit_data64(0x0013001200110010, relocInfo::none);
930 __ emit_data64(0x0017001600150014, relocInfo::none);
931 __ emit_data64(0x001B001A00190018, relocInfo::none);
932 __ emit_data64(0x001F001E001D001C, relocInfo::none);
933 entries.append(__ pc());
934 // D
935 __ emit_data64(0x0000000100000000, relocInfo::none);
936 __ emit_data64(0x0000000300000002, relocInfo::none);
937 __ emit_data64(0x0000000500000004, relocInfo::none);
938 __ emit_data64(0x0000000700000006, relocInfo::none);
939 __ emit_data64(0x0000000900000008, relocInfo::none);
940 __ emit_data64(0x0000000B0000000A, relocInfo::none);
941 __ emit_data64(0x0000000D0000000C, relocInfo::none);
942 __ emit_data64(0x0000000F0000000E, relocInfo::none);
943 entries.append(__ pc());
944 // Q
945 __ emit_data64(0x0000000000000000, relocInfo::none);
946 __ emit_data64(0x0000000000000001, relocInfo::none);
947 __ emit_data64(0x0000000000000002, relocInfo::none);
948 __ emit_data64(0x0000000000000003, relocInfo::none);
949 __ emit_data64(0x0000000000000004, relocInfo::none);
950 __ emit_data64(0x0000000000000005, relocInfo::none);
951 __ emit_data64(0x0000000000000006, relocInfo::none);
952 __ emit_data64(0x0000000000000007, relocInfo::none);
953 entries.append(__ pc());
954 // D - FP
955 __ emit_data64(0x3F80000000000000, relocInfo::none); // 0.0f, 1.0f
956 __ emit_data64(0x4040000040000000, relocInfo::none); // 2.0f, 3.0f
957 __ emit_data64(0x40A0000040800000, relocInfo::none); // 4.0f, 5.0f
958 __ emit_data64(0x40E0000040C00000, relocInfo::none); // 6.0f, 7.0f
959 __ emit_data64(0x4110000041000000, relocInfo::none); // 8.0f, 9.0f
960 __ emit_data64(0x4130000041200000, relocInfo::none); // 10.0f, 11.0f
961 __ emit_data64(0x4150000041400000, relocInfo::none); // 12.0f, 13.0f
962 __ emit_data64(0x4170000041600000, relocInfo::none); // 14.0f, 15.0f
963 entries.append(__ pc());
964 // Q - FP
965 __ emit_data64(0x0000000000000000, relocInfo::none); // 0.0d
966 __ emit_data64(0x3FF0000000000000, relocInfo::none); // 1.0d
967 __ emit_data64(0x4000000000000000, relocInfo::none); // 2.0d
968 __ emit_data64(0x4008000000000000, relocInfo::none); // 3.0d
969 __ emit_data64(0x4010000000000000, relocInfo::none); // 4.0d
970 __ emit_data64(0x4014000000000000, relocInfo::none); // 5.0d
971 __ emit_data64(0x4018000000000000, relocInfo::none); // 6.0d
972 __ emit_data64(0x401c000000000000, relocInfo::none); // 7.0d
973
974 // record the stub entry and end
975 store_archive_data(stub_id, start, __ pc(), &entries);
976
977 // install the entry addresses in the entry array
978 assert(entries.length() == entry_count - 1,
979 "unexpected entries count %d", entries.length());
980 StubRoutines::x86::_vector_iota_indices[0] = start;
981 for (int i = 1; i < VECTOR_IOTA_COUNT; i++) {
982 StubRoutines::x86::_vector_iota_indices[i] = entries.at(i - 1);
983 }
984 }
985
986 address StubGenerator::generate_vector_reverse_bit_lut() {
987 StubId stub_id = StubId::stubgen_vector_reverse_bit_lut_id;
988 int entry_count = StubInfo::entry_count(stub_id);
989 assert(entry_count == 1, "sanity check");
990 address start = load_archive_data(stub_id);
991 if (start != nullptr) {
992 return start;
993 }
994 __ align(CodeEntryAlignment);
995 StubCodeMark mark(this, stub_id);
996 start = __ pc();
997
998 __ emit_data64(0x0E060A020C040800, relocInfo::none);
999 __ emit_data64(0x0F070B030D050901, relocInfo::none);
1000 __ emit_data64(0x0E060A020C040800, relocInfo::none);
1001 __ emit_data64(0x0F070B030D050901, relocInfo::none);
1002 __ emit_data64(0x0E060A020C040800, relocInfo::none);
1003 __ emit_data64(0x0F070B030D050901, relocInfo::none);
1004 __ emit_data64(0x0E060A020C040800, relocInfo::none);
1005 __ emit_data64(0x0F070B030D050901, relocInfo::none);
1006
1007 // record the stub entry and end
1008 store_archive_data(stub_id, start, __ pc());
1009
1010 return start;
1011 }
1012
1013 address StubGenerator::generate_vector_reverse_byte_perm_mask_long() {
1014 StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_long_id;
1015 int entry_count = StubInfo::entry_count(stub_id);
1016 assert(entry_count == 1, "sanity check");
1017 address start = load_archive_data(stub_id);
1018 if (start != nullptr) {
1019 return start;
1020 }
1021 __ align(CodeEntryAlignment);
1022 StubCodeMark mark(this, stub_id);
1023 start = __ pc();
1024
1025 __ emit_data64(0x0001020304050607, relocInfo::none);
1026 __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1027 __ emit_data64(0x0001020304050607, relocInfo::none);
1028 __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1029 __ emit_data64(0x0001020304050607, relocInfo::none);
1030 __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1031 __ emit_data64(0x0001020304050607, relocInfo::none);
1032 __ emit_data64(0x08090A0B0C0D0E0F, relocInfo::none);
1033
1034 // record the stub entry and end
1035 store_archive_data(stub_id, start, __ pc());
1036
1037 return start;
1038 }
1039
1040 address StubGenerator::generate_vector_reverse_byte_perm_mask_int() {
1041 StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_int_id;
1042 int entry_count = StubInfo::entry_count(stub_id);
1043 assert(entry_count == 1, "sanity check");
1044 address start = load_archive_data(stub_id);
1045 if (start != nullptr) {
1046 return start;
1047 }
1048 __ align(CodeEntryAlignment);
1049 StubCodeMark mark(this, stub_id);
1050 start = __ pc();
1051
1052 __ emit_data64(0x0405060700010203, relocInfo::none);
1053 __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1054 __ emit_data64(0x0405060700010203, relocInfo::none);
1055 __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1056 __ emit_data64(0x0405060700010203, relocInfo::none);
1057 __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1058 __ emit_data64(0x0405060700010203, relocInfo::none);
1059 __ emit_data64(0x0C0D0E0F08090A0B, relocInfo::none);
1060
1061 // record the stub entry and end
1062 store_archive_data(stub_id, start, __ pc());
1063
1064 return start;
1065 }
1066
1067 address StubGenerator::generate_vector_reverse_byte_perm_mask_short() {
1068 StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_short_id;
1069 int entry_count = StubInfo::entry_count(stub_id);
1070 assert(entry_count == 1, "sanity check");
1071 address start = load_archive_data(stub_id);
1072 if (start != nullptr) {
1073 return start;
1074 }
1075 __ align(CodeEntryAlignment);
1076 StubCodeMark mark(this, stub_id);
1077 start = __ pc();
1078
1079 __ emit_data64(0x0607040502030001, relocInfo::none);
1080 __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1081 __ emit_data64(0x0607040502030001, relocInfo::none);
1082 __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1083 __ emit_data64(0x0607040502030001, relocInfo::none);
1084 __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1085 __ emit_data64(0x0607040502030001, relocInfo::none);
1086 __ emit_data64(0x0E0F0C0D0A0B0809, relocInfo::none);
1087
1088 // record the stub entry and end
1089 store_archive_data(stub_id, start, __ pc());
1090
1091 return start;
1092 }
1093
1094 address StubGenerator::generate_vector_byte_shuffle_mask() {
1095 StubId stub_id = StubId::stubgen_vector_byte_shuffle_mask_id;
1096 int entry_count = StubInfo::entry_count(stub_id);
1097 assert(entry_count == 1, "sanity check");
1098 address start = load_archive_data(stub_id);
1099 if (start != nullptr) {
1100 return start;
1101 }
1102 __ align(CodeEntryAlignment);
1103 StubCodeMark mark(this, stub_id);
1104 start = __ pc();
1105
1106 __ emit_data64(0x7070707070707070, relocInfo::none);
1107 __ emit_data64(0x7070707070707070, relocInfo::none);
1108 __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none);
1109 __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none);
1110
1111 // record the stub entry and end
1112 store_archive_data(stub_id, start, __ pc());
1113
1114 return start;
1115 }
1116
1117 address StubGenerator::generate_fp_mask(StubId stub_id, int64_t mask) {
1118 int entry_count = StubInfo::entry_count(stub_id);
1119 assert(entry_count == 1, "sanity check");
1120 address start = load_archive_data(stub_id);
1121 if (start != nullptr) {
1122 return start;
1123 }
1124 __ align(CodeEntryAlignment);
1125 StubCodeMark mark(this, stub_id);
1126 start = __ pc();
1127
1128 __ emit_data64( mask, relocInfo::none );
1129 __ emit_data64( mask, relocInfo::none );
1130
1131 // record the stub entry and end
1132 store_archive_data(stub_id, start, __ pc());
1133
1134 return start;
1135 }
1136
1137 address StubGenerator::generate_compress_perm_table(StubId stub_id) {
1138 int esize;
1139 switch (stub_id) {
1140 case StubId::stubgen_compress_perm_table32_id:
1141 esize = 32;
1142 break;
1143 case StubId::stubgen_compress_perm_table64_id:
1144 esize = 64;
1145 break;
1146 default:
1147 ShouldNotReachHere();
1148 }
1149 int entry_count = StubInfo::entry_count(stub_id);
1150 assert(entry_count == 1, "sanity check");
1151 address start = load_archive_data(stub_id);
1152 if (start != nullptr) {
1153 return start;
1154 }
1155 __ align(CodeEntryAlignment);
1156 StubCodeMark mark(this, stub_id);
1157 start = __ pc();
1158 if (esize == 32) {
1159 // Loop to generate 256 x 8 int compression permute index table. A row is
1160 // accessed using 8 bit index computed using vector mask. An entry in
1161 // a row holds either a valid permute index corresponding to set bit position
1162 // or a -1 (default) value.
1163 for (int mask = 0; mask < 256; mask++) {
1164 int ctr = 0;
1165 for (int j = 0; j < 8; j++) {
1166 if (mask & (1 << j)) {
1167 __ emit_data(j, relocInfo::none);
1168 ctr++;
1169 }
1170 }
1171 for (; ctr < 8; ctr++) {
1172 __ emit_data(-1, relocInfo::none);
1173 }
1174 }
1175 } else {
1176 assert(esize == 64, "");
1177 // Loop to generate 16 x 4 long compression permute index table. A row is
1178 // accessed using 4 bit index computed using vector mask. An entry in
1179 // a row holds either a valid permute index pair for a quadword corresponding
1180 // to set bit position or a -1 (default) value.
1181 for (int mask = 0; mask < 16; mask++) {
1182 int ctr = 0;
1183 for (int j = 0; j < 4; j++) {
1184 if (mask & (1 << j)) {
1185 __ emit_data(2 * j, relocInfo::none);
1186 __ emit_data(2 * j + 1, relocInfo::none);
1187 ctr++;
1188 }
1189 }
1190 for (; ctr < 4; ctr++) {
1191 __ emit_data64(-1L, relocInfo::none);
1192 }
1193 }
1194 }
1195 // record the stub entry and end
1196 store_archive_data(stub_id, start, __ pc());
1197
1198 return start;
1199 }
1200
1201 address StubGenerator::generate_expand_perm_table(StubId stub_id) {
1202 int esize;
1203 switch (stub_id) {
1204 case StubId::stubgen_expand_perm_table32_id:
1205 esize = 32;
1206 break;
1207 case StubId::stubgen_expand_perm_table64_id:
1208 esize = 64;
1209 break;
1210 default:
1211 ShouldNotReachHere();
1212 }
1213 int entry_count = StubInfo::entry_count(stub_id);
1214 assert(entry_count == 1, "sanity check");
1215 address start = load_archive_data(stub_id);
1216 if (start != nullptr) {
1217 return start;
1218 }
1219 __ align(CodeEntryAlignment);
1220 StubCodeMark mark(this, stub_id);
1221 start = __ pc();
1222 if (esize == 32) {
1223 // Loop to generate 256 x 8 int expand permute index table. A row is accessed
1224 // using 8 bit index computed using vector mask. An entry in a row holds either
1225 // a valid permute index (starting from least significant lane) placed at poisition
1226 // corresponding to set bit position or a -1 (default) value.
1227 for (int mask = 0; mask < 256; mask++) {
1228 int ctr = 0;
1229 for (int j = 0; j < 8; j++) {
1230 if (mask & (1 << j)) {
1231 __ emit_data(ctr++, relocInfo::none);
1232 } else {
1233 __ emit_data(-1, relocInfo::none);
1234 }
1235 }
1236 }
1237 } else {
1238 assert(esize == 64, "");
1239 // Loop to generate 16 x 4 long expand permute index table. A row is accessed
1240 // using 4 bit index computed using vector mask. An entry in a row holds either
1241 // a valid doubleword permute index pair representing a quadword index (starting
1242 // from least significant lane) placed at poisition corresponding to set bit
1243 // position or a -1 (default) value.
1244 for (int mask = 0; mask < 16; mask++) {
1245 int ctr = 0;
1246 for (int j = 0; j < 4; j++) {
1247 if (mask & (1 << j)) {
1248 __ emit_data(2 * ctr, relocInfo::none);
1249 __ emit_data(2 * ctr + 1, relocInfo::none);
1250 ctr++;
1251 } else {
1252 __ emit_data64(-1L, relocInfo::none);
1253 }
1254 }
1255 }
1256 }
1257 // record the stub entry and end
1258 store_archive_data(stub_id, start, __ pc());
1259
1260 return start;
1261 }
1262
1263 address StubGenerator::generate_vector_mask(StubId stub_id, int64_t mask) {
1264 int entry_count = StubInfo::entry_count(stub_id);
1265 assert(entry_count == 1, "sanity check");
1266 address start = load_archive_data(stub_id);
1267 if (start != nullptr) {
1268 return start;
1269 }
1270 __ align(CodeEntryAlignment);
1271 StubCodeMark mark(this, stub_id);
1272 start = __ pc();
1273
1274 __ emit_data64(mask, relocInfo::none);
1275 __ emit_data64(mask, relocInfo::none);
1276 __ emit_data64(mask, relocInfo::none);
1277 __ emit_data64(mask, relocInfo::none);
1278 __ emit_data64(mask, relocInfo::none);
1279 __ emit_data64(mask, relocInfo::none);
1280 __ emit_data64(mask, relocInfo::none);
1281 __ emit_data64(mask, relocInfo::none);
1282
1283 // record the stub entry and end
1284 store_archive_data(stub_id, start, __ pc());
1285
1286 return start;
1287 }
1288
1289 address StubGenerator::generate_vector_byte_perm_mask() {
1290 StubId stub_id = StubId::stubgen_vector_byte_perm_mask_id;
1291 int entry_count = StubInfo::entry_count(stub_id);
1292 assert(entry_count == 1, "sanity check");
1293 address start = load_archive_data(stub_id);
1294 if (start != nullptr) {
1295 return start;
1296 }
1297 __ align(CodeEntryAlignment);
1298 StubCodeMark mark(this, stub_id);
1299 start = __ pc();
1300
1301 __ emit_data64(0x0000000000000001, relocInfo::none);
1302 __ emit_data64(0x0000000000000003, relocInfo::none);
1303 __ emit_data64(0x0000000000000005, relocInfo::none);
1304 __ emit_data64(0x0000000000000007, relocInfo::none);
1305 __ emit_data64(0x0000000000000000, relocInfo::none);
1306 __ emit_data64(0x0000000000000002, relocInfo::none);
1307 __ emit_data64(0x0000000000000004, relocInfo::none);
1308 __ emit_data64(0x0000000000000006, relocInfo::none);
1309
1310 // record the stub entry and end
1311 store_archive_data(stub_id, start, __ pc());
1312
1313 return start;
1314 }
1315
1316 address StubGenerator::generate_vector_fp_mask(StubId stub_id, int64_t mask) {
1317 int entry_count = StubInfo::entry_count(stub_id);
1318 assert(entry_count == 1, "sanity check");
1319 address start = load_archive_data(stub_id);
1320 if (start != nullptr) {
1321 return start;
1322 }
1323 __ align(CodeEntryAlignment);
1324 StubCodeMark mark(this, stub_id);
1325 start = __ pc();
1326
1327 __ emit_data64(mask, relocInfo::none);
1328 __ emit_data64(mask, relocInfo::none);
1329 __ emit_data64(mask, relocInfo::none);
1330 __ emit_data64(mask, relocInfo::none);
1331 __ emit_data64(mask, relocInfo::none);
1332 __ emit_data64(mask, relocInfo::none);
1333 __ emit_data64(mask, relocInfo::none);
1334 __ emit_data64(mask, relocInfo::none);
1335
1336 // record the stub entry and end
1337 store_archive_data(stub_id, start, __ pc());
1338
1339 return start;
1340 }
1341
1342 address StubGenerator::generate_vector_custom_i32(StubId stub_id, Assembler::AvxVectorLen len,
1343 int32_t val0, int32_t val1, int32_t val2, int32_t val3,
1344 int32_t val4, int32_t val5, int32_t val6, int32_t val7,
1345 int32_t val8, int32_t val9, int32_t val10, int32_t val11,
1346 int32_t val12, int32_t val13, int32_t val14, int32_t val15) {
1347 int entry_count = StubInfo::entry_count(stub_id);
1348 assert(entry_count == 1, "sanity check");
1349 address start = load_archive_data(stub_id);
1350 if (start != nullptr) {
1351 return start;
1352 }
1353 __ align(CodeEntryAlignment);
1354 StubCodeMark mark(this, stub_id);
1355 start = __ pc();
1356
1357 assert(len != Assembler::AVX_NoVec, "vector len must be specified");
1358 __ emit_data(val0, relocInfo::none, 0);
1359 __ emit_data(val1, relocInfo::none, 0);
1360 __ emit_data(val2, relocInfo::none, 0);
1361 __ emit_data(val3, relocInfo::none, 0);
1362 if (len >= Assembler::AVX_256bit) {
1363 __ emit_data(val4, relocInfo::none, 0);
1364 __ emit_data(val5, relocInfo::none, 0);
1365 __ emit_data(val6, relocInfo::none, 0);
1366 __ emit_data(val7, relocInfo::none, 0);
1367 if (len >= Assembler::AVX_512bit) {
1368 __ emit_data(val8, relocInfo::none, 0);
1369 __ emit_data(val9, relocInfo::none, 0);
1370 __ emit_data(val10, relocInfo::none, 0);
1371 __ emit_data(val11, relocInfo::none, 0);
1372 __ emit_data(val12, relocInfo::none, 0);
1373 __ emit_data(val13, relocInfo::none, 0);
1374 __ emit_data(val14, relocInfo::none, 0);
1375 __ emit_data(val15, relocInfo::none, 0);
1376 }
1377 }
1378 // record the stub entry and end
1379 store_archive_data(stub_id, start, __ pc());
1380
1381 return start;
1382 }
1383
1384 // Non-destructive plausibility checks for oops
1385 //
1386 // Arguments:
1387 // all args on stack!
1388 //
1389 // Stack after saving c_rarg3:
1390 // [tos + 0]: saved c_rarg3
1391 // [tos + 1]: saved c_rarg2
1392 // [tos + 2]: saved r12 (several TemplateTable methods use it)
1393 // [tos + 3]: saved flags
1394 // [tos + 4]: return address
1395 // * [tos + 5]: error message (char*)
1396 // * [tos + 6]: object to verify (oop)
1397 // * [tos + 7]: saved rax - saved by caller and bashed
1398 // * [tos + 8]: saved r10 (rscratch1) - saved by caller
1399 // * = popped on exit
1400 address StubGenerator::generate_verify_oop() {
1401 StubId stub_id = StubId::stubgen_verify_oop_id;
1402 int entry_count = StubInfo::entry_count(stub_id);
1403 assert(entry_count == 1, "sanity check");
1404 address start = load_archive_data(stub_id);
1405 if (start != nullptr) {
1406 return start;
1407 }
1408 StubCodeMark mark(this, stub_id);
1409 start = __ pc();
1410
1411 Label exit, error;
1412
1413 __ pushf();
1414 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()), rscratch1);
1415
1416 __ push_ppx(r12);
1417
1418 // save c_rarg2 and c_rarg3
1419 __ push_ppx(c_rarg2);
1420 __ push_ppx(c_rarg3);
1421
1422 enum {
1423 // After previous pushes.
1424 oop_to_verify = 6 * wordSize,
1425 saved_rax = 7 * wordSize,
1426 saved_r10 = 8 * wordSize,
1427
1428 // Before the call to MacroAssembler::debug(), see below.
1429 return_addr = 16 * wordSize,
1430 error_msg = 17 * wordSize
1431 };
1432
1433 // get object
1434 __ movptr(rax, Address(rsp, oop_to_verify));
1435
1436 // make sure object is 'reasonable'
1437 __ testptr(rax, rax);
1438 __ jcc(Assembler::zero, exit); // if obj is null it is OK
1439
1440 BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
1441 bs_asm->check_oop(_masm, rax, c_rarg2, c_rarg3, error);
1442
1443 // return if everything seems ok
1444 __ bind(exit);
1445 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1446 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1447 __ pop_ppx(c_rarg3); // restore c_rarg3
1448 __ pop_ppx(c_rarg2); // restore c_rarg2
1449 __ pop_ppx(r12); // restore r12
1450 __ popf(); // restore flags
1451 __ ret(4 * wordSize); // pop caller saved stuff
1452
1453 // handle errors
1454 __ bind(error);
1455 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1456 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1457 __ pop_ppx(c_rarg3); // get saved c_rarg3 back
1458 __ pop_ppx(c_rarg2); // get saved c_rarg2 back
1459 __ pop_ppx(r12); // get saved r12 back
1460 __ popf(); // get saved flags off stack --
1461 // will be ignored
1462
1463 __ pusha(); // push registers
1464 // (rip is already
1465 // already pushed)
1466 // debug(char* msg, int64_t pc, int64_t regs[])
1467 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1468 // pushed all the registers, so now the stack looks like:
1469 // [tos + 0] 16 saved registers
1470 // [tos + 16] return address
1471 // * [tos + 17] error message (char*)
1472 // * [tos + 18] object to verify (oop)
1473 // * [tos + 19] saved rax - saved by caller and bashed
1474 // * [tos + 20] saved r10 (rscratch1) - saved by caller
1475 // * = popped on exit
1476
1477 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
1478 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address
1479 __ movq(c_rarg2, rsp); // pass address of regs on stack
1480 __ mov(r12, rsp); // remember rsp
1481 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1482 __ andptr(rsp, -16); // align stack as required by ABI
1483 BLOCK_COMMENT("call MacroAssembler::debug");
1484 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1485 __ hlt();
1486
1487 // record the stub entry and end
1488 store_archive_data(stub_id, start, __ pc());
1489
1490 return start;
1491 }
1492
1493
1494 // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1495 //
1496 // Outputs:
1497 // rdi - rcx
1498 // rsi - rdx
1499 // rdx - r8
1500 // rcx - r9
1501 //
1502 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter
1503 // are non-volatile. r9 and r10 should not be used by the caller.
1504 //
1505 void StubGenerator::setup_arg_regs(int nargs) {
1506 const Register saved_rdi = r9;
1507 const Register saved_rsi = r10;
1508 assert(nargs == 3 || nargs == 4, "else fix");
1509 #ifdef _WIN64
1510 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1511 "unexpected argument registers");
1512 if (nargs == 4) {
1513 __ mov(rax, r9); // r9 is also saved_rdi
1514 }
1515 __ movptr(saved_rdi, rdi);
1516 __ movptr(saved_rsi, rsi);
1517 __ mov(rdi, rcx); // c_rarg0
1518 __ mov(rsi, rdx); // c_rarg1
1519 __ mov(rdx, r8); // c_rarg2
1520 if (nargs == 4) {
1521 __ mov(rcx, rax); // c_rarg3 (via rax)
1522 }
1523 #else
1524 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1525 "unexpected argument registers");
1526 #endif
1527 DEBUG_ONLY(_regs_in_thread = false;)
1528 }
1529
1530
1531 void StubGenerator::restore_arg_regs() {
1532 assert(!_regs_in_thread, "wrong call to restore_arg_regs");
1533 const Register saved_rdi = r9;
1534 const Register saved_rsi = r10;
1535 #ifdef _WIN64
1536 __ movptr(rdi, saved_rdi);
1537 __ movptr(rsi, saved_rsi);
1538 #endif
1539 }
1540
1541
1542 // This is used in places where r10 is a scratch register, and can
1543 // be adapted if r9 is needed also.
1544 void StubGenerator::setup_arg_regs_using_thread(int nargs) {
1545 const Register saved_r15 = r9;
1546 assert(nargs == 3 || nargs == 4, "else fix");
1547 #ifdef _WIN64
1548 if (nargs == 4) {
1549 __ mov(rax, r9); // r9 is also saved_r15
1550 }
1551 __ mov(saved_r15, r15); // r15 is callee saved and needs to be restored
1552 __ get_thread_slow(r15_thread);
1553 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1554 "unexpected argument registers");
1555 __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
1556 __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);
1557
1558 __ mov(rdi, rcx); // c_rarg0
1559 __ mov(rsi, rdx); // c_rarg1
1560 __ mov(rdx, r8); // c_rarg2
1561 if (nargs == 4) {
1562 __ mov(rcx, rax); // c_rarg3 (via rax)
1563 }
1564 #else
1565 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1566 "unexpected argument registers");
1567 #endif
1568 DEBUG_ONLY(_regs_in_thread = true;)
1569 }
1570
1571
1572 void StubGenerator::restore_arg_regs_using_thread() {
1573 assert(_regs_in_thread, "wrong call to restore_arg_regs");
1574 const Register saved_r15 = r9;
1575 #ifdef _WIN64
1576 __ get_thread_slow(r15_thread);
1577 __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
1578 __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
1579 __ mov(r15, saved_r15); // r15 is callee saved and needs to be restored
1580 #endif
1581 }
1582
1583
1584 void StubGenerator::setup_argument_regs(BasicType type) {
1585 if (type == T_BYTE || type == T_SHORT) {
1586 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1587 // r9 and r10 may be used to save non-volatile registers
1588 } else {
1589 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
1590 // r9 is used to save r15_thread
1591 }
1592 }
1593
1594
1595 void StubGenerator::restore_argument_regs(BasicType type) {
1596 if (type == T_BYTE || type == T_SHORT) {
1597 restore_arg_regs();
1598 } else {
1599 restore_arg_regs_using_thread();
1600 }
1601 }
1602
1603 address StubGenerator::generate_data_cache_writeback() {
1604 const Register src = c_rarg0; // source address
1605 StubId stub_id = StubId::stubgen_data_cache_writeback_id;
1606 int entry_count = StubInfo::entry_count(stub_id);
1607 assert(entry_count == 1, "sanity check");
1608 address start = load_archive_data(stub_id);
1609 if (start != nullptr) {
1610 return start;
1611 }
1612 __ align(CodeEntryAlignment);
1613 StubCodeMark mark(this, stub_id);
1614
1615 start = __ pc();
1616
1617 __ enter();
1618 __ cache_wb(Address(src, 0));
1619 __ leave();
1620 __ ret(0);
1621
1622 // record the stub entry and end
1623 store_archive_data(stub_id, start, __ pc());
1624
1625 return start;
1626 }
1627
1628 address StubGenerator::generate_data_cache_writeback_sync() {
1629 const Register is_pre = c_rarg0; // pre or post sync
1630 StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id;
1631 int entry_count = StubInfo::entry_count(stub_id);
1632 assert(entry_count == 1, "sanity check");
1633 address start = load_archive_data(stub_id);
1634 if (start != nullptr) {
1635 return start;
1636 }
1637 __ align(CodeEntryAlignment);
1638 StubCodeMark mark(this, stub_id);
1639
1640 // pre wbsync is a no-op
1641 // post wbsync translates to an sfence
1642
1643 Label skip;
1644 start = __ pc();
1645
1646 __ enter();
1647 __ cmpl(is_pre, 0);
1648 __ jcc(Assembler::notEqual, skip);
1649 __ cache_wbsync(false);
1650 __ bind(skip);
1651 __ leave();
1652 __ ret(0);
1653
1654 // record the stub entry and end
1655 store_archive_data(stub_id, start, __ pc());
1656
1657 return start;
1658 }
1659
1660 // ofs and limit are use for multi-block byte array.
1661 // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
1662 address StubGenerator::generate_md5_implCompress(StubId stub_id) {
1663 bool multi_block;
1664 switch (stub_id) {
1665 case StubId::stubgen_md5_implCompress_id:
1666 multi_block = false;
1667 break;
1668 case StubId::stubgen_md5_implCompressMB_id:
1669 multi_block = true;
1670 break;
1671 default:
1672 ShouldNotReachHere();
1673 }
1674 int entry_count = StubInfo::entry_count(stub_id);
1675 assert(entry_count == 1, "sanity check");
1676 address start = load_archive_data(stub_id);
1677 if (start != nullptr) {
1678 return start;
1679 }
1680 __ align(CodeEntryAlignment);
1681 StubCodeMark mark(this, stub_id);
1682 start = __ pc();
1683
1684 const Register buf_param = r15;
1685 const Address state_param(rsp, 0 * wordSize);
1686 const Address ofs_param (rsp, 1 * wordSize );
1687 const Address limit_param(rsp, 1 * wordSize + 4);
1688
1689 __ enter();
1690 __ push_ppx(rbx);
1691 __ push_ppx(rdi);
1692 __ push_ppx(rsi);
1693 __ push_ppx(r15);
1694 __ subptr(rsp, 2 * wordSize);
1695
1696 __ movptr(buf_param, c_rarg0);
1697 __ movptr(state_param, c_rarg1);
1698 if (multi_block) {
1699 __ movl(ofs_param, c_rarg2);
1700 __ movl(limit_param, c_rarg3);
1701 }
1702 __ fast_md5(buf_param, state_param, ofs_param, limit_param, multi_block);
1703
1704 __ addptr(rsp, 2 * wordSize);
1705 __ pop_ppx(r15);
1706 __ pop_ppx(rsi);
1707 __ pop_ppx(rdi);
1708 __ pop_ppx(rbx);
1709 __ leave();
1710 __ ret(0);
1711
1712 // record the stub entry and end
1713 store_archive_data(stub_id, start, __ pc());
1714
1715 return start;
1716 }
1717
1718 address StubGenerator::generate_upper_word_mask() {
1719 StubId stub_id = StubId::stubgen_upper_word_mask_id;
1720 int entry_count = StubInfo::entry_count(stub_id);
1721 assert(entry_count == 1, "sanity check");
1722 address start = load_archive_data(stub_id);
1723 if (start != nullptr) {
1724 return start;
1725 }
1726 __ align64();
1727 StubCodeMark mark(this, stub_id);
1728 start = __ pc();
1729
1730 __ emit_data64(0x0000000000000000, relocInfo::none);
1731 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
1732
1733 // record the stub entry and end
1734 store_archive_data(stub_id, start, __ pc());
1735
1736 return start;
1737 }
1738
1739 address StubGenerator::generate_shuffle_byte_flip_mask() {
1740 StubId stub_id = StubId::stubgen_shuffle_byte_flip_mask_id;
1741 int entry_count = StubInfo::entry_count(stub_id);
1742 assert(entry_count == 1, "sanity check");
1743 address start = load_archive_data(stub_id);
1744 if (start != nullptr) {
1745 return start;
1746 }
1747 __ align64();
1748 StubCodeMark mark(this, stub_id);
1749 start = __ pc();
1750
1751 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
1752 __ emit_data64(0x0001020304050607, relocInfo::none);
1753
1754 // record the stub entry and end
1755 store_archive_data(stub_id, start, __ pc());
1756
1757 return start;
1758 }
1759
1760 // ofs and limit are use for multi-block byte array.
1761 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
1762 address StubGenerator::generate_sha1_implCompress(StubId stub_id) {
1763 bool multi_block;
1764 switch (stub_id) {
1765 case StubId::stubgen_sha1_implCompress_id:
1766 multi_block = false;
1767 break;
1768 case StubId::stubgen_sha1_implCompressMB_id:
1769 multi_block = true;
1770 break;
1771 default:
1772 ShouldNotReachHere();
1773 }
1774 int entry_count = StubInfo::entry_count(stub_id);
1775 assert(entry_count == 1, "sanity check");
1776 address start = load_archive_data(stub_id);
1777 if (start != nullptr) {
1778 return start;
1779 }
1780 __ align(CodeEntryAlignment);
1781 StubCodeMark mark(this, stub_id);
1782 start = __ pc();
1783
1784 Register buf = c_rarg0;
1785 Register state = c_rarg1;
1786 Register ofs = c_rarg2;
1787 Register limit = c_rarg3;
1788
1789 const XMMRegister abcd = xmm0;
1790 const XMMRegister e0 = xmm1;
1791 const XMMRegister e1 = xmm2;
1792 const XMMRegister msg0 = xmm3;
1793
1794 const XMMRegister msg1 = xmm4;
1795 const XMMRegister msg2 = xmm5;
1796 const XMMRegister msg3 = xmm6;
1797 const XMMRegister shuf_mask = xmm7;
1798
1799 __ enter();
1800
1801 __ subptr(rsp, 4 * wordSize);
1802
1803 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
1804 buf, state, ofs, limit, rsp, multi_block);
1805
1806 __ addptr(rsp, 4 * wordSize);
1807
1808 __ leave();
1809 __ ret(0);
1810
1811 // record the stub entry and end
1812 store_archive_data(stub_id, start, __ pc());
1813
1814 return start;
1815 }
1816
1817 address StubGenerator::generate_pshuffle_byte_flip_mask(address& entry_00ba, address& entry_dc00) {
1818 StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_id;
1819 GrowableArray<address> entries;
1820 int entry_count = StubInfo::entry_count(stub_id);
1821 assert(entry_count == 3, "sanity check");
1822 address start = load_archive_data(stub_id, &entries);
1823 if (start != nullptr) {
1824 assert(entries.length() == entry_count - 1,
1825 "unexpected extra entry count %d", entries.length());
1826 entry_00ba = entries.at(0);
1827 entry_dc00 = entries.at(1);
1828 assert(VM_Version::supports_avx2() == (entry_00ba != nullptr && entry_dc00 != nullptr),
1829 "entries cannot be null when avx2 is enabled");
1830 return start;
1831 }
1832 __ align64();
1833 StubCodeMark mark(this, stub_id);
1834 start = __ pc();
1835 address entry2 = nullptr;
1836 address entry3 = nullptr;
1837 __ emit_data64(0x0405060700010203, relocInfo::none);
1838 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
1839
1840 if (VM_Version::supports_avx2()) {
1841 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
1842 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
1843 // _SHUF_00BA
1844 entry2 = __ pc();
1845 __ emit_data64(0x0b0a090803020100, relocInfo::none);
1846 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1847 __ emit_data64(0x0b0a090803020100, relocInfo::none);
1848 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1849 // _SHUF_DC00
1850 entry3 = __ pc();
1851 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1852 __ emit_data64(0x0b0a090803020100, relocInfo::none);
1853 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1854 __ emit_data64(0x0b0a090803020100, relocInfo::none);
1855 }
1856 // have to track the 2nd and 3rd entries even if they are null
1857 entry_00ba = entry2;
1858 entries.push(entry_00ba);
1859 entry_dc00 = entry3;
1860 entries.push(entry_dc00);
1861
1862 // record the stub entry and end plus all the auxiliary entries
1863 store_archive_data(stub_id, start, __ pc(), &entries);
1864
1865 return start;
1866 }
1867
1868 //Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
1869 address StubGenerator::generate_pshuffle_byte_flip_mask_sha512(address& entry_ymm_lo) {
1870 StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_sha512_id;
1871 GrowableArray<address> entries;
1872 int entry_count = StubInfo::entry_count(stub_id);
1873 assert(entry_count == 2, "sanity check");
1874 address start = load_archive_data(stub_id, &entries);
1875 if (start != nullptr) {
1876 assert(entries.length() == entry_count - 1,
1877 "unexpected extra entry count %d", entries.length());
1878 entry_ymm_lo = entries.at(0);
1879 assert(VM_Version::supports_avx2() == (entry_ymm_lo != nullptr),
1880 "entry cannot be null when avx2 is enabled");
1881 return start;
1882 }
1883 __ align32();
1884 StubCodeMark mark(this, stub_id);
1885 start = __ pc();
1886 address entry2 = nullptr;
1887 if (VM_Version::supports_avx2()) {
1888 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
1889 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
1890 __ emit_data64(0x1011121314151617, relocInfo::none);
1891 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
1892 // capture 2nd entry
1893 entry2 = __ pc();
1894 __ emit_data64(0x0000000000000000, relocInfo::none); //MASK_YMM_LO
1895 __ emit_data64(0x0000000000000000, relocInfo::none);
1896 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1897 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
1898 }
1899 // have to track the 2nd entry even if it is null
1900 entry_ymm_lo = entry2;
1901 entries.push(entry2);
1902 // record the stub entry and end
1903 store_archive_data(stub_id, start, __ pc(), &entries);
1904
1905 return start;
1906 }
1907
1908 // ofs and limit are use for multi-block byte array.
1909 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
1910 address StubGenerator::generate_sha256_implCompress(StubId stub_id) {
1911 bool multi_block;
1912 switch (stub_id) {
1913 case StubId::stubgen_sha256_implCompress_id:
1914 multi_block = false;
1915 break;
1916 case StubId::stubgen_sha256_implCompressMB_id:
1917 multi_block = true;
1918 break;
1919 default:
1920 ShouldNotReachHere();
1921 }
1922 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
1923 int entry_count = StubInfo::entry_count(stub_id);
1924 assert(entry_count == 1, "sanity check");
1925 address start = load_archive_data(stub_id);
1926 if (start != nullptr) {
1927 return start;
1928 }
1929 __ align(CodeEntryAlignment);
1930 StubCodeMark mark(this, stub_id);
1931 start = __ pc();
1932
1933 Register buf = c_rarg0;
1934 Register state = c_rarg1;
1935 Register ofs = c_rarg2;
1936 Register limit = c_rarg3;
1937
1938 const XMMRegister msg = xmm0;
1939 const XMMRegister state0 = xmm1;
1940 const XMMRegister state1 = xmm2;
1941 const XMMRegister msgtmp0 = xmm3;
1942
1943 const XMMRegister msgtmp1 = xmm4;
1944 const XMMRegister msgtmp2 = xmm5;
1945 const XMMRegister msgtmp3 = xmm6;
1946 const XMMRegister msgtmp4 = xmm7;
1947
1948 const XMMRegister shuf_mask = xmm8;
1949
1950 __ enter();
1951
1952 __ subptr(rsp, 4 * wordSize);
1953
1954 if (VM_Version::supports_sha()) {
1955 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
1956 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
1957 } else if (VM_Version::supports_avx2()) {
1958 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
1959 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
1960 }
1961 __ addptr(rsp, 4 * wordSize);
1962 __ vzeroupper();
1963 __ leave();
1964 __ ret(0);
1965
1966 // record the stub entry and end
1967 store_archive_data(stub_id, start, __ pc());
1968
1969 return start;
1970 }
1971
1972 address StubGenerator::generate_sha512_implCompress(StubId stub_id) {
1973 bool multi_block;
1974 switch (stub_id) {
1975 case StubId::stubgen_sha512_implCompress_id:
1976 multi_block = false;
1977 break;
1978 case StubId::stubgen_sha512_implCompressMB_id:
1979 multi_block = true;
1980 break;
1981 default:
1982 ShouldNotReachHere();
1983 }
1984 assert(VM_Version::supports_avx2(), "");
1985 assert(VM_Version::supports_bmi2() || VM_Version::supports_sha512(), "");
1986 int entry_count = StubInfo::entry_count(stub_id);
1987 assert(entry_count == 1, "sanity check");
1988 address start = load_archive_data(stub_id);
1989 if (start != nullptr) {
1990 return start;
1991 }
1992 __ align(CodeEntryAlignment);
1993 StubCodeMark mark(this, stub_id);
1994 start = __ pc();
1995
1996 Register buf = c_rarg0;
1997 Register state = c_rarg1;
1998 Register ofs = c_rarg2;
1999 Register limit = c_rarg3;
2000
2001 __ enter();
2002
2003 if (VM_Version::supports_sha512()) {
2004 __ sha512_update_ni_x1(state, buf, ofs, limit, multi_block);
2005 } else {
2006 const XMMRegister msg = xmm0;
2007 const XMMRegister state0 = xmm1;
2008 const XMMRegister state1 = xmm2;
2009 const XMMRegister msgtmp0 = xmm3;
2010 const XMMRegister msgtmp1 = xmm4;
2011 const XMMRegister msgtmp2 = xmm5;
2012 const XMMRegister msgtmp3 = xmm6;
2013 const XMMRegister msgtmp4 = xmm7;
2014
2015 const XMMRegister shuf_mask = xmm8;
2016 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
2017 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
2018 }
2019 __ vzeroupper();
2020 __ leave();
2021 __ ret(0);
2022
2023 // record the stub entry and end
2024 store_archive_data(stub_id, start, __ pc());
2025
2026 return start;
2027 }
2028
2029 address StubGenerator::base64_shuffle_addr() {
2030 StubId stub_id = StubId::stubgen_shuffle_base64_id;
2031 int entry_count = StubInfo::entry_count(stub_id);
2032 assert(entry_count == 1, "sanity check");
2033 address start = load_archive_data(stub_id);
2034 if (start != nullptr) {
2035 return start;
2036 }
2037 __ align64();
2038 StubCodeMark mark(this, stub_id);
2039 start = __ pc();
2040
2041 assert(((unsigned long long)start & 0x3f) == 0,
2042 "Alignment problem (0x%08llx)", (unsigned long long)start);
2043 __ emit_data64(0x0405030401020001, relocInfo::none);
2044 __ emit_data64(0x0a0b090a07080607, relocInfo::none);
2045 __ emit_data64(0x10110f100d0e0c0d, relocInfo::none);
2046 __ emit_data64(0x1617151613141213, relocInfo::none);
2047 __ emit_data64(0x1c1d1b1c191a1819, relocInfo::none);
2048 __ emit_data64(0x222321221f201e1f, relocInfo::none);
2049 __ emit_data64(0x2829272825262425, relocInfo::none);
2050 __ emit_data64(0x2e2f2d2e2b2c2a2b, relocInfo::none);
2051
2052 // record the stub entry and end
2053 store_archive_data(stub_id, start, __ pc());
2054
2055 return start;
2056 }
2057
2058 address StubGenerator::base64_avx2_shuffle_addr() {
2059 StubId stub_id = StubId::stubgen_avx2_shuffle_base64_id;
2060 int entry_count = StubInfo::entry_count(stub_id);
2061 assert(entry_count == 1, "sanity check");
2062 address start = load_archive_data(stub_id);
2063 if (start != nullptr) {
2064 return start;
2065 }
2066 __ align32();
2067 StubCodeMark mark(this, stub_id);
2068 start = __ pc();
2069
2070 __ emit_data64(0x0809070805060405, relocInfo::none);
2071 __ emit_data64(0x0e0f0d0e0b0c0a0b, relocInfo::none);
2072 __ emit_data64(0x0405030401020001, relocInfo::none);
2073 __ emit_data64(0x0a0b090a07080607, relocInfo::none);
2074
2075 // record the stub entry and end
2076 store_archive_data(stub_id, start, __ pc());
2077
2078 return start;
2079 }
2080
2081 address StubGenerator::base64_avx2_input_mask_addr() {
2082 StubId stub_id = StubId::stubgen_avx2_input_mask_base64_id;
2083 int entry_count = StubInfo::entry_count(stub_id);
2084 assert(entry_count == 1, "sanity check");
2085 address start = load_archive_data(stub_id);
2086 if (start != nullptr) {
2087 return start;
2088 }
2089 __ align32();
2090 StubCodeMark mark(this, stub_id);
2091 start = __ pc();
2092
2093 __ emit_data64(0x8000000000000000, relocInfo::none);
2094 __ emit_data64(0x8000000080000000, relocInfo::none);
2095 __ emit_data64(0x8000000080000000, relocInfo::none);
2096 __ emit_data64(0x8000000080000000, relocInfo::none);
2097
2098 // record the stub entry and end
2099 store_archive_data(stub_id, start, __ pc());
2100
2101 return start;
2102 }
2103
2104 address StubGenerator::base64_avx2_lut_addr() {
2105 StubId stub_id = StubId::stubgen_avx2_lut_base64_id;
2106 int entry_count = StubInfo::entry_count(stub_id);
2107 assert(entry_count == 1, "sanity check");
2108 address start = load_archive_data(stub_id);
2109 if (start != nullptr) {
2110 return start;
2111 }
2112 __ align32();
2113 StubCodeMark mark(this, stub_id);
2114 start = __ pc();
2115
2116 __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2117 __ emit_data64(0x0000f0edfcfcfcfc, relocInfo::none);
2118 __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2119 __ emit_data64(0x0000f0edfcfcfcfc, relocInfo::none);
2120
2121 // URL LUT
2122 __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2123 __ emit_data64(0x000020effcfcfcfc, relocInfo::none);
2124 __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
2125 __ emit_data64(0x000020effcfcfcfc, relocInfo::none);
2126
2127 // record the stub entry and end
2128 store_archive_data(stub_id, start, __ pc());
2129
2130 return start;
2131 }
2132
2133 address StubGenerator::base64_encoding_table_addr() {
2134 StubId stub_id = StubId::stubgen_encoding_table_base64_id;
2135 int entry_count = StubInfo::entry_count(stub_id);
2136 assert(entry_count == 1, "sanity check");
2137 address start = load_archive_data(stub_id);
2138 if (start != nullptr) {
2139 return start;
2140 }
2141 __ align64();
2142 StubCodeMark mark(this, stub_id);
2143 start = __ pc();
2144
2145 assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start);
2146 __ emit_data64(0x4847464544434241, relocInfo::none);
2147 __ emit_data64(0x504f4e4d4c4b4a49, relocInfo::none);
2148 __ emit_data64(0x5857565554535251, relocInfo::none);
2149 __ emit_data64(0x6665646362615a59, relocInfo::none);
2150 __ emit_data64(0x6e6d6c6b6a696867, relocInfo::none);
2151 __ emit_data64(0x767574737271706f, relocInfo::none);
2152 __ emit_data64(0x333231307a797877, relocInfo::none);
2153 __ emit_data64(0x2f2b393837363534, relocInfo::none);
2154
2155 // URL table
2156 __ emit_data64(0x4847464544434241, relocInfo::none);
2157 __ emit_data64(0x504f4e4d4c4b4a49, relocInfo::none);
2158 __ emit_data64(0x5857565554535251, relocInfo::none);
2159 __ emit_data64(0x6665646362615a59, relocInfo::none);
2160 __ emit_data64(0x6e6d6c6b6a696867, relocInfo::none);
2161 __ emit_data64(0x767574737271706f, relocInfo::none);
2162 __ emit_data64(0x333231307a797877, relocInfo::none);
2163 __ emit_data64(0x5f2d393837363534, relocInfo::none);
2164
2165 // record the stub entry and end
2166 store_archive_data(stub_id, start, __ pc());
2167
2168 return start;
2169 }
2170
2171 // Code for generating Base64 encoding.
2172 // Intrinsic function prototype in Base64.java:
2173 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp,
2174 // boolean isURL) {
2175 address StubGenerator::generate_base64_encodeBlock()
2176 {
2177 StubId stub_id = StubId::stubgen_base64_encodeBlock_id;
2178 int entry_count = StubInfo::entry_count(stub_id);
2179 assert(entry_count == 1, "sanity check");
2180 address start = load_archive_data(stub_id);
2181 if (start != nullptr) {
2182 return start;
2183 }
2184 __ align(CodeEntryAlignment);
2185 StubCodeMark mark(this, stub_id);
2186 start = __ pc();
2187
2188 __ enter();
2189
2190 // Save callee-saved registers before using them
2191 __ push_ppx(r12);
2192 __ push_ppx(r13);
2193 __ push_ppx(r14);
2194 __ push_ppx(r15);
2195
2196 // arguments
2197 const Register source = c_rarg0; // Source Array
2198 const Register start_offset = c_rarg1; // start offset
2199 const Register end_offset = c_rarg2; // end offset
2200 const Register dest = c_rarg3; // destination array
2201
2202 #ifndef _WIN64
2203 const Register dp = c_rarg4; // Position for writing to dest array
2204 const Register isURL = c_rarg5; // Base64 or URL character set
2205 #else
2206 const Address dp_mem(rbp, 6 * wordSize); // length is on stack on Win64
2207 const Address isURL_mem(rbp, 7 * wordSize);
2208 const Register isURL = r10; // pick the volatile windows register
2209 const Register dp = r12;
2210 __ movl(dp, dp_mem);
2211 __ movl(isURL, isURL_mem);
2212 #endif
2213
2214 const Register length = r14;
2215 const Register encode_table = r13;
2216 Label L_process3, L_exit, L_processdata, L_vbmiLoop, L_not512, L_32byteLoop;
2217
2218 // calculate length from offsets
2219 __ movl(length, end_offset);
2220 __ subl(length, start_offset);
2221 __ jcc(Assembler::lessEqual, L_exit);
2222
2223 // Code for 512-bit VBMI encoding. Encodes 48 input bytes into 64
2224 // output bytes. We read 64 input bytes and ignore the last 16, so be
2225 // sure not to read past the end of the input buffer.
2226 if (VM_Version::supports_avx512_vbmi()) {
2227 __ cmpl(length, 64); // Do not overrun input buffer.
2228 __ jcc(Assembler::below, L_not512);
2229
2230 __ shll(isURL, 6); // index into decode table based on isURL
2231 __ lea(encode_table, ExternalAddress(StubRoutines::x86::base64_encoding_table_addr()));
2232 __ addptr(encode_table, isURL);
2233 __ shrl(isURL, 6); // restore isURL
2234
2235 __ mov64(rax, 0x3036242a1016040aull); // Shifts
2236 __ evmovdquq(xmm3, ExternalAddress(StubRoutines::x86::base64_shuffle_addr()), Assembler::AVX_512bit, r15);
2237 __ evmovdquq(xmm2, Address(encode_table, 0), Assembler::AVX_512bit);
2238 __ evpbroadcastq(xmm1, rax, Assembler::AVX_512bit);
2239
2240 __ align32();
2241 __ BIND(L_vbmiLoop);
2242
2243 __ vpermb(xmm0, xmm3, Address(source, start_offset), Assembler::AVX_512bit);
2244 __ subl(length, 48);
2245
2246 // Put the input bytes into the proper lanes for writing, then
2247 // encode them.
2248 __ evpmultishiftqb(xmm0, xmm1, xmm0, Assembler::AVX_512bit);
2249 __ vpermb(xmm0, xmm0, xmm2, Assembler::AVX_512bit);
2250
2251 // Write to destination
2252 __ evmovdquq(Address(dest, dp), xmm0, Assembler::AVX_512bit);
2253
2254 __ addptr(dest, 64);
2255 __ addptr(source, 48);
2256 __ cmpl(length, 64);
2257 __ jcc(Assembler::aboveEqual, L_vbmiLoop);
2258
2259 __ vzeroupper();
2260 }
2261
2262 __ BIND(L_not512);
2263 if (VM_Version::supports_avx2()) {
2264 /*
2265 ** This AVX2 encoder is based off the paper at:
2266 ** https://dl.acm.org/doi/10.1145/3132709
2267 **
2268 ** We use AVX2 SIMD instructions to encode 24 bytes into 32
2269 ** output bytes.
2270 **
2271 */
2272 // Lengths under 32 bytes are done with scalar routine
2273 __ cmpl(length, 31);
2274 __ jcc(Assembler::belowEqual, L_process3);
2275
2276 // Set up supporting constant table data
2277 __ vmovdqu(xmm9, ExternalAddress(StubRoutines::x86::base64_avx2_shuffle_addr()), rax);
2278 // 6-bit mask for 2nd and 4th (and multiples) 6-bit values
2279 __ movl(rax, 0x0fc0fc00);
2280 __ movdl(xmm8, rax);
2281 __ vmovdqu(xmm1, ExternalAddress(StubRoutines::x86::base64_avx2_input_mask_addr()), rax);
2282 __ vpbroadcastd(xmm8, xmm8, Assembler::AVX_256bit);
2283
2284 // Multiplication constant for "shifting" right by 6 and 10
2285 // bits
2286 __ movl(rax, 0x04000040);
2287
2288 __ subl(length, 24);
2289 __ movdl(xmm7, rax);
2290 __ vpbroadcastd(xmm7, xmm7, Assembler::AVX_256bit);
2291
2292 // For the first load, we mask off reading of the first 4
2293 // bytes into the register. This is so we can get 4 3-byte
2294 // chunks into each lane of the register, avoiding having to
2295 // handle end conditions. We then shuffle these bytes into a
2296 // specific order so that manipulation is easier.
2297 //
2298 // The initial read loads the XMM register like this:
2299 //
2300 // Lower 128-bit lane:
2301 // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2302 // | XX | XX | XX | XX | A0 | A1 | A2 | B0 | B1 | B2 | C0 | C1
2303 // | C2 | D0 | D1 | D2 |
2304 // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2305 //
2306 // Upper 128-bit lane:
2307 // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2308 // | E0 | E1 | E2 | F0 | F1 | F2 | G0 | G1 | G2 | H0 | H1 | H2
2309 // | XX | XX | XX | XX |
2310 // +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+
2311 //
2312 // Where A0 is the first input byte, B0 is the fourth, etc.
2313 // The alphabetical significance denotes the 3 bytes to be
2314 // consumed and encoded into 4 bytes.
2315 //
2316 // We then shuffle the register so each 32-bit word contains
2317 // the sequence:
2318 // A1 A0 A2 A1, B1, B0, B2, B1, etc.
2319 // Each of these byte sequences are then manipulated into 4
2320 // 6-bit values ready for encoding.
2321 //
2322 // If we focus on one set of 3-byte chunks, changing the
2323 // nomenclature such that A0 => a, A1 => b, and A2 => c, we
2324 // shuffle such that each 24-bit chunk contains:
2325 //
2326 // b7 b6 b5 b4 b3 b2 b1 b0 | a7 a6 a5 a4 a3 a2 a1 a0 | c7 c6
2327 // c5 c4 c3 c2 c1 c0 | b7 b6 b5 b4 b3 b2 b1 b0
2328 // Explain this step.
2329 // b3 b2 b1 b0 c5 c4 c3 c2 | c1 c0 d5 d4 d3 d2 d1 d0 | a5 a4
2330 // a3 a2 a1 a0 b5 b4 | b3 b2 b1 b0 c5 c4 c3 c2
2331 //
2332 // W first and off all but bits 4-9 and 16-21 (c5..c0 and
2333 // a5..a0) and shift them using a vector multiplication
2334 // operation (vpmulhuw) which effectively shifts c right by 6
2335 // bits and a right by 10 bits. We similarly mask bits 10-15
2336 // (d5..d0) and 22-27 (b5..b0) and shift them left by 8 and 4
2337 // bits respectively. This is done using vpmullw. We end up
2338 // with 4 6-bit values, thus splitting the 3 input bytes,
2339 // ready for encoding:
2340 // 0 0 d5..d0 0 0 c5..c0 0 0 b5..b0 0 0 a5..a0
2341 //
2342 // For translation, we recognize that there are 5 distinct
2343 // ranges of legal Base64 characters as below:
2344 //
2345 // +-------------+-------------+------------+
2346 // | 6-bit value | ASCII range | offset |
2347 // +-------------+-------------+------------+
2348 // | 0..25 | A..Z | 65 |
2349 // | 26..51 | a..z | 71 |
2350 // | 52..61 | 0..9 | -4 |
2351 // | 62 | + or - | -19 or -17 |
2352 // | 63 | / or _ | -16 or 32 |
2353 // +-------------+-------------+------------+
2354 //
2355 // We note that vpshufb does a parallel lookup in a
2356 // destination register using the lower 4 bits of bytes from a
2357 // source register. If we use a saturated subtraction and
2358 // subtract 51 from each 6-bit value, bytes from [0,51]
2359 // saturate to 0, and [52,63] map to a range of [1,12]. We
2360 // distinguish the [0,25] and [26,51] ranges by assigning a
2361 // value of 13 for all 6-bit values less than 26. We end up
2362 // with:
2363 //
2364 // +-------------+-------------+------------+
2365 // | 6-bit value | Reduced | offset |
2366 // +-------------+-------------+------------+
2367 // | 0..25 | 13 | 65 |
2368 // | 26..51 | 0 | 71 |
2369 // | 52..61 | 0..9 | -4 |
2370 // | 62 | 11 | -19 or -17 |
2371 // | 63 | 12 | -16 or 32 |
2372 // +-------------+-------------+------------+
2373 //
2374 // We then use a final vpshufb to add the appropriate offset,
2375 // translating the bytes.
2376 //
2377 // Load input bytes - only 28 bytes. Mask the first load to
2378 // not load into the full register.
2379 __ vpmaskmovd(xmm1, xmm1, Address(source, start_offset, Address::times_1, -4), Assembler::AVX_256bit);
2380
2381 // Move 3-byte chunks of input (12 bytes) into 16 bytes,
2382 // ordering by:
2383 // 1, 0, 2, 1; 4, 3, 5, 4; etc. This groups 6-bit chunks
2384 // for easy masking
2385 __ vpshufb(xmm1, xmm1, xmm9, Assembler::AVX_256bit);
2386
2387 __ addl(start_offset, 24);
2388
2389 // Load masking register for first and third (and multiples)
2390 // 6-bit values.
2391 __ movl(rax, 0x003f03f0);
2392 __ movdl(xmm6, rax);
2393 __ vpbroadcastd(xmm6, xmm6, Assembler::AVX_256bit);
2394 // Multiplication constant for "shifting" left by 4 and 8 bits
2395 __ movl(rax, 0x01000010);
2396 __ movdl(xmm5, rax);
2397 __ vpbroadcastd(xmm5, xmm5, Assembler::AVX_256bit);
2398
2399 // Isolate 6-bit chunks of interest
2400 __ vpand(xmm0, xmm8, xmm1, Assembler::AVX_256bit);
2401
2402 // Load constants for encoding
2403 __ movl(rax, 0x19191919);
2404 __ movdl(xmm3, rax);
2405 __ vpbroadcastd(xmm3, xmm3, Assembler::AVX_256bit);
2406 __ movl(rax, 0x33333333);
2407 __ movdl(xmm4, rax);
2408 __ vpbroadcastd(xmm4, xmm4, Assembler::AVX_256bit);
2409
2410 // Shift output bytes 0 and 2 into proper lanes
2411 __ vpmulhuw(xmm2, xmm0, xmm7, Assembler::AVX_256bit);
2412
2413 // Mask and shift output bytes 1 and 3 into proper lanes and
2414 // combine
2415 __ vpand(xmm0, xmm6, xmm1, Assembler::AVX_256bit);
2416 __ vpmullw(xmm0, xmm5, xmm0, Assembler::AVX_256bit);
2417 __ vpor(xmm0, xmm0, xmm2, Assembler::AVX_256bit);
2418
2419 // Find out which are 0..25. This indicates which input
2420 // values fall in the range of 'A'-'Z', which require an
2421 // additional offset (see comments above)
2422 __ vpcmpgtb(xmm2, xmm0, xmm3, Assembler::AVX_256bit);
2423 __ vpsubusb(xmm1, xmm0, xmm4, Assembler::AVX_256bit);
2424 __ vpsubb(xmm1, xmm1, xmm2, Assembler::AVX_256bit);
2425
2426 // Load the proper lookup table
2427 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_avx2_lut_addr()));
2428 __ movl(r15, isURL);
2429 __ shll(r15, 5);
2430 __ vmovdqu(xmm2, Address(r11, r15));
2431
2432 // Shuffle the offsets based on the range calculation done
2433 // above. This allows us to add the correct offset to the
2434 // 6-bit value corresponding to the range documented above.
2435 __ vpshufb(xmm1, xmm2, xmm1, Assembler::AVX_256bit);
2436 __ vpaddb(xmm0, xmm1, xmm0, Assembler::AVX_256bit);
2437
2438 // Store the encoded bytes
2439 __ vmovdqu(Address(dest, dp), xmm0);
2440 __ addl(dp, 32);
2441
2442 __ cmpl(length, 31);
2443 __ jcc(Assembler::belowEqual, L_process3);
2444
2445 __ align32();
2446 __ BIND(L_32byteLoop);
2447
2448 // Get next 32 bytes
2449 __ vmovdqu(xmm1, Address(source, start_offset, Address::times_1, -4));
2450
2451 __ subl(length, 24);
2452 __ addl(start_offset, 24);
2453
2454 // This logic is identical to the above, with only constant
2455 // register loads removed. Shuffle the input, mask off 6-bit
2456 // chunks, shift them into place, then add the offset to
2457 // encode.
2458 __ vpshufb(xmm1, xmm1, xmm9, Assembler::AVX_256bit);
2459
2460 __ vpand(xmm0, xmm8, xmm1, Assembler::AVX_256bit);
2461 __ vpmulhuw(xmm10, xmm0, xmm7, Assembler::AVX_256bit);
2462 __ vpand(xmm0, xmm6, xmm1, Assembler::AVX_256bit);
2463 __ vpmullw(xmm0, xmm5, xmm0, Assembler::AVX_256bit);
2464 __ vpor(xmm0, xmm0, xmm10, Assembler::AVX_256bit);
2465 __ vpcmpgtb(xmm10, xmm0, xmm3, Assembler::AVX_256bit);
2466 __ vpsubusb(xmm1, xmm0, xmm4, Assembler::AVX_256bit);
2467 __ vpsubb(xmm1, xmm1, xmm10, Assembler::AVX_256bit);
2468 __ vpshufb(xmm1, xmm2, xmm1, Assembler::AVX_256bit);
2469 __ vpaddb(xmm0, xmm1, xmm0, Assembler::AVX_256bit);
2470
2471 // Store the encoded bytes
2472 __ vmovdqu(Address(dest, dp), xmm0);
2473 __ addl(dp, 32);
2474
2475 __ cmpl(length, 31);
2476 __ jcc(Assembler::above, L_32byteLoop);
2477
2478 __ BIND(L_process3);
2479 __ vzeroupper();
2480 } else {
2481 __ BIND(L_process3);
2482 }
2483
2484 __ cmpl(length, 3);
2485 __ jcc(Assembler::below, L_exit);
2486
2487 // Load the encoding table based on isURL
2488 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_encoding_table_addr()));
2489 __ movl(r15, isURL);
2490 __ shll(r15, 6);
2491 __ addptr(r11, r15);
2492
2493 __ BIND(L_processdata);
2494
2495 // Load 3 bytes
2496 __ load_unsigned_byte(r15, Address(source, start_offset));
2497 __ load_unsigned_byte(r10, Address(source, start_offset, Address::times_1, 1));
2498 __ load_unsigned_byte(r13, Address(source, start_offset, Address::times_1, 2));
2499
2500 // Build a 32-bit word with bytes 1, 2, 0, 1
2501 __ movl(rax, r10);
2502 __ shll(r10, 24);
2503 __ orl(rax, r10);
2504
2505 __ subl(length, 3);
2506
2507 __ shll(r15, 8);
2508 __ shll(r13, 16);
2509 __ orl(rax, r15);
2510
2511 __ addl(start_offset, 3);
2512
2513 __ orl(rax, r13);
2514 // At this point, rax contains | byte1 | byte2 | byte0 | byte1
2515 // r13 has byte2 << 16 - need low-order 6 bits to translate.
2516 // This translated byte is the fourth output byte.
2517 __ shrl(r13, 16);
2518 __ andl(r13, 0x3f);
2519
2520 // The high-order 6 bits of r15 (byte0) is translated.
2521 // The translated byte is the first output byte.
2522 __ shrl(r15, 10);
2523
2524 __ load_unsigned_byte(r13, Address(r11, r13));
2525 __ load_unsigned_byte(r15, Address(r11, r15));
2526
2527 __ movb(Address(dest, dp, Address::times_1, 3), r13);
2528
2529 // Extract high-order 4 bits of byte1 and low-order 2 bits of byte0.
2530 // This translated byte is the second output byte.
2531 __ shrl(rax, 4);
2532 __ movl(r10, rax);
2533 __ andl(rax, 0x3f);
2534
2535 __ movb(Address(dest, dp, Address::times_1, 0), r15);
2536
2537 __ load_unsigned_byte(rax, Address(r11, rax));
2538
2539 // Extract low-order 2 bits of byte1 and high-order 4 bits of byte2.
2540 // This translated byte is the third output byte.
2541 __ shrl(r10, 18);
2542 __ andl(r10, 0x3f);
2543
2544 __ load_unsigned_byte(r10, Address(r11, r10));
2545
2546 __ movb(Address(dest, dp, Address::times_1, 1), rax);
2547 __ movb(Address(dest, dp, Address::times_1, 2), r10);
2548
2549 __ addl(dp, 4);
2550 __ cmpl(length, 3);
2551 __ jcc(Assembler::aboveEqual, L_processdata);
2552
2553 __ BIND(L_exit);
2554 __ pop_ppx(r15);
2555 __ pop_ppx(r14);
2556 __ pop_ppx(r13);
2557 __ pop_ppx(r12);
2558 __ leave();
2559 __ ret(0);
2560
2561 // record the stub entry and end
2562 store_archive_data(stub_id, start, __ pc());
2563
2564 return start;
2565 }
2566
2567 // base64 AVX512vbmi tables
2568 address StubGenerator::base64_vbmi_lookup_lo_addr() {
2569 StubId stub_id = StubId::stubgen_lookup_lo_base64_id;
2570 int entry_count = StubInfo::entry_count(stub_id);
2571 assert(entry_count == 1, "sanity check");
2572 address start = load_archive_data(stub_id);
2573 if (start != nullptr) {
2574 return start;
2575 }
2576 __ align64();
2577 StubCodeMark mark(this, stub_id);
2578 start = __ pc();
2579
2580 assert(((unsigned long long)start & 0x3f) == 0,
2581 "Alignment problem (0x%08llx)", (unsigned long long)start);
2582 __ emit_data64(0x8080808080808080, relocInfo::none);
2583 __ emit_data64(0x8080808080808080, relocInfo::none);
2584 __ emit_data64(0x8080808080808080, relocInfo::none);
2585 __ emit_data64(0x8080808080808080, relocInfo::none);
2586 __ emit_data64(0x8080808080808080, relocInfo::none);
2587 __ emit_data64(0x3f8080803e808080, relocInfo::none);
2588 __ emit_data64(0x3b3a393837363534, relocInfo::none);
2589 __ emit_data64(0x8080808080803d3c, relocInfo::none);
2590
2591 // record the stub entry and end
2592 store_archive_data(stub_id, start, __ pc());
2593
2594 return start;
2595 }
2596
2597 address StubGenerator::base64_vbmi_lookup_hi_addr() {
2598 StubId stub_id = StubId::stubgen_lookup_hi_base64_id;
2599 int entry_count = StubInfo::entry_count(stub_id);
2600 assert(entry_count == 1, "sanity check");
2601 address start = load_archive_data(stub_id);
2602 if (start != nullptr) {
2603 return start;
2604 }
2605 __ align64();
2606 StubCodeMark mark(this, stub_id);
2607 start = __ pc();
2608
2609 assert(((unsigned long long)start & 0x3f) == 0,
2610 "Alignment problem (0x%08llx)", (unsigned long long)start);
2611 __ emit_data64(0x0605040302010080, relocInfo::none);
2612 __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2613 __ emit_data64(0x161514131211100f, relocInfo::none);
2614 __ emit_data64(0x8080808080191817, relocInfo::none);
2615 __ emit_data64(0x201f1e1d1c1b1a80, relocInfo::none);
2616 __ emit_data64(0x2827262524232221, relocInfo::none);
2617 __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2618 __ emit_data64(0x8080808080333231, relocInfo::none);
2619
2620 // record the stub entry and end
2621 store_archive_data(stub_id, start, __ pc());
2622
2623 return start;
2624 }
2625 address StubGenerator::base64_vbmi_lookup_lo_url_addr() {
2626 StubId stub_id = StubId::stubgen_lookup_lo_base64url_id;
2627 int entry_count = StubInfo::entry_count(stub_id);
2628 assert(entry_count == 1, "sanity check");
2629 address start = load_archive_data(stub_id);
2630 if (start != nullptr) {
2631 return start;
2632 }
2633 __ align64();
2634 StubCodeMark mark(this, stub_id);
2635 start = __ pc();
2636
2637 assert(((unsigned long long)start & 0x3f) == 0,
2638 "Alignment problem (0x%08llx)", (unsigned long long)start);
2639 __ emit_data64(0x8080808080808080, relocInfo::none);
2640 __ emit_data64(0x8080808080808080, relocInfo::none);
2641 __ emit_data64(0x8080808080808080, relocInfo::none);
2642 __ emit_data64(0x8080808080808080, relocInfo::none);
2643 __ emit_data64(0x8080808080808080, relocInfo::none);
2644 __ emit_data64(0x80803e8080808080, relocInfo::none);
2645 __ emit_data64(0x3b3a393837363534, relocInfo::none);
2646 __ emit_data64(0x8080808080803d3c, relocInfo::none);
2647
2648 // record the stub entry and end
2649 store_archive_data(stub_id, start, __ pc());
2650
2651 return start;
2652 }
2653
2654 address StubGenerator::base64_vbmi_lookup_hi_url_addr() {
2655 StubId stub_id = StubId::stubgen_lookup_hi_base64url_id;
2656 int entry_count = StubInfo::entry_count(stub_id);
2657 assert(entry_count == 1, "sanity check");
2658 address start = load_archive_data(stub_id);
2659 if (start != nullptr) {
2660 return start;
2661 }
2662 __ align64();
2663 StubCodeMark mark(this, stub_id);
2664 start = __ pc();
2665
2666 assert(((unsigned long long)start & 0x3f) == 0,
2667 "Alignment problem (0x%08llx)", (unsigned long long)start);
2668 __ emit_data64(0x0605040302010080, relocInfo::none);
2669 __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2670 __ emit_data64(0x161514131211100f, relocInfo::none);
2671 __ emit_data64(0x3f80808080191817, relocInfo::none);
2672 __ emit_data64(0x201f1e1d1c1b1a80, relocInfo::none);
2673 __ emit_data64(0x2827262524232221, relocInfo::none);
2674 __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2675 __ emit_data64(0x8080808080333231, relocInfo::none);
2676
2677 // record the stub entry and end
2678 store_archive_data(stub_id, start, __ pc());
2679
2680 return start;
2681 }
2682
2683 address StubGenerator::base64_vbmi_pack_vec_addr() {
2684 StubId stub_id = StubId::stubgen_pack_vec_base64_id;
2685 int entry_count = StubInfo::entry_count(stub_id);
2686 assert(entry_count == 1, "sanity check");
2687 address start = load_archive_data(stub_id);
2688 if (start != nullptr) {
2689 return start;
2690 }
2691 __ align64();
2692 StubCodeMark mark(this, stub_id);
2693 start = __ pc();
2694
2695 assert(((unsigned long long)start & 0x3f) == 0,
2696 "Alignment problem (0x%08llx)", (unsigned long long)start);
2697 __ emit_data64(0x090a040506000102, relocInfo::none);
2698 __ emit_data64(0x161011120c0d0e08, relocInfo::none);
2699 __ emit_data64(0x1c1d1e18191a1415, relocInfo::none);
2700 __ emit_data64(0x292a242526202122, relocInfo::none);
2701 __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2702 __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2703 __ emit_data64(0x0000000000000000, relocInfo::none);
2704 __ emit_data64(0x0000000000000000, relocInfo::none);
2705
2706 // record the stub entry and end
2707 store_archive_data(stub_id, start, __ pc());
2708
2709 return start;
2710 }
2711
2712 address StubGenerator::base64_vbmi_join_0_1_addr() {
2713 StubId stub_id = StubId::stubgen_join_0_1_base64_id;
2714 int entry_count = StubInfo::entry_count(stub_id);
2715 assert(entry_count == 1, "sanity check");
2716 address start = load_archive_data(stub_id);
2717 if (start != nullptr) {
2718 return start;
2719 }
2720 __ align64();
2721 StubCodeMark mark(this, stub_id);
2722 start = __ pc();
2723
2724 assert(((unsigned long long)start & 0x3f) == 0,
2725 "Alignment problem (0x%08llx)", (unsigned long long)start);
2726 __ emit_data64(0x090a040506000102, relocInfo::none);
2727 __ emit_data64(0x161011120c0d0e08, relocInfo::none);
2728 __ emit_data64(0x1c1d1e18191a1415, relocInfo::none);
2729 __ emit_data64(0x292a242526202122, relocInfo::none);
2730 __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2731 __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2732 __ emit_data64(0x494a444546404142, relocInfo::none);
2733 __ emit_data64(0x565051524c4d4e48, relocInfo::none);
2734
2735 // record the stub entry and end
2736 store_archive_data(stub_id, start, __ pc());
2737
2738 return start;
2739 }
2740
2741 address StubGenerator::base64_vbmi_join_1_2_addr() {
2742 StubId stub_id = StubId::stubgen_join_1_2_base64_id;
2743 int entry_count = StubInfo::entry_count(stub_id);
2744 assert(entry_count == 1, "sanity check");
2745 address start = load_archive_data(stub_id);
2746 if (start != nullptr) {
2747 return start;
2748 }
2749 __ align64();
2750 StubCodeMark mark(this, stub_id);
2751 start = __ pc();
2752
2753 assert(((unsigned long long)start & 0x3f) == 0,
2754 "Alignment problem (0x%08llx)", (unsigned long long)start);
2755 __ emit_data64(0x1c1d1e18191a1415, relocInfo::none);
2756 __ emit_data64(0x292a242526202122, relocInfo::none);
2757 __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2758 __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2759 __ emit_data64(0x494a444546404142, relocInfo::none);
2760 __ emit_data64(0x565051524c4d4e48, relocInfo::none);
2761 __ emit_data64(0x5c5d5e58595a5455, relocInfo::none);
2762 __ emit_data64(0x696a646566606162, relocInfo::none);
2763
2764 // record the stub entry and end
2765 store_archive_data(stub_id, start, __ pc());
2766
2767 return start;
2768 }
2769
2770 address StubGenerator::base64_vbmi_join_2_3_addr() {
2771 StubId stub_id = StubId::stubgen_join_2_3_base64_id;
2772 int entry_count = StubInfo::entry_count(stub_id);
2773 assert(entry_count == 1, "sanity check");
2774 address start = load_archive_data(stub_id);
2775 if (start != nullptr) {
2776 return start;
2777 }
2778 __ align64();
2779 StubCodeMark mark(this, stub_id);
2780 start = __ pc();
2781
2782 assert(((unsigned long long)start & 0x3f) == 0,
2783 "Alignment problem (0x%08llx)", (unsigned long long)start);
2784 __ emit_data64(0x363031322c2d2e28, relocInfo::none);
2785 __ emit_data64(0x3c3d3e38393a3435, relocInfo::none);
2786 __ emit_data64(0x494a444546404142, relocInfo::none);
2787 __ emit_data64(0x565051524c4d4e48, relocInfo::none);
2788 __ emit_data64(0x5c5d5e58595a5455, relocInfo::none);
2789 __ emit_data64(0x696a646566606162, relocInfo::none);
2790 __ emit_data64(0x767071726c6d6e68, relocInfo::none);
2791 __ emit_data64(0x7c7d7e78797a7475, relocInfo::none);
2792
2793 // record the stub entry and end
2794 store_archive_data(stub_id, start, __ pc());
2795
2796 return start;
2797 }
2798
2799 address StubGenerator::base64_AVX2_decode_tables_addr() {
2800 StubId stub_id = StubId::stubgen_avx2_decode_tables_base64_id;
2801 int entry_count = StubInfo::entry_count(stub_id);
2802 assert(entry_count == 1, "sanity check");
2803 address start = load_archive_data(stub_id);
2804 if (start != nullptr) {
2805 return start;
2806 }
2807 __ align64();
2808 StubCodeMark mark(this, stub_id);
2809 start = __ pc();
2810
2811 assert(((unsigned long long)start & 0x3f) == 0,
2812 "Alignment problem (0x%08llx)", (unsigned long long)start);
2813 __ emit_data(0x2f2f2f2f, relocInfo::none, 0);
2814 __ emit_data(0x5f5f5f5f, relocInfo::none, 0); // for URL
2815
2816 __ emit_data(0xffffffff, relocInfo::none, 0);
2817 __ emit_data(0xfcfcfcfc, relocInfo::none, 0); // for URL
2818
2819 // Permute table
2820 __ emit_data64(0x0000000100000000, relocInfo::none);
2821 __ emit_data64(0x0000000400000002, relocInfo::none);
2822 __ emit_data64(0x0000000600000005, relocInfo::none);
2823 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2824
2825 // Shuffle table
2826 __ emit_data64(0x090a040506000102, relocInfo::none);
2827 __ emit_data64(0xffffffff0c0d0e08, relocInfo::none);
2828 __ emit_data64(0x090a040506000102, relocInfo::none);
2829 __ emit_data64(0xffffffff0c0d0e08, relocInfo::none);
2830
2831 // merge table
2832 __ emit_data(0x01400140, relocInfo::none, 0);
2833
2834 // merge multiplier
2835 __ emit_data(0x00011000, relocInfo::none, 0);
2836
2837 // record the stub entry and end
2838 store_archive_data(stub_id, start, __ pc());
2839
2840 return start;
2841 }
2842
2843 address StubGenerator::base64_AVX2_decode_LUT_tables_addr() {
2844 StubId stub_id = StubId::stubgen_avx2_decode_lut_tables_base64_id;
2845 int entry_count = StubInfo::entry_count(stub_id);
2846 assert(entry_count == 1, "sanity check");
2847 address start = load_archive_data(stub_id);
2848 if (start != nullptr) {
2849 return start;
2850 }
2851 __ align64();
2852 StubCodeMark mark(this, stub_id);
2853 start = __ pc();
2854
2855 assert(((unsigned long long)start & 0x3f) == 0,
2856 "Alignment problem (0x%08llx)", (unsigned long long)start);
2857 // lut_lo
2858 __ emit_data64(0x1111111111111115, relocInfo::none);
2859 __ emit_data64(0x1a1b1b1b1a131111, relocInfo::none);
2860 __ emit_data64(0x1111111111111115, relocInfo::none);
2861 __ emit_data64(0x1a1b1b1b1a131111, relocInfo::none);
2862
2863 // lut_roll
2864 __ emit_data64(0xb9b9bfbf04131000, relocInfo::none);
2865 __ emit_data64(0x0000000000000000, relocInfo::none);
2866 __ emit_data64(0xb9b9bfbf04131000, relocInfo::none);
2867 __ emit_data64(0x0000000000000000, relocInfo::none);
2868
2869 // lut_lo URL
2870 __ emit_data64(0x1111111111111115, relocInfo::none);
2871 __ emit_data64(0x1b1b1a1b1b131111, relocInfo::none);
2872 __ emit_data64(0x1111111111111115, relocInfo::none);
2873 __ emit_data64(0x1b1b1a1b1b131111, relocInfo::none);
2874
2875 // lut_roll URL
2876 __ emit_data64(0xb9b9bfbf0411e000, relocInfo::none);
2877 __ emit_data64(0x0000000000000000, relocInfo::none);
2878 __ emit_data64(0xb9b9bfbf0411e000, relocInfo::none);
2879 __ emit_data64(0x0000000000000000, relocInfo::none);
2880
2881 // lut_hi
2882 __ emit_data64(0x0804080402011010, relocInfo::none);
2883 __ emit_data64(0x1010101010101010, relocInfo::none);
2884 __ emit_data64(0x0804080402011010, relocInfo::none);
2885 __ emit_data64(0x1010101010101010, relocInfo::none);
2886
2887 // record the stub entry and end
2888 store_archive_data(stub_id, start, __ pc());
2889
2890 return start;
2891 }
2892
2893 address StubGenerator::base64_decoding_table_addr() {
2894 StubId stub_id = StubId::stubgen_decoding_table_base64_id;
2895 int entry_count = StubInfo::entry_count(stub_id);
2896 assert(entry_count == 1, "sanity check");
2897 address start = load_archive_data(stub_id);
2898 if (start != nullptr) {
2899 return start;
2900 }
2901 StubCodeMark mark(this, stub_id);
2902 start = __ pc();
2903
2904 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2905 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2906 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2907 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2908 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2909 __ emit_data64(0x3fffffff3effffff, relocInfo::none);
2910 __ emit_data64(0x3b3a393837363534, relocInfo::none);
2911 __ emit_data64(0xffffffffffff3d3c, relocInfo::none);
2912 __ emit_data64(0x06050403020100ff, relocInfo::none);
2913 __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2914 __ emit_data64(0x161514131211100f, relocInfo::none);
2915 __ emit_data64(0xffffffffff191817, relocInfo::none);
2916 __ emit_data64(0x201f1e1d1c1b1aff, relocInfo::none);
2917 __ emit_data64(0x2827262524232221, relocInfo::none);
2918 __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2919 __ emit_data64(0xffffffffff333231, relocInfo::none);
2920 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2921 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2922 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2923 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2924 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2925 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2926 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2927 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2928 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2929 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2930 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2931 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2932 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2933 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2934 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2935 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2936
2937 // URL table
2938 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2939 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2940 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2941 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2942 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2943 __ emit_data64(0xffff3effffffffff, relocInfo::none);
2944 __ emit_data64(0x3b3a393837363534, relocInfo::none);
2945 __ emit_data64(0xffffffffffff3d3c, relocInfo::none);
2946 __ emit_data64(0x06050403020100ff, relocInfo::none);
2947 __ emit_data64(0x0e0d0c0b0a090807, relocInfo::none);
2948 __ emit_data64(0x161514131211100f, relocInfo::none);
2949 __ emit_data64(0x3fffffffff191817, relocInfo::none);
2950 __ emit_data64(0x201f1e1d1c1b1aff, relocInfo::none);
2951 __ emit_data64(0x2827262524232221, relocInfo::none);
2952 __ emit_data64(0x302f2e2d2c2b2a29, relocInfo::none);
2953 __ emit_data64(0xffffffffff333231, relocInfo::none);
2954 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2955 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2956 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2957 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2958 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2959 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2960 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2961 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2962 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2963 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2964 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2965 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2966 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2967 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2968 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2969 __ emit_data64(0xffffffffffffffff, relocInfo::none);
2970
2971 // record the stub entry and end
2972 store_archive_data(stub_id, start, __ pc());
2973
2974 return start;
2975 }
2976
2977
2978 // Code for generating Base64 decoding.
2979 //
2980 // Based on the article (and associated code) from https://arxiv.org/abs/1910.05109.
2981 //
2982 // Intrinsic function prototype in Base64.java:
2983 // private void decodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL, isMIME) {
2984 address StubGenerator::generate_base64_decodeBlock() {
2985 StubId stub_id = StubId::stubgen_base64_decodeBlock_id;
2986 int entry_count = StubInfo::entry_count(stub_id);
2987 assert(entry_count == 1, "sanity check");
2988 address start = load_archive_data(stub_id);
2989 if (start != nullptr) {
2990 return start;
2991 }
2992 __ align(CodeEntryAlignment);
2993 StubCodeMark mark(this, stub_id);
2994 start = __ pc();
2995
2996 __ enter();
2997
2998 // Save callee-saved registers before using them
2999 __ push_ppx(r12);
3000 __ push_ppx(r13);
3001 __ push_ppx(r14);
3002 __ push_ppx(r15);
3003 __ push_ppx(rbx);
3004
3005 // arguments
3006 const Register source = c_rarg0; // Source Array
3007 const Register start_offset = c_rarg1; // start offset
3008 const Register end_offset = c_rarg2; // end offset
3009 const Register dest = c_rarg3; // destination array
3010 const Register isMIME = rbx;
3011
3012 #ifndef _WIN64
3013 const Register dp = c_rarg4; // Position for writing to dest array
3014 const Register isURL = c_rarg5;// Base64 or URL character set
3015 __ movl(isMIME, Address(rbp, 2 * wordSize));
3016 #else
3017 const Address dp_mem(rbp, 6 * wordSize); // length is on stack on Win64
3018 const Address isURL_mem(rbp, 7 * wordSize);
3019 const Register isURL = r10; // pick the volatile windows register
3020 const Register dp = r12;
3021 __ movl(dp, dp_mem);
3022 __ movl(isURL, isURL_mem);
3023 __ movl(isMIME, Address(rbp, 8 * wordSize));
3024 #endif
3025
3026 const XMMRegister lookup_lo = xmm5;
3027 const XMMRegister lookup_hi = xmm6;
3028 const XMMRegister errorvec = xmm7;
3029 const XMMRegister pack16_op = xmm9;
3030 const XMMRegister pack32_op = xmm8;
3031 const XMMRegister input0 = xmm3;
3032 const XMMRegister input1 = xmm20;
3033 const XMMRegister input2 = xmm21;
3034 const XMMRegister input3 = xmm19;
3035 const XMMRegister join01 = xmm12;
3036 const XMMRegister join12 = xmm11;
3037 const XMMRegister join23 = xmm10;
3038 const XMMRegister translated0 = xmm2;
3039 const XMMRegister translated1 = xmm1;
3040 const XMMRegister translated2 = xmm0;
3041 const XMMRegister translated3 = xmm4;
3042
3043 const XMMRegister merged0 = xmm2;
3044 const XMMRegister merged1 = xmm1;
3045 const XMMRegister merged2 = xmm0;
3046 const XMMRegister merged3 = xmm4;
3047 const XMMRegister merge_ab_bc0 = xmm2;
3048 const XMMRegister merge_ab_bc1 = xmm1;
3049 const XMMRegister merge_ab_bc2 = xmm0;
3050 const XMMRegister merge_ab_bc3 = xmm4;
3051
3052 const XMMRegister pack24bits = xmm4;
3053
3054 const Register length = r14;
3055 const Register output_size = r13;
3056 const Register output_mask = r15;
3057 const KRegister input_mask = k1;
3058
3059 const XMMRegister input_initial_valid_b64 = xmm0;
3060 const XMMRegister tmp = xmm10;
3061 const XMMRegister mask = xmm0;
3062 const XMMRegister invalid_b64 = xmm1;
3063
3064 Label L_process256, L_process64, L_process64Loop, L_exit, L_processdata, L_loadURL;
3065 Label L_continue, L_finalBit, L_padding, L_donePadding, L_bruteForce;
3066 Label L_forceLoop, L_bottomLoop, L_checkMIME, L_exit_no_vzero, L_lastChunk;
3067
3068 // calculate length from offsets
3069 __ movl(length, end_offset);
3070 __ subl(length, start_offset);
3071 __ push_ppx(dest); // Save for return value calc
3072
3073 // If AVX512 VBMI not supported, just compile non-AVX code
3074 if(VM_Version::supports_avx512_vbmi() &&
3075 VM_Version::supports_avx512bw()) {
3076 __ cmpl(length, 31); // 32-bytes is break-even for AVX-512
3077 __ jcc(Assembler::lessEqual, L_lastChunk);
3078
3079 __ cmpl(isMIME, 0);
3080 __ jcc(Assembler::notEqual, L_lastChunk);
3081
3082 // Load lookup tables based on isURL
3083 __ cmpl(isURL, 0);
3084 __ jcc(Assembler::notZero, L_loadURL);
3085
3086 __ evmovdquq(lookup_lo, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_lo_addr()), Assembler::AVX_512bit, r13);
3087 __ evmovdquq(lookup_hi, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_hi_addr()), Assembler::AVX_512bit, r13);
3088
3089 __ BIND(L_continue);
3090
3091 __ movl(r15, 0x01400140);
3092 __ evpbroadcastd(pack16_op, r15, Assembler::AVX_512bit);
3093
3094 __ movl(r15, 0x00011000);
3095 __ evpbroadcastd(pack32_op, r15, Assembler::AVX_512bit);
3096
3097 __ cmpl(length, 0xff);
3098 __ jcc(Assembler::lessEqual, L_process64);
3099
3100 // load masks required for decoding data
3101 __ BIND(L_processdata);
3102 __ evmovdquq(join01, ExternalAddress(StubRoutines::x86::base64_vbmi_join_0_1_addr()), Assembler::AVX_512bit,r13);
3103 __ evmovdquq(join12, ExternalAddress(StubRoutines::x86::base64_vbmi_join_1_2_addr()), Assembler::AVX_512bit, r13);
3104 __ evmovdquq(join23, ExternalAddress(StubRoutines::x86::base64_vbmi_join_2_3_addr()), Assembler::AVX_512bit, r13);
3105
3106 __ align32();
3107 __ BIND(L_process256);
3108 // Grab input data
3109 __ evmovdquq(input0, Address(source, start_offset, Address::times_1, 0x00), Assembler::AVX_512bit);
3110 __ evmovdquq(input1, Address(source, start_offset, Address::times_1, 0x40), Assembler::AVX_512bit);
3111 __ evmovdquq(input2, Address(source, start_offset, Address::times_1, 0x80), Assembler::AVX_512bit);
3112 __ evmovdquq(input3, Address(source, start_offset, Address::times_1, 0xc0), Assembler::AVX_512bit);
3113
3114 // Copy the low part of the lookup table into the destination of the permutation
3115 __ evmovdquq(translated0, lookup_lo, Assembler::AVX_512bit);
3116 __ evmovdquq(translated1, lookup_lo, Assembler::AVX_512bit);
3117 __ evmovdquq(translated2, lookup_lo, Assembler::AVX_512bit);
3118 __ evmovdquq(translated3, lookup_lo, Assembler::AVX_512bit);
3119
3120 // Translate the base64 input into "decoded" bytes
3121 __ evpermt2b(translated0, input0, lookup_hi, Assembler::AVX_512bit);
3122 __ evpermt2b(translated1, input1, lookup_hi, Assembler::AVX_512bit);
3123 __ evpermt2b(translated2, input2, lookup_hi, Assembler::AVX_512bit);
3124 __ evpermt2b(translated3, input3, lookup_hi, Assembler::AVX_512bit);
3125
3126 // OR all of the translations together to check for errors (high-order bit of byte set)
3127 __ vpternlogd(input0, 0xfe, input1, input2, Assembler::AVX_512bit);
3128
3129 __ vpternlogd(input3, 0xfe, translated0, translated1, Assembler::AVX_512bit);
3130 __ vpternlogd(input0, 0xfe, translated2, translated3, Assembler::AVX_512bit);
3131 __ vpor(errorvec, input3, input0, Assembler::AVX_512bit);
3132
3133 // Check if there was an error - if so, try 64-byte chunks
3134 __ evpmovb2m(k3, errorvec, Assembler::AVX_512bit);
3135 __ kortestql(k3, k3);
3136 __ jcc(Assembler::notZero, L_process64);
3137
3138 // The merging and shuffling happens here
3139 // We multiply each byte pair [00dddddd | 00cccccc | 00bbbbbb | 00aaaaaa]
3140 // Multiply [00cccccc] by 2^6 added to [00dddddd] to get [0000cccc | ccdddddd]
3141 // The pack16_op is a vector of 0x01400140, so multiply D by 1 and C by 0x40
3142 __ vpmaddubsw(merge_ab_bc0, translated0, pack16_op, Assembler::AVX_512bit);
3143 __ vpmaddubsw(merge_ab_bc1, translated1, pack16_op, Assembler::AVX_512bit);
3144 __ vpmaddubsw(merge_ab_bc2, translated2, pack16_op, Assembler::AVX_512bit);
3145 __ vpmaddubsw(merge_ab_bc3, translated3, pack16_op, Assembler::AVX_512bit);
3146
3147 // Now do the same with packed 16-bit values.
3148 // We start with [0000cccc | ccdddddd | 0000aaaa | aabbbbbb]
3149 // pack32_op is 0x00011000 (2^12, 1), so this multiplies [0000aaaa | aabbbbbb] by 2^12
3150 // and adds [0000cccc | ccdddddd] to yield [00000000 | aaaaaabb | bbbbcccc | ccdddddd]
3151 __ vpmaddwd(merged0, merge_ab_bc0, pack32_op, Assembler::AVX_512bit);
3152 __ vpmaddwd(merged1, merge_ab_bc1, pack32_op, Assembler::AVX_512bit);
3153 __ vpmaddwd(merged2, merge_ab_bc2, pack32_op, Assembler::AVX_512bit);
3154 __ vpmaddwd(merged3, merge_ab_bc3, pack32_op, Assembler::AVX_512bit);
3155
3156 // The join vectors specify which byte from which vector goes into the outputs
3157 // One of every 4 bytes in the extended vector is zero, so we pack them into their
3158 // final positions in the register for storing (256 bytes in, 192 bytes out)
3159 __ evpermt2b(merged0, join01, merged1, Assembler::AVX_512bit);
3160 __ evpermt2b(merged1, join12, merged2, Assembler::AVX_512bit);
3161 __ evpermt2b(merged2, join23, merged3, Assembler::AVX_512bit);
3162
3163 // Store result
3164 __ evmovdquq(Address(dest, dp, Address::times_1, 0x00), merged0, Assembler::AVX_512bit);
3165 __ evmovdquq(Address(dest, dp, Address::times_1, 0x40), merged1, Assembler::AVX_512bit);
3166 __ evmovdquq(Address(dest, dp, Address::times_1, 0x80), merged2, Assembler::AVX_512bit);
3167
3168 __ addptr(source, 0x100);
3169 __ addptr(dest, 0xc0);
3170 __ subl(length, 0x100);
3171 __ cmpl(length, 64 * 4);
3172 __ jcc(Assembler::greaterEqual, L_process256);
3173
3174 // At this point, we've decoded 64 * 4 * n bytes.
3175 // The remaining length will be <= 64 * 4 - 1.
3176 // UNLESS there was an error decoding the first 256-byte chunk. In this
3177 // case, the length will be arbitrarily long.
3178 //
3179 // Note that this will be the path for MIME-encoded strings.
3180
3181 __ BIND(L_process64);
3182
3183 __ evmovdquq(pack24bits, ExternalAddress(StubRoutines::x86::base64_vbmi_pack_vec_addr()), Assembler::AVX_512bit, r13);
3184
3185 __ cmpl(length, 63);
3186 __ jcc(Assembler::lessEqual, L_finalBit);
3187
3188 __ mov64(rax, 0x0000ffffffffffff);
3189 __ kmovql(k2, rax);
3190
3191 __ align32();
3192 __ BIND(L_process64Loop);
3193
3194 // Handle first 64-byte block
3195
3196 __ evmovdquq(input0, Address(source, start_offset), Assembler::AVX_512bit);
3197 __ evmovdquq(translated0, lookup_lo, Assembler::AVX_512bit);
3198 __ evpermt2b(translated0, input0, lookup_hi, Assembler::AVX_512bit);
3199
3200 __ vpor(errorvec, translated0, input0, Assembler::AVX_512bit);
3201
3202 // Check for error and bomb out before updating dest
3203 __ evpmovb2m(k3, errorvec, Assembler::AVX_512bit);
3204 __ kortestql(k3, k3);
3205 __ jcc(Assembler::notZero, L_exit);
3206
3207 // Pack output register, selecting correct byte ordering
3208 __ vpmaddubsw(merge_ab_bc0, translated0, pack16_op, Assembler::AVX_512bit);
3209 __ vpmaddwd(merged0, merge_ab_bc0, pack32_op, Assembler::AVX_512bit);
3210 __ vpermb(merged0, pack24bits, merged0, Assembler::AVX_512bit);
3211
3212 __ evmovdqub(Address(dest, dp), k2, merged0, true, Assembler::AVX_512bit);
3213
3214 __ subl(length, 64);
3215 __ addptr(source, 64);
3216 __ addptr(dest, 48);
3217
3218 __ cmpl(length, 64);
3219 __ jcc(Assembler::greaterEqual, L_process64Loop);
3220
3221 __ cmpl(length, 0);
3222 __ jcc(Assembler::lessEqual, L_exit);
3223
3224 __ BIND(L_finalBit);
3225 // Now have 1 to 63 bytes left to decode
3226
3227 // I was going to let Java take care of the final fragment
3228 // however it will repeatedly call this routine for every 4 bytes
3229 // of input data, so handle the rest here.
3230 __ movq(rax, -1);
3231 __ bzhiq(rax, rax, length); // Input mask in rax
3232
3233 __ movl(output_size, length);
3234 __ shrl(output_size, 2); // Find (len / 4) * 3 (output length)
3235 __ lea(output_size, Address(output_size, output_size, Address::times_2, 0));
3236 // output_size in r13
3237
3238 // Strip pad characters, if any, and adjust length and mask
3239 __ addq(length, start_offset);
3240 __ cmpb(Address(source, length, Address::times_1, -1), '=');
3241 __ jcc(Assembler::equal, L_padding);
3242
3243 __ BIND(L_donePadding);
3244 __ subq(length, start_offset);
3245
3246 // Output size is (64 - output_size), output mask is (all 1s >> output_size).
3247 __ kmovql(input_mask, rax);
3248 __ movq(output_mask, -1);
3249 __ bzhiq(output_mask, output_mask, output_size);
3250
3251 // Load initial input with all valid base64 characters. Will be used
3252 // in merging source bytes to avoid masking when determining if an error occurred.
3253 __ movl(rax, 0x61616161);
3254 __ evpbroadcastd(input_initial_valid_b64, rax, Assembler::AVX_512bit);
3255
3256 // A register containing all invalid base64 decoded values
3257 __ movl(rax, 0x80808080);
3258 __ evpbroadcastd(invalid_b64, rax, Assembler::AVX_512bit);
3259
3260 // input_mask is in k1
3261 // output_size is in r13
3262 // output_mask is in r15
3263 // zmm0 - free
3264 // zmm1 - 0x00011000
3265 // zmm2 - 0x01400140
3266 // zmm3 - errorvec
3267 // zmm4 - pack vector
3268 // zmm5 - lookup_lo
3269 // zmm6 - lookup_hi
3270 // zmm7 - errorvec
3271 // zmm8 - 0x61616161
3272 // zmm9 - 0x80808080
3273
3274 // Load only the bytes from source, merging into our "fully-valid" register
3275 __ evmovdqub(input_initial_valid_b64, input_mask, Address(source, start_offset, Address::times_1, 0x0), true, Assembler::AVX_512bit);
3276
3277 // Decode all bytes within our merged input
3278 __ evmovdquq(tmp, lookup_lo, Assembler::AVX_512bit);
3279 __ evpermt2b(tmp, input_initial_valid_b64, lookup_hi, Assembler::AVX_512bit);
3280 __ evporq(mask, tmp, input_initial_valid_b64, Assembler::AVX_512bit);
3281
3282 // Check for error. Compare (decoded | initial) to all invalid.
3283 // If any bytes have their high-order bit set, then we have an error.
3284 __ evptestmb(k2, mask, invalid_b64, Assembler::AVX_512bit);
3285 __ kortestql(k2, k2);
3286
3287 // If we have an error, use the brute force loop to decode what we can (4-byte chunks).
3288 __ jcc(Assembler::notZero, L_bruteForce);
3289
3290 // Shuffle output bytes
3291 __ vpmaddubsw(tmp, tmp, pack16_op, Assembler::AVX_512bit);
3292 __ vpmaddwd(tmp, tmp, pack32_op, Assembler::AVX_512bit);
3293
3294 __ vpermb(tmp, pack24bits, tmp, Assembler::AVX_512bit);
3295 __ kmovql(k1, output_mask);
3296 __ evmovdqub(Address(dest, dp), k1, tmp, true, Assembler::AVX_512bit);
3297
3298 __ addptr(dest, output_size);
3299
3300 __ BIND(L_exit);
3301 __ vzeroupper();
3302 __ pop_ppx(rax); // Get original dest value
3303 __ subptr(dest, rax); // Number of bytes converted
3304 __ movptr(rax, dest);
3305 __ pop_ppx(rbx);
3306 __ pop_ppx(r15);
3307 __ pop_ppx(r14);
3308 __ pop_ppx(r13);
3309 __ pop_ppx(r12);
3310 __ leave();
3311 __ ret(0);
3312
3313 __ BIND(L_loadURL);
3314 __ evmovdquq(lookup_lo, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_lo_url_addr()), Assembler::AVX_512bit, r13);
3315 __ evmovdquq(lookup_hi, ExternalAddress(StubRoutines::x86::base64_vbmi_lookup_hi_url_addr()), Assembler::AVX_512bit, r13);
3316 __ jmp(L_continue);
3317
3318 __ BIND(L_padding);
3319 __ decrementq(output_size, 1);
3320 __ shrq(rax, 1);
3321
3322 __ cmpb(Address(source, length, Address::times_1, -2), '=');
3323 __ jcc(Assembler::notEqual, L_donePadding);
3324
3325 __ decrementq(output_size, 1);
3326 __ shrq(rax, 1);
3327 __ jmp(L_donePadding);
3328
3329 __ align32();
3330 __ BIND(L_bruteForce);
3331 } // End of if(avx512_vbmi)
3332
3333 if (VM_Version::supports_avx2()) {
3334 Label L_tailProc, L_topLoop, L_enterLoop;
3335
3336 __ cmpl(isMIME, 0);
3337 __ jcc(Assembler::notEqual, L_lastChunk);
3338
3339 // Check for buffer too small (for algorithm)
3340 __ subl(length, 0x2c);
3341 __ jcc(Assembler::less, L_tailProc);
3342
3343 __ shll(isURL, 2);
3344
3345 // Algorithm adapted from https://arxiv.org/abs/1704.00605, "Faster Base64
3346 // Encoding and Decoding using AVX2 Instructions". URL modifications added.
3347
3348 // Set up constants
3349 __ lea(r13, ExternalAddress(StubRoutines::x86::base64_AVX2_decode_tables_addr()));
3350 __ vpbroadcastd(xmm4, Address(r13, isURL, Address::times_1), Assembler::AVX_256bit); // 2F or 5F
3351 __ vpbroadcastd(xmm10, Address(r13, isURL, Address::times_1, 0x08), Assembler::AVX_256bit); // -1 or -4
3352 __ vmovdqu(xmm12, Address(r13, 0x10)); // permute
3353 __ vmovdqu(xmm13, Address(r13, 0x30)); // shuffle
3354 __ vpbroadcastd(xmm7, Address(r13, 0x50), Assembler::AVX_256bit); // merge
3355 __ vpbroadcastd(xmm6, Address(r13, 0x54), Assembler::AVX_256bit); // merge mult
3356
3357 __ lea(r13, ExternalAddress(StubRoutines::x86::base64_AVX2_decode_LUT_tables_addr()));
3358 __ shll(isURL, 4);
3359 __ vmovdqu(xmm11, Address(r13, isURL, Address::times_1, 0x00)); // lut_lo
3360 __ vmovdqu(xmm8, Address(r13, isURL, Address::times_1, 0x20)); // lut_roll
3361 __ shrl(isURL, 6); // restore isURL
3362 __ vmovdqu(xmm9, Address(r13, 0x80)); // lut_hi
3363 __ jmp(L_enterLoop);
3364
3365 __ align32();
3366 __ bind(L_topLoop);
3367 // Add in the offset value (roll) to get 6-bit out values
3368 __ vpaddb(xmm0, xmm0, xmm2, Assembler::AVX_256bit);
3369 // Merge and permute the output bits into appropriate output byte lanes
3370 __ vpmaddubsw(xmm0, xmm0, xmm7, Assembler::AVX_256bit);
3371 __ vpmaddwd(xmm0, xmm0, xmm6, Assembler::AVX_256bit);
3372 __ vpshufb(xmm0, xmm0, xmm13, Assembler::AVX_256bit);
3373 __ vpermd(xmm0, xmm12, xmm0, Assembler::AVX_256bit);
3374 // Store the output bytes
3375 __ vmovdqu(Address(dest, dp, Address::times_1, 0), xmm0);
3376 __ addptr(source, 0x20);
3377 __ addptr(dest, 0x18);
3378 __ subl(length, 0x20);
3379 __ jcc(Assembler::less, L_tailProc);
3380
3381 __ bind(L_enterLoop);
3382
3383 // Load in encoded string (32 bytes)
3384 __ vmovdqu(xmm2, Address(source, start_offset, Address::times_1, 0x0));
3385 // Extract the high nibble for indexing into the lut tables. High 4 bits are don't care.
3386 __ vpsrld(xmm1, xmm2, 0x4, Assembler::AVX_256bit);
3387 __ vpand(xmm1, xmm4, xmm1, Assembler::AVX_256bit);
3388 // Extract the low nibble. 5F/2F will isolate the low-order 4 bits. High 4 bits are don't care.
3389 __ vpand(xmm3, xmm2, xmm4, Assembler::AVX_256bit);
3390 // Check for special-case (0x2F or 0x5F (URL))
3391 __ vpcmpeqb(xmm0, xmm4, xmm2, Assembler::AVX_256bit);
3392 // Get the bitset based on the low nibble. vpshufb uses low-order 4 bits only.
3393 __ vpshufb(xmm3, xmm11, xmm3, Assembler::AVX_256bit);
3394 // Get the bit value of the high nibble
3395 __ vpshufb(xmm5, xmm9, xmm1, Assembler::AVX_256bit);
3396 // Make sure 2F / 5F shows as valid
3397 __ vpandn(xmm3, xmm0, xmm3, Assembler::AVX_256bit);
3398 // Make adjustment for roll index. For non-URL, this is a no-op,
3399 // for URL, this adjusts by -4. This is to properly index the
3400 // roll value for 2F / 5F.
3401 __ vpand(xmm0, xmm0, xmm10, Assembler::AVX_256bit);
3402 // If the and of the two is non-zero, we have an invalid input character
3403 __ vptest(xmm3, xmm5);
3404 // Extract the "roll" value - value to add to the input to get 6-bit out value
3405 __ vpaddb(xmm0, xmm0, xmm1, Assembler::AVX_256bit); // Handle 2F / 5F
3406 __ vpshufb(xmm0, xmm8, xmm0, Assembler::AVX_256bit);
3407 __ jcc(Assembler::equal, L_topLoop); // Fall through on error
3408
3409 __ bind(L_tailProc);
3410
3411 __ addl(length, 0x2c);
3412
3413 __ vzeroupper();
3414 }
3415
3416 // Use non-AVX code to decode 4-byte chunks into 3 bytes of output
3417
3418 // Register state (Linux):
3419 // r12-15 - saved on stack
3420 // rdi - src
3421 // rsi - sp
3422 // rdx - sl
3423 // rcx - dst
3424 // r8 - dp
3425 // r9 - isURL
3426
3427 // Register state (Windows):
3428 // r12-15 - saved on stack
3429 // rcx - src
3430 // rdx - sp
3431 // r8 - sl
3432 // r9 - dst
3433 // r12 - dp
3434 // r10 - isURL
3435
3436 // Registers (common):
3437 // length (r14) - bytes in src
3438
3439 const Register decode_table = r11;
3440 const Register out_byte_count = rbx;
3441 const Register byte1 = r13;
3442 const Register byte2 = r15;
3443 const Register byte3 = WIN64_ONLY(r8) NOT_WIN64(rdx);
3444 const Register byte4 = WIN64_ONLY(r10) NOT_WIN64(r9);
3445
3446 __ bind(L_lastChunk);
3447
3448 __ shrl(length, 2); // Multiple of 4 bytes only - length is # 4-byte chunks
3449 __ cmpl(length, 0);
3450 __ jcc(Assembler::lessEqual, L_exit_no_vzero);
3451
3452 __ shll(isURL, 8); // index into decode table based on isURL
3453 __ lea(decode_table, ExternalAddress(StubRoutines::x86::base64_decoding_table_addr()));
3454 __ addptr(decode_table, isURL);
3455
3456 __ jmp(L_bottomLoop);
3457
3458 __ align32();
3459 __ BIND(L_forceLoop);
3460 __ shll(byte1, 18);
3461 __ shll(byte2, 12);
3462 __ shll(byte3, 6);
3463 __ orl(byte1, byte2);
3464 __ orl(byte1, byte3);
3465 __ orl(byte1, byte4);
3466
3467 __ addptr(source, 4);
3468
3469 __ movb(Address(dest, dp, Address::times_1, 2), byte1);
3470 __ shrl(byte1, 8);
3471 __ movb(Address(dest, dp, Address::times_1, 1), byte1);
3472 __ shrl(byte1, 8);
3473 __ movb(Address(dest, dp, Address::times_1, 0), byte1);
3474
3475 __ addptr(dest, 3);
3476 __ decrementl(length, 1);
3477 __ jcc(Assembler::zero, L_exit_no_vzero);
3478
3479 __ BIND(L_bottomLoop);
3480 __ load_unsigned_byte(byte1, Address(source, start_offset, Address::times_1, 0x00));
3481 __ load_unsigned_byte(byte2, Address(source, start_offset, Address::times_1, 0x01));
3482 __ load_signed_byte(byte1, Address(decode_table, byte1));
3483 __ load_signed_byte(byte2, Address(decode_table, byte2));
3484 __ load_unsigned_byte(byte3, Address(source, start_offset, Address::times_1, 0x02));
3485 __ load_unsigned_byte(byte4, Address(source, start_offset, Address::times_1, 0x03));
3486 __ load_signed_byte(byte3, Address(decode_table, byte3));
3487 __ load_signed_byte(byte4, Address(decode_table, byte4));
3488
3489 __ mov(rax, byte1);
3490 __ orl(rax, byte2);
3491 __ orl(rax, byte3);
3492 __ orl(rax, byte4);
3493 __ jcc(Assembler::positive, L_forceLoop);
3494
3495 __ BIND(L_exit_no_vzero);
3496 __ pop_ppx(rax); // Get original dest value
3497 __ subptr(dest, rax); // Number of bytes converted
3498 __ movptr(rax, dest);
3499 __ pop_ppx(rbx);
3500 __ pop_ppx(r15);
3501 __ pop_ppx(r14);
3502 __ pop_ppx(r13);
3503 __ pop_ppx(r12);
3504 __ leave();
3505 __ ret(0);
3506
3507 // record the stub entry and end
3508 store_archive_data(stub_id, start, __ pc());
3509
3510 return start;
3511 }
3512
3513
3514 /**
3515 * Arguments:
3516 *
3517 * Inputs:
3518 * c_rarg0 - int crc
3519 * c_rarg1 - byte* buf
3520 * c_rarg2 - int length
3521 *
3522 * Output:
3523 * rax - int crc result
3524 */
3525 address StubGenerator::generate_updateBytesCRC32() {
3526 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
3527
3528 StubId stub_id = StubId::stubgen_updateBytesCRC32_id;
3529 int entry_count = StubInfo::entry_count(stub_id);
3530 assert(entry_count == 1, "sanity check");
3531 address start = load_archive_data(stub_id);
3532 if (start != nullptr) {
3533 return start;
3534 }
3535 __ align(CodeEntryAlignment);
3536 StubCodeMark mark(this, stub_id);
3537
3538 start = __ pc();
3539
3540 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3541 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3542 // rscratch1: r10
3543 const Register crc = c_rarg0; // crc
3544 const Register buf = c_rarg1; // source java byte array address
3545 const Register len = c_rarg2; // length
3546 const Register table = c_rarg3; // crc_table address (reuse register)
3547 const Register tmp1 = r11;
3548 const Register tmp2 = r10;
3549 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax);
3550
3551 BLOCK_COMMENT("Entry:");
3552 __ enter(); // required for proper stackwalking of RuntimeStub frame
3553
3554 if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() &&
3555 VM_Version::supports_avx512bw() &&
3556 VM_Version::supports_avx512vl()) {
3557 // The constants used in the CRC32 algorithm requires the 1's compliment of the initial crc value.
3558 // However, the constant table for CRC32-C assumes the original crc value. Account for this
3559 // difference before calling and after returning.
3560 __ lea(table, ExternalAddress(StubRoutines::x86::crc_table_avx512_addr()));
3561 __ notl(crc);
3562 __ kernel_crc32_avx512(crc, buf, len, table, tmp1, tmp2);
3563 __ notl(crc);
3564 } else {
3565 __ kernel_crc32(crc, buf, len, table, tmp1);
3566 }
3567
3568 __ movl(rax, crc);
3569 __ vzeroupper();
3570 __ leave(); // required for proper stackwalking of RuntimeStub frame
3571 __ ret(0);
3572
3573 // record the stub entry and end
3574 store_archive_data(stub_id, start, __ pc());
3575
3576 return start;
3577 }
3578
3579 /**
3580 * Arguments:
3581 *
3582 * Inputs:
3583 * c_rarg0 - int crc
3584 * c_rarg1 - byte* buf
3585 * c_rarg2 - long length
3586 * c_rarg3 - table_start - optional (present only when doing a library_call,
3587 * not used by x86 algorithm)
3588 *
3589 * Output:
3590 * rax - int crc result
3591 */
3592 address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
3593 assert(UseCRC32CIntrinsics, "need SSE4_2");
3594 StubId stub_id = StubId::stubgen_updateBytesCRC32C_id;
3595 int entry_count = StubInfo::entry_count(stub_id);
3596 assert(entry_count == 1, "sanity check");
3597 address start = load_archive_data(stub_id);
3598 if (start != nullptr) {
3599 return start;
3600 }
3601 __ align(CodeEntryAlignment);
3602 StubCodeMark mark(this, stub_id);
3603 start = __ pc();
3604
3605 //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs
3606 //Windows RCX RDX R8 R9 none none XMM0..XMM3
3607 //Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7
3608 const Register crc = c_rarg0; // crc
3609 const Register buf = c_rarg1; // source java byte array address
3610 const Register len = c_rarg2; // length
3611 const Register a = rax;
3612 const Register j = r9;
3613 const Register k = r10;
3614 const Register l = r11;
3615 #ifdef _WIN64
3616 const Register y = rdi;
3617 const Register z = rsi;
3618 #else
3619 const Register y = rcx;
3620 const Register z = r8;
3621 #endif
3622 assert_different_registers(crc, buf, len, a, j, k, l, y, z);
3623
3624 BLOCK_COMMENT("Entry:");
3625 __ enter(); // required for proper stackwalking of RuntimeStub frame
3626 Label L_continue;
3627
3628 if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() &&
3629 VM_Version::supports_avx512bw() &&
3630 VM_Version::supports_avx512vl()) {
3631 Label L_doSmall;
3632
3633 __ cmpl(len, 384);
3634 __ jcc(Assembler::lessEqual, L_doSmall);
3635
3636 __ lea(j, ExternalAddress(StubRoutines::x86::crc32c_table_avx512_addr()));
3637 __ kernel_crc32_avx512(crc, buf, len, j, l, k);
3638
3639 __ jmp(L_continue);
3640
3641 __ bind(L_doSmall);
3642 }
3643 #ifdef _WIN64
3644 __ push_ppx(y);
3645 __ push_ppx(z);
3646 #endif
3647 __ crc32c_ipl_alg2_alt2(crc, buf, len,
3648 a, j, k,
3649 l, y, z,
3650 c_farg0, c_farg1, c_farg2,
3651 is_pclmulqdq_supported);
3652 #ifdef _WIN64
3653 __ pop_ppx(z);
3654 __ pop_ppx(y);
3655 #endif
3656
3657 __ bind(L_continue);
3658 __ movl(rax, crc);
3659 __ vzeroupper();
3660 __ leave(); // required for proper stackwalking of RuntimeStub frame
3661 __ ret(0);
3662
3663 // record the stub entry and end
3664 store_archive_data(stub_id, start, __ pc());
3665
3666 return start;
3667 }
3668
3669
3670 /**
3671 * Arguments:
3672 *
3673 * Input:
3674 * c_rarg0 - x address
3675 * c_rarg1 - x length
3676 * c_rarg2 - y address
3677 * c_rarg3 - y length
3678 * not Win64
3679 * c_rarg4 - z address
3680 * Win64
3681 * rsp+40 - z address
3682 */
3683 address StubGenerator::generate_multiplyToLen() {
3684 StubId stub_id = StubId::stubgen_multiplyToLen_id;
3685 int entry_count = StubInfo::entry_count(stub_id);
3686 assert(entry_count == 1, "sanity check");
3687 address start = load_archive_data(stub_id);
3688 if (start != nullptr) {
3689 return start;
3690 }
3691 __ align(CodeEntryAlignment);
3692 StubCodeMark mark(this, stub_id);
3693 start = __ pc();
3694
3695 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3696 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3697 const Register x = rdi;
3698 const Register xlen = rax;
3699 const Register y = rsi;
3700 const Register ylen = rcx;
3701 const Register z = r8;
3702
3703 // Next registers will be saved on stack in multiply_to_len().
3704 const Register tmp0 = r11;
3705 const Register tmp1 = r12;
3706 const Register tmp2 = r13;
3707 const Register tmp3 = r14;
3708 const Register tmp4 = r15;
3709 const Register tmp5 = rbx;
3710
3711 BLOCK_COMMENT("Entry:");
3712 __ enter(); // required for proper stackwalking of RuntimeStub frame
3713
3714 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
3715 // ylen => rcx, z => r8
3716 // r9 and r10 may be used to save non-volatile registers
3717 #ifdef _WIN64
3718 // last argument (#4) is on stack on Win64
3719 __ movptr(z, Address(rsp, 6 * wordSize));
3720 #endif
3721
3722 __ movptr(xlen, rsi);
3723 __ movptr(y, rdx);
3724 __ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
3725
3726 restore_arg_regs();
3727
3728 __ leave(); // required for proper stackwalking of RuntimeStub frame
3729 __ ret(0);
3730
3731 // record the stub entry and end
3732 store_archive_data(stub_id, start, __ pc());
3733
3734 return start;
3735 }
3736
3737 /**
3738 * Arguments:
3739 *
3740 * Input:
3741 * c_rarg0 - obja address
3742 * c_rarg1 - objb address
3743 * c_rarg3 - length length
3744 * c_rarg4 - scale log2_array_indxscale
3745 *
3746 * Output:
3747 * rax - int >= mismatched index, < 0 bitwise complement of tail
3748 */
3749 address StubGenerator::generate_vectorizedMismatch() {
3750 StubId stub_id = StubId::stubgen_vectorizedMismatch_id;
3751 int entry_count = StubInfo::entry_count(stub_id);
3752 assert(entry_count == 1, "sanity check");
3753 address start = load_archive_data(stub_id);
3754 if (start != nullptr) {
3755 return start;
3756 }
3757 __ align(CodeEntryAlignment);
3758 StubCodeMark mark(this, stub_id);
3759 start = __ pc();
3760
3761 BLOCK_COMMENT("Entry:");
3762 __ enter();
3763
3764 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3765 const Register scale = c_rarg0; //rcx, will exchange with r9
3766 const Register objb = c_rarg1; //rdx
3767 const Register length = c_rarg2; //r8
3768 const Register obja = c_rarg3; //r9
3769 __ xchgq(obja, scale); //now obja and scale contains the correct contents
3770
3771 const Register tmp1 = r10;
3772 const Register tmp2 = r11;
3773 #endif
3774 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3775 const Register obja = c_rarg0; //U:rdi
3776 const Register objb = c_rarg1; //U:rsi
3777 const Register length = c_rarg2; //U:rdx
3778 const Register scale = c_rarg3; //U:rcx
3779 const Register tmp1 = r8;
3780 const Register tmp2 = r9;
3781 #endif
3782 const Register result = rax; //return value
3783 const XMMRegister vec0 = xmm0;
3784 const XMMRegister vec1 = xmm1;
3785 const XMMRegister vec2 = xmm2;
3786
3787 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
3788
3789 __ vzeroupper();
3790 __ leave();
3791 __ ret(0);
3792
3793 // record the stub entry and end
3794 store_archive_data(stub_id, start, __ pc());
3795
3796 return start;
3797 }
3798
3799 /**
3800 * Arguments:
3801 *
3802 // Input:
3803 // c_rarg0 - x address
3804 // c_rarg1 - x length
3805 // c_rarg2 - z address
3806 // c_rarg3 - z length
3807 *
3808 */
3809 address StubGenerator::generate_squareToLen() {
3810
3811 StubId stub_id = StubId::stubgen_squareToLen_id;
3812 int entry_count = StubInfo::entry_count(stub_id);
3813 assert(entry_count == 1, "sanity check");
3814 address start = load_archive_data(stub_id);
3815 if (start != nullptr) {
3816 return start;
3817 }
3818 __ align(CodeEntryAlignment);
3819 StubCodeMark mark(this, stub_id);
3820 start = __ pc();
3821
3822 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3823 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
3824 const Register x = rdi;
3825 const Register len = rsi;
3826 const Register z = r8;
3827 const Register zlen = rcx;
3828
3829 const Register tmp1 = r12;
3830 const Register tmp2 = r13;
3831 const Register tmp3 = r14;
3832 const Register tmp4 = r15;
3833 const Register tmp5 = rbx;
3834
3835 BLOCK_COMMENT("Entry:");
3836 __ enter(); // required for proper stackwalking of RuntimeStub frame
3837
3838 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
3839 // zlen => rcx
3840 // r9 and r10 may be used to save non-volatile registers
3841 __ movptr(r8, rdx);
3842 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
3843
3844 restore_arg_regs();
3845
3846 __ leave(); // required for proper stackwalking of RuntimeStub frame
3847 __ ret(0);
3848
3849 // record the stub entry and end
3850 store_archive_data(stub_id, start, __ pc());
3851
3852 return start;
3853 }
3854
3855 address StubGenerator::generate_method_entry_barrier() {
3856 StubId stub_id = StubId::stubgen_method_entry_barrier_id;
3857 int entry_count = StubInfo::entry_count(stub_id);
3858 assert(entry_count == 1, "sanity check");
3859 address start = load_archive_data(stub_id);
3860 if (start != nullptr) {
3861 return start;
3862 }
3863 __ align(CodeEntryAlignment);
3864 StubCodeMark mark(this, stub_id);
3865 start = __ pc();
3866
3867 Label deoptimize_label;
3868
3869 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing
3870
3871 BLOCK_COMMENT("Entry:");
3872 __ enter(); // save rbp
3873
3874 // save c_rarg0, because we want to use that value.
3875 // We could do without it but then we depend on the number of slots used by pusha
3876 __ push_ppx(c_rarg0);
3877
3878 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address
3879
3880 __ pusha();
3881
3882 // The method may have floats as arguments, and we must spill them before calling
3883 // the VM runtime.
3884 assert(Argument::n_float_register_parameters_j == 8, "Assumption");
3885 const int xmm_size = wordSize * 2;
3886 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
3887 __ subptr(rsp, xmm_spill_size);
3888 __ movdqu(Address(rsp, xmm_size * 7), xmm7);
3889 __ movdqu(Address(rsp, xmm_size * 6), xmm6);
3890 __ movdqu(Address(rsp, xmm_size * 5), xmm5);
3891 __ movdqu(Address(rsp, xmm_size * 4), xmm4);
3892 __ movdqu(Address(rsp, xmm_size * 3), xmm3);
3893 __ movdqu(Address(rsp, xmm_size * 2), xmm2);
3894 __ movdqu(Address(rsp, xmm_size * 1), xmm1);
3895 __ movdqu(Address(rsp, xmm_size * 0), xmm0);
3896
3897 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);
3898
3899 __ movdqu(xmm0, Address(rsp, xmm_size * 0));
3900 __ movdqu(xmm1, Address(rsp, xmm_size * 1));
3901 __ movdqu(xmm2, Address(rsp, xmm_size * 2));
3902 __ movdqu(xmm3, Address(rsp, xmm_size * 3));
3903 __ movdqu(xmm4, Address(rsp, xmm_size * 4));
3904 __ movdqu(xmm5, Address(rsp, xmm_size * 5));
3905 __ movdqu(xmm6, Address(rsp, xmm_size * 6));
3906 __ movdqu(xmm7, Address(rsp, xmm_size * 7));
3907 __ addptr(rsp, xmm_spill_size);
3908
3909 __ cmpl(rax, 1); // 1 means deoptimize
3910 __ jcc(Assembler::equal, deoptimize_label);
3911
3912 __ popa();
3913 __ pop_ppx(c_rarg0);
3914
3915 __ leave();
3916
3917 __ addptr(rsp, 1 * wordSize); // cookie
3918 __ ret(0);
3919
3920
3921 __ BIND(deoptimize_label);
3922
3923 __ popa();
3924 __ pop_ppx(c_rarg0);
3925
3926 __ leave();
3927
3928 // this can be taken out, but is good for verification purposes. getting a SIGSEGV
3929 // here while still having a correct stack is valuable
3930 __ testptr(rsp, Address(rsp, 0));
3931
3932 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
3933 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be callers verified_entry_point
3934
3935 // record the stub entry and end
3936 store_archive_data(stub_id, start, __ pc());
3937
3938 return start;
3939 }
3940
3941 /**
3942 * Arguments:
3943 *
3944 * Input:
3945 * c_rarg0 - out address
3946 * c_rarg1 - in address
3947 * c_rarg2 - offset
3948 * c_rarg3 - len
3949 * not Win64
3950 * c_rarg4 - k
3951 * Win64
3952 * rsp+40 - k
3953 */
3954 address StubGenerator::generate_mulAdd() {
3955 StubId stub_id = StubId::stubgen_mulAdd_id;
3956 int entry_count = StubInfo::entry_count(stub_id);
3957 assert(entry_count == 1, "sanity check");
3958 address start = load_archive_data(stub_id);
3959 if (start != nullptr) {
3960 return start;
3961 }
3962 __ align(CodeEntryAlignment);
3963 StubCodeMark mark(this, stub_id);
3964 start = __ pc();
3965
3966 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3967 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3968 const Register out = rdi;
3969 const Register in = rsi;
3970 const Register offset = r11;
3971 const Register len = rcx;
3972 const Register k = r8;
3973
3974 // Next registers will be saved on stack in mul_add().
3975 const Register tmp1 = r12;
3976 const Register tmp2 = r13;
3977 const Register tmp3 = r14;
3978 const Register tmp4 = r15;
3979 const Register tmp5 = rbx;
3980
3981 BLOCK_COMMENT("Entry:");
3982 __ enter(); // required for proper stackwalking of RuntimeStub frame
3983
3984 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
3985 // len => rcx, k => r8
3986 // r9 and r10 may be used to save non-volatile registers
3987 #ifdef _WIN64
3988 // last argument is on stack on Win64
3989 __ movl(k, Address(rsp, 6 * wordSize));
3990 #endif
3991 __ movptr(r11, rdx); // move offset in rdx to offset(r11)
3992 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
3993
3994 restore_arg_regs();
3995
3996 __ leave(); // required for proper stackwalking of RuntimeStub frame
3997 __ ret(0);
3998
3999 // record the stub entry and end
4000 store_archive_data(stub_id, start, __ pc());
4001
4002 return start;
4003 }
4004
4005 address StubGenerator::generate_bigIntegerRightShift() {
4006 StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id;
4007 int entry_count = StubInfo::entry_count(stub_id);
4008 assert(entry_count == 1, "sanity check");
4009 address start = load_archive_data(stub_id);
4010 if (start != nullptr) {
4011 return start;
4012 }
4013 __ align(CodeEntryAlignment);
4014 StubCodeMark mark(this, stub_id);
4015 start = __ pc();
4016
4017 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit;
4018 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8.
4019 const Register newArr = rdi;
4020 const Register oldArr = rsi;
4021 const Register newIdx = rdx;
4022 const Register shiftCount = rcx; // It was intentional to have shiftCount in rcx since it is used implicitly for shift.
4023 const Register totalNumIter = r8;
4024
4025 // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps.
4026 // For everything else, we prefer using r9 and r10 since we do not have to save them before use.
4027 const Register tmp1 = r11; // Caller save.
4028 const Register tmp2 = rax; // Caller save.
4029 const Register tmp3 = WIN64_ONLY(r12) NOT_WIN64(r9); // Windows: Callee save. Linux: Caller save.
4030 const Register tmp4 = WIN64_ONLY(r13) NOT_WIN64(r10); // Windows: Callee save. Linux: Caller save.
4031 const Register tmp5 = r14; // Callee save.
4032 const Register tmp6 = r15;
4033
4034 const XMMRegister x0 = xmm0;
4035 const XMMRegister x1 = xmm1;
4036 const XMMRegister x2 = xmm2;
4037
4038 BLOCK_COMMENT("Entry:");
4039 __ enter(); // required for proper stackwalking of RuntimeStub frame
4040
4041 #ifdef _WIN64
4042 setup_arg_regs(4);
4043 // For windows, since last argument is on stack, we need to move it to the appropriate register.
4044 __ movl(totalNumIter, Address(rsp, 6 * wordSize));
4045 // Save callee save registers.
4046 __ push_ppx(tmp3);
4047 __ push_ppx(tmp4);
4048 #endif
4049 __ push_ppx(tmp5);
4050
4051 // Rename temps used throughout the code.
4052 const Register idx = tmp1;
4053 const Register nIdx = tmp2;
4054
4055 __ xorl(idx, idx);
4056
4057 // Start right shift from end of the array.
4058 // For example, if #iteration = 4 and newIdx = 1
4059 // then dest[4] = src[4] >> shiftCount | src[3] <<< (shiftCount - 32)
4060 // if #iteration = 4 and newIdx = 0
4061 // then dest[3] = src[4] >> shiftCount | src[3] <<< (shiftCount - 32)
4062 __ movl(idx, totalNumIter);
4063 __ movl(nIdx, idx);
4064 __ addl(nIdx, newIdx);
4065
4066 // If vectorization is enabled, check if the number of iterations is at least 64
4067 // If not, then go to ShifTwo processing 2 iterations
4068 if (VM_Version::supports_avx512_vbmi2()) {
4069 __ cmpptr(totalNumIter, (AVX3Threshold/64));
4070 __ jcc(Assembler::less, ShiftTwo);
4071
4072 if (AVX3Threshold < 16 * 64) {
4073 __ cmpl(totalNumIter, 16);
4074 __ jcc(Assembler::less, ShiftTwo);
4075 }
4076 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit);
4077 __ subl(idx, 16);
4078 __ subl(nIdx, 16);
4079 __ BIND(Shift512Loop);
4080 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 4), Assembler::AVX_512bit);
4081 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit);
4082 __ vpshrdvd(x2, x1, x0, Assembler::AVX_512bit);
4083 __ evmovdqul(Address(newArr, nIdx, Address::times_4), x2, Assembler::AVX_512bit);
4084 __ subl(nIdx, 16);
4085 __ subl(idx, 16);
4086 __ jcc(Assembler::greaterEqual, Shift512Loop);
4087 __ addl(idx, 16);
4088 __ addl(nIdx, 16);
4089 }
4090 __ BIND(ShiftTwo);
4091 __ cmpl(idx, 2);
4092 __ jcc(Assembler::less, ShiftOne);
4093 __ subl(idx, 2);
4094 __ subl(nIdx, 2);
4095 __ BIND(ShiftTwoLoop);
4096 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 8));
4097 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4));
4098 __ movl(tmp3, Address(oldArr, idx, Address::times_4));
4099 __ shrdl(tmp5, tmp4);
4100 __ shrdl(tmp4, tmp3);
4101 __ movl(Address(newArr, nIdx, Address::times_4, 4), tmp5);
4102 __ movl(Address(newArr, nIdx, Address::times_4), tmp4);
4103 __ subl(nIdx, 2);
4104 __ subl(idx, 2);
4105 __ jcc(Assembler::greaterEqual, ShiftTwoLoop);
4106 __ addl(idx, 2);
4107 __ addl(nIdx, 2);
4108
4109 // Do the last iteration
4110 __ BIND(ShiftOne);
4111 __ cmpl(idx, 1);
4112 __ jcc(Assembler::less, Exit);
4113 __ subl(idx, 1);
4114 __ subl(nIdx, 1);
4115 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4));
4116 __ movl(tmp3, Address(oldArr, idx, Address::times_4));
4117 __ shrdl(tmp4, tmp3);
4118 __ movl(Address(newArr, nIdx, Address::times_4), tmp4);
4119 __ BIND(Exit);
4120 __ vzeroupper();
4121 // Restore callee save registers.
4122 __ pop_ppx(tmp5);
4123 #ifdef _WIN64
4124 __ pop_ppx(tmp4);
4125 __ pop_ppx(tmp3);
4126 restore_arg_regs();
4127 #endif
4128 __ leave(); // required for proper stackwalking of RuntimeStub frame
4129 __ ret(0);
4130
4131 // record the stub entry and end
4132 store_archive_data(stub_id, start, __ pc());
4133
4134 return start;
4135 }
4136
4137 /**
4138 * Arguments:
4139 *
4140 * Input:
4141 * c_rarg0 - newArr address
4142 * c_rarg1 - oldArr address
4143 * c_rarg2 - newIdx
4144 * c_rarg3 - shiftCount
4145 * not Win64
4146 * c_rarg4 - numIter
4147 * Win64
4148 * rsp40 - numIter
4149 */
4150 address StubGenerator::generate_bigIntegerLeftShift() {
4151 StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id;
4152 int entry_count = StubInfo::entry_count(stub_id);
4153 assert(entry_count == 1, "sanity check");
4154 address start = load_archive_data(stub_id);
4155 if (start != nullptr) {
4156 return start;
4157 }
4158 __ align(CodeEntryAlignment);
4159 StubCodeMark mark(this, stub_id);
4160 start = __ pc();
4161
4162 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit;
4163 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8.
4164 const Register newArr = rdi;
4165 const Register oldArr = rsi;
4166 const Register newIdx = rdx;
4167 const Register shiftCount = rcx; // It was intentional to have shiftCount in rcx since it is used implicitly for shift.
4168 const Register totalNumIter = r8;
4169 // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps.
4170 // For everything else, we prefer using r9 and r10 since we do not have to save them before use.
4171 const Register tmp1 = r11; // Caller save.
4172 const Register tmp2 = rax; // Caller save.
4173 const Register tmp3 = WIN64_ONLY(r12) NOT_WIN64(r9); // Windows: Callee save. Linux: Caller save.
4174 const Register tmp4 = WIN64_ONLY(r13) NOT_WIN64(r10); // Windows: Callee save. Linux: Caller save.
4175 const Register tmp5 = r14; // Callee save.
4176
4177 const XMMRegister x0 = xmm0;
4178 const XMMRegister x1 = xmm1;
4179 const XMMRegister x2 = xmm2;
4180 BLOCK_COMMENT("Entry:");
4181 __ enter(); // required for proper stackwalking of RuntimeStub frame
4182
4183 #ifdef _WIN64
4184 setup_arg_regs(4);
4185 // For windows, since last argument is on stack, we need to move it to the appropriate register.
4186 __ movl(totalNumIter, Address(rsp, 6 * wordSize));
4187 // Save callee save registers.
4188 __ push_ppx(tmp3);
4189 __ push_ppx(tmp4);
4190 #endif
4191 __ push_ppx(tmp5);
4192
4193 // Rename temps used throughout the code
4194 const Register idx = tmp1;
4195 const Register numIterTmp = tmp2;
4196
4197 // Start idx from zero.
4198 __ xorl(idx, idx);
4199 // Compute interior pointer for new array. We do this so that we can use same index for both old and new arrays.
4200 __ lea(newArr, Address(newArr, newIdx, Address::times_4));
4201 __ movl(numIterTmp, totalNumIter);
4202
4203 // If vectorization is enabled, check if the number of iterations is at least 64
4204 // If not, then go to ShiftTwo shifting two numbers at a time
4205 if (VM_Version::supports_avx512_vbmi2()) {
4206 __ cmpl(totalNumIter, (AVX3Threshold/64));
4207 __ jcc(Assembler::less, ShiftTwo);
4208
4209 if (AVX3Threshold < 16 * 64) {
4210 __ cmpl(totalNumIter, 16);
4211 __ jcc(Assembler::less, ShiftTwo);
4212 }
4213 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit);
4214 __ subl(numIterTmp, 16);
4215 __ BIND(Shift512Loop);
4216 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit);
4217 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 0x4), Assembler::AVX_512bit);
4218 __ vpshldvd(x1, x2, x0, Assembler::AVX_512bit);
4219 __ evmovdqul(Address(newArr, idx, Address::times_4), x1, Assembler::AVX_512bit);
4220 __ addl(idx, 16);
4221 __ subl(numIterTmp, 16);
4222 __ jcc(Assembler::greaterEqual, Shift512Loop);
4223 __ addl(numIterTmp, 16);
4224 }
4225 __ BIND(ShiftTwo);
4226 __ cmpl(totalNumIter, 1);
4227 __ jcc(Assembler::less, Exit);
4228 __ movl(tmp3, Address(oldArr, idx, Address::times_4));
4229 __ subl(numIterTmp, 2);
4230 __ jcc(Assembler::less, ShiftOne);
4231
4232 __ BIND(ShiftTwoLoop);
4233 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4));
4234 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 0x8));
4235 __ shldl(tmp3, tmp4);
4236 __ shldl(tmp4, tmp5);
4237 __ movl(Address(newArr, idx, Address::times_4), tmp3);
4238 __ movl(Address(newArr, idx, Address::times_4, 0x4), tmp4);
4239 __ movl(tmp3, tmp5);
4240 __ addl(idx, 2);
4241 __ subl(numIterTmp, 2);
4242 __ jcc(Assembler::greaterEqual, ShiftTwoLoop);
4243
4244 // Do the last iteration
4245 __ BIND(ShiftOne);
4246 __ addl(numIterTmp, 2);
4247 __ cmpl(numIterTmp, 1);
4248 __ jcc(Assembler::less, Exit);
4249 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4));
4250 __ shldl(tmp3, tmp4);
4251 __ movl(Address(newArr, idx, Address::times_4), tmp3);
4252
4253 __ BIND(Exit);
4254 __ vzeroupper();
4255 // Restore callee save registers.
4256 __ pop_ppx(tmp5);
4257 #ifdef _WIN64
4258 __ pop_ppx(tmp4);
4259 __ pop_ppx(tmp3);
4260 restore_arg_regs();
4261 #endif
4262 __ leave(); // required for proper stackwalking of RuntimeStub frame
4263 __ ret(0);
4264
4265 // record the stub entry and end
4266 store_archive_data(stub_id, start, __ pc());
4267
4268 return start;
4269 }
4270
4271 void StubGenerator::generate_libm_stubs() {
4272 if (UseLibmIntrinsic && InlineIntrinsics) {
4273 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
4274 StubRoutines::_dsin = generate_libmSin(); // from stubGenerator_x86_64_sin.cpp
4275 }
4276 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
4277 StubRoutines::_dcos = generate_libmCos(); // from stubGenerator_x86_64_cos.cpp
4278 }
4279 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
4280 StubRoutines::_dtan = generate_libmTan(); // from stubGenerator_x86_64_tan.cpp
4281 }
4282 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsinh)) {
4283 StubRoutines::_dsinh = generate_libmSinh(); // from stubGenerator_x86_64_sinh.cpp
4284 }
4285 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtanh)) {
4286 StubRoutines::_dtanh = generate_libmTanh(); // from stubGenerator_x86_64_tanh.cpp
4287 }
4288 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcbrt)) {
4289 StubRoutines::_dcbrt = generate_libmCbrt(); // from stubGenerator_x86_64_cbrt.cpp
4290 }
4291 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
4292 StubRoutines::_dexp = generate_libmExp(); // from stubGenerator_x86_64_exp.cpp
4293 }
4294 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
4295 StubRoutines::_dpow = generate_libmPow(); // from stubGenerator_x86_64_pow.cpp
4296 }
4297 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
4298 StubRoutines::_dlog = generate_libmLog(); // from stubGenerator_x86_64_log.cpp
4299 }
4300 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
4301 StubRoutines::_dlog10 = generate_libmLog10(); // from stubGenerator_x86_64_log.cpp
4302 }
4303 }
4304 }
4305
4306 /**
4307 * Arguments:
4308 *
4309 * Input:
4310 * c_rarg0 - float16 jshort
4311 *
4312 * Output:
4313 * xmm0 - float
4314 */
4315 address StubGenerator::generate_float16ToFloat() {
4316 StubId stub_id = StubId::stubgen_hf2f_id;
4317 int entry_count = StubInfo::entry_count(stub_id);
4318 assert(entry_count == 1, "sanity check");
4319 address start = load_archive_data(stub_id);
4320 if (start != nullptr) {
4321 return start;
4322 }
4323 StubCodeMark mark(this, stub_id);
4324
4325 start = __ pc();
4326
4327 BLOCK_COMMENT("Entry:");
4328 // No need for RuntimeStub frame since it is called only during JIT compilation
4329
4330 // Load value into xmm0 and convert
4331 __ flt16_to_flt(xmm0, c_rarg0);
4332
4333 __ ret(0);
4334
4335 // record the stub entry and end
4336 store_archive_data(stub_id, start, __ pc());
4337
4338 return start;
4339 }
4340
4341 /**
4342 * Arguments:
4343 *
4344 * Input:
4345 * xmm0 - float
4346 *
4347 * Output:
4348 * rax - float16 jshort
4349 */
4350 address StubGenerator::generate_floatToFloat16() {
4351 StubId stub_id = StubId::stubgen_f2hf_id;
4352 int entry_count = StubInfo::entry_count(stub_id);
4353 assert(entry_count == 1, "sanity check");
4354 address start = load_archive_data(stub_id);
4355 if (start != nullptr) {
4356 return start;
4357 }
4358 StubCodeMark mark(this, stub_id);
4359
4360 start = __ pc();
4361
4362 BLOCK_COMMENT("Entry:");
4363 // No need for RuntimeStub frame since it is called only during JIT compilation
4364
4365 // Convert and put result into rax
4366 __ flt_to_flt16(rax, xmm0, xmm1);
4367
4368 __ ret(0);
4369
4370 // record the stub entry and end
4371 store_archive_data(stub_id, start, __ pc());
4372
4373 return start;
4374 }
4375
4376 address StubGenerator::generate_cont_thaw(StubId stub_id) {
4377 if (!Continuations::enabled()) return nullptr;
4378
4379 bool return_barrier;
4380 bool return_barrier_exception;
4381 Continuation::thaw_kind kind;
4382
4383 switch (stub_id) {
4384 case StubId::stubgen_cont_thaw_id:
4385 return_barrier = false;
4386 return_barrier_exception = false;
4387 kind = Continuation::thaw_top;
4388 break;
4389 case StubId::stubgen_cont_returnBarrier_id:
4390 return_barrier = true;
4391 return_barrier_exception = false;
4392 kind = Continuation::thaw_return_barrier;
4393 break;
4394 case StubId::stubgen_cont_returnBarrierExc_id:
4395 return_barrier = true;
4396 return_barrier_exception = true;
4397 kind = Continuation::thaw_return_barrier_exception;
4398 break;
4399 default:
4400 ShouldNotReachHere();
4401 }
4402 int entry_count = StubInfo::entry_count(stub_id);
4403 assert(entry_count == 1, "sanity check");
4404 address start = load_archive_data(stub_id);
4405 if (start != nullptr) {
4406 return start;
4407 }
4408 StubCodeMark mark(this, stub_id);
4409 start = __ pc();
4410
4411 // TODO: Handle Valhalla return types. May require generating different return barriers.
4412
4413 if (!return_barrier) {
4414 // Pop return address. If we don't do this, we get a drift,
4415 // where the bottom-most frozen frame continuously grows.
4416 __ pop(c_rarg3);
4417 } else {
4418 __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4419 }
4420
4421 #ifdef ASSERT
4422 {
4423 Label L_good_sp;
4424 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4425 __ jcc(Assembler::equal, L_good_sp);
4426 __ stop("Incorrect rsp at thaw entry");
4427 __ BIND(L_good_sp);
4428 }
4429 #endif // ASSERT
4430
4431 if (return_barrier) {
4432 // Preserve possible return value from a method returning to the return barrier.
4433 __ push_ppx(rax);
4434 __ push_d(xmm0);
4435 }
4436
4437 __ movptr(c_rarg0, r15_thread);
4438 __ movptr(c_rarg1, (return_barrier ? 1 : 0));
4439 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), 2);
4440 __ movptr(rbx, rax);
4441
4442 if (return_barrier) {
4443 // Restore return value from a method returning to the return barrier.
4444 // No safepoint in the call to thaw, so even an oop return value should be OK.
4445 __ pop_d(xmm0);
4446 __ pop_ppx(rax);
4447 }
4448
4449 #ifdef ASSERT
4450 {
4451 Label L_good_sp;
4452 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4453 __ jcc(Assembler::equal, L_good_sp);
4454 __ stop("Incorrect rsp after prepare thaw");
4455 __ BIND(L_good_sp);
4456 }
4457 #endif // ASSERT
4458
4459 // rbx contains the size of the frames to thaw, 0 if overflow or no more frames
4460 Label L_thaw_success;
4461 __ testptr(rbx, rbx);
4462 __ jccb(Assembler::notZero, L_thaw_success);
4463 __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
4464 __ bind(L_thaw_success);
4465
4466 // Make room for the thawed frames and align the stack.
4467 __ subptr(rsp, rbx);
4468 __ andptr(rsp, -StackAlignmentInBytes);
4469
4470 if (return_barrier) {
4471 // Preserve possible return value from a method returning to the return barrier. (Again.)
4472 __ push_ppx(rax);
4473 __ push_d(xmm0);
4474 }
4475
4476 // If we want, we can templatize thaw by kind, and have three different entries.
4477 __ movptr(c_rarg0, r15_thread);
4478 __ movptr(c_rarg1, kind);
4479 __ call_VM_leaf(Continuation::thaw_entry(), 2);
4480 __ movptr(rbx, rax);
4481
4482 if (return_barrier) {
4483 // Restore return value from a method returning to the return barrier. (Again.)
4484 // No safepoint in the call to thaw, so even an oop return value should be OK.
4485 __ pop_d(xmm0);
4486 __ pop_ppx(rax);
4487 } else {
4488 // Return 0 (success) from doYield.
4489 __ xorptr(rax, rax);
4490 }
4491
4492 // After thawing, rbx is the SP of the yielding frame.
4493 // Move there, and then to saved RBP slot.
4494 __ movptr(rsp, rbx);
4495 __ subptr(rsp, 2*wordSize);
4496
4497 if (return_barrier_exception) {
4498 __ movptr(c_rarg0, r15_thread);
4499 __ movptr(c_rarg1, Address(rsp, wordSize)); // return address
4500
4501 // rax still holds the original exception oop, save it before the call
4502 __ push_ppx(rax);
4503
4504 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
4505 __ movptr(rbx, rax);
4506
4507 // Continue at exception handler:
4508 // rax: exception oop
4509 // rbx: exception handler
4510 // rdx: exception pc
4511 __ pop_ppx(rax);
4512 __ verify_oop(rax);
4513 __ pop(rbp); // pop out RBP here too
4514 __ pop(rdx);
4515 __ jmp(rbx);
4516 } else {
4517 // We are "returning" into the topmost thawed frame; see Thaw::push_return_frame
4518 __ pop(rbp);
4519 __ ret(0);
4520 }
4521
4522 // record the stub entry and end
4523 store_archive_data(stub_id, start, __ pc());
4524
4525 return start;
4526 }
4527
4528 address StubGenerator::generate_cont_thaw() {
4529 return generate_cont_thaw(StubId::stubgen_cont_thaw_id);
4530 }
4531
4532 // TODO: will probably need multiple return barriers depending on return type
4533
4534 address StubGenerator::generate_cont_returnBarrier() {
4535 return generate_cont_thaw(StubId::stubgen_cont_returnBarrier_id);
4536 }
4537
4538 address StubGenerator::generate_cont_returnBarrier_exception() {
4539 return generate_cont_thaw(StubId::stubgen_cont_returnBarrierExc_id);
4540 }
4541
4542 address StubGenerator::generate_cont_preempt_stub() {
4543 if (!Continuations::enabled()) return nullptr;
4544 StubId stub_id = StubId::stubgen_cont_preempt_id;
4545 int entry_count = StubInfo::entry_count(stub_id);
4546 assert(entry_count == 1, "sanity check");
4547 address start = load_archive_data(stub_id);
4548 if (start != nullptr) {
4549 return start;
4550 }
4551 StubCodeMark mark(this, stub_id);
4552 start = __ pc();
4553
4554 __ reset_last_Java_frame(true);
4555
4556 // Set rsp to enterSpecial frame, i.e. remove all frames copied into the heap.
4557 __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4558
4559 Label preemption_cancelled;
4560 __ movbool(rscratch1, Address(r15_thread, JavaThread::preemption_cancelled_offset()));
4561 __ testbool(rscratch1);
4562 __ jcc(Assembler::notZero, preemption_cancelled);
4563
4564 // Remove enterSpecial frame from the stack and return to Continuation.run() to unmount.
4565 SharedRuntime::continuation_enter_cleanup(_masm);
4566 __ pop(rbp);
4567 __ ret(0);
4568
4569 // We acquired the monitor after freezing the frames so call thaw to continue execution.
4570 __ bind(preemption_cancelled);
4571 __ movbool(Address(r15_thread, JavaThread::preemption_cancelled_offset()), false);
4572 __ lea(rbp, Address(rsp, checked_cast<int32_t>(ContinuationEntry::size())));
4573 __ movptr(rscratch1, ExternalAddress(ContinuationEntry::thaw_call_pc_address()));
4574 __ jmp(rscratch1);
4575
4576 // record the stub entry and end
4577 store_archive_data(stub_id, start, __ pc());
4578
4579 return start;
4580 }
4581
4582 // exception handler for upcall stubs
4583 address StubGenerator::generate_upcall_stub_exception_handler() {
4584 StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id;
4585 int entry_count = StubInfo::entry_count(stub_id);
4586 assert(entry_count == 1, "sanity check");
4587 address start = load_archive_data(stub_id);
4588 if (start != nullptr) {
4589 return start;
4590 }
4591 StubCodeMark mark(this, stub_id);
4592 start = __ pc();
4593
4594 // native caller has no idea how to handle exceptions
4595 // we just crash here. Up to callee to catch exceptions.
4596 __ verify_oop(rax);
4597 __ vzeroupper();
4598 __ mov(c_rarg0, rax);
4599 __ andptr(rsp, -StackAlignmentInBytes); // align stack as required by ABI
4600 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
4601 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, UpcallLinker::handle_uncaught_exception)));
4602 __ should_not_reach_here();
4603
4604 // record the stub entry and end
4605 store_archive_data(stub_id, start, __ pc());
4606
4607 return start;
4608 }
4609
4610 // load Method* target of MethodHandle
4611 // j_rarg0 = jobject receiver
4612 // rbx = result
4613 address StubGenerator::generate_upcall_stub_load_target() {
4614 StubId stub_id = StubId::stubgen_upcall_stub_load_target_id;
4615 int entry_count = StubInfo::entry_count(stub_id);
4616 assert(entry_count == 1, "sanity check");
4617 address start = load_archive_data(stub_id);
4618 if (start != nullptr) {
4619 return start;
4620 }
4621 StubCodeMark mark(this, stub_id);
4622 start = __ pc();
4623
4624 __ resolve_global_jobject(j_rarg0, rscratch1);
4625 // Load target method from receiver
4626 __ load_heap_oop(rbx, Address(j_rarg0, java_lang_invoke_MethodHandle::form_offset()), rscratch1);
4627 __ load_heap_oop(rbx, Address(rbx, java_lang_invoke_LambdaForm::vmentry_offset()), rscratch1);
4628 __ load_heap_oop(rbx, Address(rbx, java_lang_invoke_MemberName::method_offset()), rscratch1);
4629 __ access_load_at(T_ADDRESS, IN_HEAP, rbx,
4630 Address(rbx, java_lang_invoke_ResolvedMethodName::vmtarget_offset()),
4631 noreg);
4632 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // just in case callee is deoptimized
4633
4634 __ ret(0);
4635
4636 // record the stub entry and end
4637 store_archive_data(stub_id, start, __ pc());
4638
4639 return start;
4640 }
4641
4642 void StubGenerator::generate_lookup_secondary_supers_table_stub() {
4643 StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id;
4644 GrowableArray<address> entries;
4645 int entry_count = StubInfo::entry_count(stub_id);
4646 assert(entry_count == Klass::SECONDARY_SUPERS_TABLE_SIZE, "sanity check");
4647 address start = load_archive_data(stub_id, &entries);
4648 if (start != nullptr) {
4649 assert(entries.length() == Klass::SECONDARY_SUPERS_TABLE_SIZE - 1,
4650 "unexpected extra entry count %d", entries.length());
4651 StubRoutines::_lookup_secondary_supers_table_stubs[0] = start;
4652 for (int slot = 1; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
4653 StubRoutines::_lookup_secondary_supers_table_stubs[slot] = entries.at(slot - 1);
4654 }
4655 return;
4656 }
4657 StubCodeMark mark(this, stub_id);
4658
4659 const Register
4660 r_super_klass = rax,
4661 r_sub_klass = rsi,
4662 result = rdi;
4663
4664 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
4665 address next_entry = __ pc();
4666 if (slot == 0) {
4667 start = next_entry;
4668 } else {
4669 entries.append(next_entry);
4670 }
4671 StubRoutines::_lookup_secondary_supers_table_stubs[slot] = next_entry;
4672 __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass,
4673 rdx, rcx, rbx, r11, // temps
4674 result,
4675 slot);
4676 __ ret(0);
4677 }
4678
4679 // record the stub entry and end plus all the auxiliary entries
4680 store_archive_data(stub_id, start, __ pc(), &entries);
4681 }
4682
4683 // Slow path implementation for UseSecondarySupersTable.
4684 address StubGenerator::generate_lookup_secondary_supers_table_slow_path_stub() {
4685 StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id;
4686 int entry_count = StubInfo::entry_count(stub_id);
4687 assert(entry_count == 1, "sanity check");
4688 address start = load_archive_data(stub_id);
4689 if (start != nullptr) {
4690 return start;
4691 }
4692 StubCodeMark mark(this, stub_id);
4693 start = __ pc();
4694
4695 const Register
4696 r_super_klass = rax,
4697 r_array_base = rbx,
4698 r_array_index = rdx,
4699 r_sub_klass = rsi,
4700 r_bitmap = r11,
4701 result = rdi;
4702
4703 Label L_success;
4704 __ lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index, r_bitmap,
4705 rcx, rdi, // temps
4706 &L_success);
4707 // bind(L_failure);
4708 __ movl(result, 1);
4709 __ ret(0);
4710
4711 __ bind(L_success);
4712 __ movl(result, 0);
4713 __ ret(0);
4714
4715 // record the stub entry and end
4716 store_archive_data(stub_id, start, __ pc());
4717
4718 return start;
4719 }
4720
4721 void StubGenerator::create_control_words() {
4722 // Round to nearest, 64-bit mode, exceptions masked, flags specialized
4723 StubRoutines::x86::_mxcsr_std = EnableX86ECoreOpts ? 0x1FBF : 0x1F80;
4724 // Round to zero, 64-bit mode, exceptions masked, flags specialized
4725 StubRoutines::x86::_mxcsr_rz = EnableX86ECoreOpts ? 0x7FBF : 0x7F80;
4726 }
4727
4728 // Initialization
4729 void StubGenerator::generate_preuniverse_stubs() {
4730 // atomic calls
4731 StubRoutines::_fence_entry = generate_orderaccess_fence();
4732 }
4733
4734 void StubGenerator::generate_initial_stubs() {
4735 // Generates all stubs and initializes the entry points
4736
4737 // This platform-specific settings are needed by generate_call_stub()
4738 create_control_words();
4739
4740 // Initialize table for unsafe copy memeory check.
4741 if (UnsafeMemoryAccess::_table == nullptr) {
4742 UnsafeMemoryAccess::create_table(16 + 4); // 16 for copyMemory; 4 for setMemory
4743 }
4744
4745 // entry points that exist in all platforms Note: This is code
4746 // that could be shared among different platforms - however the
4747 // benefit seems to be smaller than the disadvantage of having a
4748 // much more complicated generator structure. See also comment in
4749 // stubRoutines.hpp.
4750
4751 StubRoutines::_forward_exception_entry = generate_forward_exception();
4752
4753 StubRoutines::_call_stub_entry =
4754 generate_call_stub(StubRoutines::_call_stub_return_address);
4755
4756 // is referenced by megamorphic call
4757 StubRoutines::_catch_exception_entry = generate_catch_exception();
4758
4759 // platform dependent
4760 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
4761
4762 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
4763 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
4764 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
4765 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
4766
4767 StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubId::stubgen_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
4768 StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubId::stubgen_float_sign_flip_id, 0x8000000080000000);
4769 StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubId::stubgen_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4770 StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubId::stubgen_double_sign_flip_id, 0x8000000000000000);
4771
4772 if (UseCRC32Intrinsics) {
4773 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
4774 }
4775
4776 if (UseCRC32CIntrinsics) {
4777 bool supports_clmul = VM_Version::supports_clmul();
4778 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
4779 }
4780
4781 if (VM_Version::supports_float16()) {
4782 // For results consistency both intrinsics should be enabled.
4783 // vmIntrinsics checks InlineIntrinsics flag, no need to check it here.
4784 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
4785 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
4786 StubRoutines::_hf2f = generate_float16ToFloat();
4787 StubRoutines::_f2hf = generate_floatToFloat16();
4788 }
4789 }
4790
4791 generate_libm_stubs();
4792
4793 StubRoutines::_fmod = generate_libmFmod(); // from stubGenerator_x86_64_fmod.cpp
4794 }
4795
4796 void StubGenerator::generate_continuation_stubs() {
4797 // Continuation stubs:
4798 StubRoutines::_cont_thaw = generate_cont_thaw();
4799 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
4800 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
4801 StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
4802 }
4803
4804 void StubGenerator::generate_final_stubs() {
4805 // Generates the rest of stubs and initializes the entry points
4806
4807 // support for verify_oop (must happen after universe_init)
4808 if (VerifyOops) {
4809 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
4810 }
4811
4812 // arraycopy stubs used by compilers
4813 generate_arraycopy_stubs();
4814
4815 StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
4816
4817 #ifdef COMPILER2
4818 if (UseSecondarySupersTable) {
4819 StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
4820 if (! InlineSecondarySupersTest) {
4821 generate_lookup_secondary_supers_table_stub();
4822 }
4823 }
4824 #endif // COMPILER2
4825
4826 if (UseVectorizedMismatchIntrinsic) {
4827 StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
4828 }
4829
4830 StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
4831 StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
4832 }
4833
4834 void StubGenerator::generate_compiler_stubs() {
4835 #if COMPILER2_OR_JVMCI
4836
4837 // Entry points that are C2 compiler specific.
4838
4839 StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask(StubId::stubgen_vector_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
4840 StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask(StubId::stubgen_vector_float_sign_flip_id, 0x8000000080000000);
4841 StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask(StubId::stubgen_vector_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4842 StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask(StubId::stubgen_vector_double_sign_flip_id, 0x8000000000000000);
4843 StubRoutines::x86::_vector_all_bits_set = generate_vector_mask(StubId::stubgen_vector_all_bits_set_id, 0xFFFFFFFFFFFFFFFF);
4844 StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask(StubId::stubgen_vector_int_mask_cmp_bits_id, 0x0000000100000001);
4845 StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask(StubId::stubgen_vector_short_to_byte_mask_id, 0x00ff00ff00ff00ff);
4846 StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask();
4847 StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask(StubId::stubgen_vector_int_to_byte_mask_id, 0x000000ff000000ff);
4848 StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask(StubId::stubgen_vector_int_to_short_mask_id, 0x0000ffff0000ffff);
4849 StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32(StubId::stubgen_vector_32_bit_mask_id, Assembler::AVX_512bit,
4850 0xFFFFFFFF, 0, 0, 0);
4851 StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32(StubId::stubgen_vector_64_bit_mask_id, Assembler::AVX_512bit,
4852 0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
4853 StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_int_shuffle_mask_id, 0x0302010003020100);
4854 StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask();
4855 StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_short_shuffle_mask_id, 0x0100010001000100);
4856 StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_long_shuffle_mask_id, 0x0000000100000000);
4857 StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask(StubId::stubgen_vector_long_sign_mask_id, 0x8000000000000000);
4858 generate_iota_indices();
4859 StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut();
4860 StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut();
4861 StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long();
4862 StubRoutines::x86::_vector_reverse_byte_perm_mask_int = generate_vector_reverse_byte_perm_mask_int();
4863 StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short();
4864
4865 if (VM_Version::supports_avx2() && !VM_Version::supports_avx512vl()) {
4866 StubRoutines::x86::_compress_perm_table32 = generate_compress_perm_table(StubId::stubgen_compress_perm_table32_id);
4867 StubRoutines::x86::_compress_perm_table64 = generate_compress_perm_table(StubId::stubgen_compress_perm_table64_id);
4868 StubRoutines::x86::_expand_perm_table32 = generate_expand_perm_table(StubId::stubgen_expand_perm_table32_id);
4869 StubRoutines::x86::_expand_perm_table64 = generate_expand_perm_table(StubId::stubgen_expand_perm_table64_id);
4870 }
4871
4872 if (VM_Version::supports_avx2() && !VM_Version::supports_avx512_vpopcntdq()) {
4873 // lut implementation influenced by counting 1s algorithm from section 5-1 of Hackers' Delight.
4874 StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut();
4875 }
4876
4877 generate_aes_stubs();
4878
4879 generate_ghash_stubs();
4880
4881 generate_chacha_stubs();
4882
4883 generate_kyber_stubs();
4884
4885 generate_dilithium_stubs();
4886
4887 generate_sha3_stubs();
4888
4889 // data cache line writeback
4890 StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
4891 StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
4892
4893 #ifdef COMPILER2
4894 if ((UseAVX == 2) && EnableX86ECoreOpts) {
4895 generate_string_indexof(StubRoutines::_string_indexof_array);
4896 }
4897 #endif
4898
4899 if (UseAdler32Intrinsics) {
4900 StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
4901 }
4902
4903 if (UsePoly1305Intrinsics) {
4904 StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks();
4905 }
4906
4907 if (UseIntPolyIntrinsics) {
4908 StubRoutines::_intpoly_montgomeryMult_P256 = generate_intpoly_montgomeryMult_P256();
4909 StubRoutines::_intpoly_assign = generate_intpoly_assign();
4910 }
4911
4912 if (UseMD5Intrinsics) {
4913 StubRoutines::_md5_implCompress = generate_md5_implCompress(StubId::stubgen_md5_implCompress_id);
4914 StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubId::stubgen_md5_implCompressMB_id);
4915 }
4916
4917 if (UseSHA1Intrinsics) {
4918 StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
4919 StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
4920 StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubId::stubgen_sha1_implCompress_id);
4921 StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubId::stubgen_sha1_implCompressMB_id);
4922 }
4923
4924 if (UseSHA256Intrinsics) {
4925 address entry2 = nullptr;
4926 address entry3 = nullptr;
4927 StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
4928 char* dst = (char*)StubRoutines::x86::_k256_W;
4929 char* src = (char*)StubRoutines::x86::_k256;
4930 for (int ii = 0; ii < 16; ++ii) {
4931 memcpy(dst + 32 * ii, src + 16 * ii, 16);
4932 memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
4933 }
4934 StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
4935 StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(entry2, entry3);
4936 StubRoutines::x86::_pshuffle_byte_flip_mask_00ba_addr = entry2;
4937 StubRoutines::x86::_pshuffle_byte_flip_mask_dc00_addr = entry3;
4938 StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id);
4939 StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id);
4940 }
4941
4942 if (UseSHA512Intrinsics) {
4943 address entry2 = nullptr;
4944 StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
4945 StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(entry2);
4946 StubRoutines::x86::_pshuffle_byte_flip_mask_ymm_lo_addr_sha512 = entry2;
4947 StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id);
4948 StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id);
4949 }
4950
4951 if (UseBASE64Intrinsics) {
4952 if(VM_Version::supports_avx2()) {
4953 StubRoutines::x86::_avx2_shuffle_base64 = base64_avx2_shuffle_addr();
4954 StubRoutines::x86::_avx2_input_mask_base64 = base64_avx2_input_mask_addr();
4955 StubRoutines::x86::_avx2_lut_base64 = base64_avx2_lut_addr();
4956 StubRoutines::x86::_avx2_decode_tables_base64 = base64_AVX2_decode_tables_addr();
4957 StubRoutines::x86::_avx2_decode_lut_tables_base64 = base64_AVX2_decode_LUT_tables_addr();
4958 }
4959 StubRoutines::x86::_encoding_table_base64 = base64_encoding_table_addr();
4960 if (VM_Version::supports_avx512_vbmi()) {
4961 StubRoutines::x86::_shuffle_base64 = base64_shuffle_addr();
4962 StubRoutines::x86::_lookup_lo_base64 = base64_vbmi_lookup_lo_addr();
4963 StubRoutines::x86::_lookup_hi_base64 = base64_vbmi_lookup_hi_addr();
4964 StubRoutines::x86::_lookup_lo_base64url = base64_vbmi_lookup_lo_url_addr();
4965 StubRoutines::x86::_lookup_hi_base64url = base64_vbmi_lookup_hi_url_addr();
4966 StubRoutines::x86::_pack_vec_base64 = base64_vbmi_pack_vec_addr();
4967 StubRoutines::x86::_join_0_1_base64 = base64_vbmi_join_0_1_addr();
4968 StubRoutines::x86::_join_1_2_base64 = base64_vbmi_join_1_2_addr();
4969 StubRoutines::x86::_join_2_3_base64 = base64_vbmi_join_2_3_addr();
4970 }
4971 StubRoutines::x86::_decoding_table_base64 = base64_decoding_table_addr();
4972 StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
4973 StubRoutines::_base64_decodeBlock = generate_base64_decodeBlock();
4974 }
4975
4976 #ifdef COMPILER2
4977 if (UseMultiplyToLenIntrinsic) {
4978 StubRoutines::_multiplyToLen = generate_multiplyToLen();
4979 }
4980 if (UseSquareToLenIntrinsic) {
4981 StubRoutines::_squareToLen = generate_squareToLen();
4982 }
4983 if (UseMulAddIntrinsic) {
4984 StubRoutines::_mulAdd = generate_mulAdd();
4985 }
4986 if (VM_Version::supports_avx512_vbmi2()) {
4987 StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
4988 StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift();
4989 }
4990 if (UseMontgomeryMultiplyIntrinsic) {
4991 StubRoutines::_montgomeryMultiply
4992 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
4993 }
4994 if (UseMontgomerySquareIntrinsic) {
4995 StubRoutines::_montgomerySquare
4996 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
4997 }
4998
4999 // Load x86_64_sort library on supported hardware to enable SIMD sort and partition intrinsics
5000
5001 if (VM_Version::supports_avx512dq() || VM_Version::supports_avx2()) {
5002 void *libsimdsort = nullptr;
5003 char ebuf_[1024];
5004 char dll_name_simd_sort[JVM_MAXPATHLEN];
5005 if (os::dll_locate_lib(dll_name_simd_sort, sizeof(dll_name_simd_sort), Arguments::get_dll_dir(), "simdsort")) {
5006 libsimdsort = os::dll_load(dll_name_simd_sort, ebuf_, sizeof ebuf_);
5007 }
5008 // Get addresses for SIMD sort and partition routines
5009 if (libsimdsort != nullptr) {
5010 log_info(library)("Loaded library %s, handle " INTPTR_FORMAT, JNI_LIB_PREFIX "simdsort" JNI_LIB_SUFFIX, p2i(libsimdsort));
5011
5012 os::snprintf_checked(ebuf_, sizeof(ebuf_), VM_Version::supports_avx512_simd_sort() ? "avx512_sort" : "avx2_sort");
5013 StubRoutines::_array_sort = (address)os::dll_lookup(libsimdsort, ebuf_);
5014
5015 os::snprintf_checked(ebuf_, sizeof(ebuf_), VM_Version::supports_avx512_simd_sort() ? "avx512_partition" : "avx2_partition");
5016 StubRoutines::_array_partition = (address)os::dll_lookup(libsimdsort, ebuf_);
5017 }
5018 }
5019
5020 #endif // COMPILER2
5021 #endif // COMPILER2_OR_JVMCI
5022 }
5023
5024 StubGenerator::StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) {
5025 switch(blob_id) {
5026 case BlobId::stubgen_preuniverse_id:
5027 generate_preuniverse_stubs();
5028 break;
5029 case BlobId::stubgen_initial_id:
5030 generate_initial_stubs();
5031 break;
5032 case BlobId::stubgen_continuation_id:
5033 generate_continuation_stubs();
5034 break;
5035 case BlobId::stubgen_compiler_id:
5036 generate_compiler_stubs();
5037 break;
5038 case BlobId::stubgen_final_id:
5039 generate_final_stubs();
5040 break;
5041 default:
5042 fatal("unexpected blob id: %s", StubInfo::name(blob_id));
5043 break;
5044 };
5045 }
5046
5047 #if INCLUDE_CDS
5048 // publish addresses of static data defined in this file and in other
5049 // stubgen stub generator files
5050 void StubGenerator::init_AOTAddressTable(GrowableArray<address>& external_addresses) {
5051 init_AOTAddressTable_adler(external_addresses);
5052 init_AOTAddressTable_aes(external_addresses);
5053 init_AOTAddressTable_cbrt(external_addresses);
5054 init_AOTAddressTable_chacha(external_addresses);
5055 // constants publishes for all of address use by cos and almost all of sin
5056 init_AOTAddressTable_constants(external_addresses);
5057 init_AOTAddressTable_dilithium(external_addresses);
5058 init_AOTAddressTable_exp(external_addresses);
5059 init_AOTAddressTable_fmod(external_addresses);
5060 init_AOTAddressTable_ghash(external_addresses);
5061 init_AOTAddressTable_kyber(external_addresses);
5062 init_AOTAddressTable_log(external_addresses);
5063 init_AOTAddressTable_poly1305(external_addresses);
5064 init_AOTAddressTable_poly_mont(external_addresses);
5065 init_AOTAddressTable_pow(external_addresses);
5066 init_AOTAddressTable_sha3(external_addresses);
5067 init_AOTAddressTable_sin(external_addresses);
5068 init_AOTAddressTable_sinh(external_addresses);
5069 init_AOTAddressTable_tan(external_addresses);
5070 init_AOTAddressTable_tanh(external_addresses);
5071 }
5072 #endif // INCLUDE_CDS
5073
5074 void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) {
5075 StubGenerator g(code, blob_id, stub_data);
5076 }
5077
5078 #undef __