5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/vmIntrinsics.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "gc/shared/barrierSetNMethod.hpp"
32 #include "gc/shared/gc_globals.hpp"
33 #include "memory/universe.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "prims/upcallLinker.hpp"
36 #include "runtime/arguments.hpp"
37 #include "runtime/continuationEntry.hpp"
38 #include "runtime/javaThread.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "stubGenerator_x86_64.hpp"
42 #ifdef COMPILER2
43 #include "opto/runtime.hpp"
44 #include "opto/c2_globals.hpp"
45 #endif
46 #if INCLUDE_JVMCI
47 #include "jvmci/jvmci_globals.hpp"
48 #endif
49
50 // For a more detailed description of the stub routine structure,
51 // see the comment in stubRoutines.hpp
52
53 #define __ _masm->
54 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
55
56 #ifdef PRODUCT
57 #define BLOCK_COMMENT(str) /* nothing */
58 #else
59 #define BLOCK_COMMENT(str) __ block_comment(str)
60 #endif // PRODUCT
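
// Note: the excerpt below is from generate_call_stub(), the VM-to-Java entry
// stub. The loop copies the Java arguments from the parameter array (base in
// c_rarg2, remaining count in c_rarg1) onto the stack before the method is
// invoked.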
284 __ BIND(loop);
285 __ movptr(rax, Address(c_rarg2, 0)); // get parameter
286 __ addptr(c_rarg2, wordSize); // advance to next parameter
287 __ decrementl(c_rarg1); // decrement counter
288 __ push(rax); // pass parameter
289 __ jcc(Assembler::notZero, loop);
290
291 // call Java function
292 __ BIND(parameters_done);
293 __ movptr(rbx, method); // get Method*
294 __ movptr(c_rarg1, entry_point); // get entry_point
295 __ mov(r13, rsp); // set sender sp
296 BLOCK_COMMENT("call Java function");
297 __ call(c_rarg1);
298
299 BLOCK_COMMENT("call_stub_return_address:");
300 return_address = __ pc();
301
302 // store result depending on type (everything that is not
303 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
304 __ movptr(c_rarg0, result);
305 Label is_long, is_float, is_double, exit;
306 __ movl(c_rarg1, result_type);
307 __ cmpl(c_rarg1, T_OBJECT);
308 __ jcc(Assembler::equal, is_long);
309 __ cmpl(c_rarg1, T_LONG);
310 __ jcc(Assembler::equal, is_long);
311 __ cmpl(c_rarg1, T_FLOAT);
312 __ jcc(Assembler::equal, is_float);
313 __ cmpl(c_rarg1, T_DOUBLE);
314 __ jcc(Assembler::equal, is_double);
315 #ifdef ASSERT
316 // make sure the type is INT
317 {
318 Label L;
319 __ cmpl(c_rarg1, T_INT);
320 __ jcc(Assembler::equal, L);
321 __ stop("StubRoutines::call_stub: unexpected result type");
322 __ bind(L);
323 }
324 #endif
325
326 // handle T_INT case
327 __ movl(Address(c_rarg0, 0), rax);
328
329 __ BIND(exit);
330
331 // pop parameters
332 __ lea(rsp, rsp_after_call);
333
334 #ifdef ASSERT
335 // verify that threads correspond
336 {
337 Label L1, L2, L3;
338 __ cmpptr(r15_thread, thread);
339 __ jcc(Assembler::equal, L1);
340 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
341 __ bind(L1);
342 __ get_thread_slow(rbx);
343 __ cmpptr(r15_thread, thread);
344 __ jcc(Assembler::equal, L2);
345 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
346 __ bind(L2);
347 __ cmpptr(r15_thread, rbx);
365 __ movptr(r13, r13_save);
366 __ movptr(r12, r12_save);
367 __ movptr(rbx, rbx_save);
368
369 #ifdef _WIN64
370 __ movptr(rdi, rdi_save);
371 __ movptr(rsi, rsi_save);
372 #else
373 __ ldmxcsr(mxcsr_save);
374 #endif
375
376 // restore rsp
377 __ addptr(rsp, -rsp_after_call_off * wordSize);
378
379 // return
380 __ vzeroupper();
381 __ pop(rbp);
382 __ ret(0);
383
384 // handle return types different from T_INT
385 __ BIND(is_long);
386 __ movq(Address(c_rarg0, 0), rax);
387 __ jmp(exit);
388
389 __ BIND(is_float);
390 __ movflt(Address(c_rarg0, 0), xmm0);
391 __ jmp(exit);
392
393 __ BIND(is_double);
394 __ movdbl(Address(c_rarg0, 0), xmm0);
395 __ jmp(exit);
396
397 return start;
398 }
399
400 // Return point for a Java call if there's an exception thrown in
401 // Java code. The exception is caught and transformed into a
402 // pending exception stored in JavaThread that can be tested from
403 // within the VM.
404 //
405 // Note: Usually the parameters are removed by the callee. When an
406 // exception crosses an activation frame boundary, that is not the
407 // case if the callee is compiled code, so we need to set up rsp
408 // ourselves.
409 //
410 // rax: exception oop
411
412 address StubGenerator::generate_catch_exception() {
413 StubGenStubId stub_id = StubGenStubId::catch_exception_id;
414 StubCodeMark mark(this, stub_id);
3745 * Output:
3746 * rax - float16 jshort
3747 */
3748 address StubGenerator::generate_floatToFloat16() {
3749 StubGenStubId stub_id = StubGenStubId::f2hf_id;
3750 StubCodeMark mark(this, stub_id);
3751
3752 address start = __ pc();
3753
3754 BLOCK_COMMENT("Entry:");
3755 // No need for a RuntimeStub frame since this is called only during JIT compilation
3756
3757 // Convert and put result into rax
3758 __ flt_to_flt16(rax, xmm0, xmm1);
3759
3760 __ ret(0);
3761
3762 return start;
3763 }
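
// Calling convention for the stub above: the float argument arrives in xmm0,
// the float16 result is returned as a jshort in rax, and xmm1 is used as a
// scratch register by flt_to_flt16().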
3764
3765 address StubGenerator::generate_cont_thaw(StubGenStubId stub_id) {
3766 if (!Continuations::enabled()) return nullptr;
3767
3768 bool return_barrier;
3769 bool return_barrier_exception;
3770 Continuation::thaw_kind kind;
3771
3772 switch (stub_id) {
3773 case cont_thaw_id:
3774 return_barrier = false;
3775 return_barrier_exception = false;
3776 kind = Continuation::thaw_top;
3777 break;
3778 case cont_returnBarrier_id:
3779 return_barrier = true;
3780 return_barrier_exception = false;
3781 kind = Continuation::thaw_return_barrier;
3782 break;
3783 case cont_returnBarrierExc_id:
3784 return_barrier = true;
3796 if (!return_barrier) {
3797 // Pop return address. If we don't do this, we get a drift,
3798 // where the bottom-most frozen frame continuously grows.
3799 __ pop(c_rarg3);
3800 } else {
3801 __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
3802 }
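// Either way, rsp should now equal the continuation entry sp saved in the
// thread (JavaThread::cont_entry_offset), which the ASSERT block below
// verifies.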
3803
3804 #ifdef ASSERT
3805 {
3806 Label L_good_sp;
3807 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
3808 __ jcc(Assembler::equal, L_good_sp);
3809 __ stop("Incorrect rsp at thaw entry");
3810 __ BIND(L_good_sp);
3811 }
3812 #endif // ASSERT
3813
3814 if (return_barrier) {
3815 // Preserve possible return value from a method returning to the return barrier.
3816 __ push(rax);
3817 __ push_d(xmm0);
3818 }
3819
3820 __ movptr(c_rarg0, r15_thread);
3821 __ movptr(c_rarg1, (return_barrier ? 1 : 0));
3822 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), 2);
3823 __ movptr(rbx, rax);
3824
3825 if (return_barrier) {
3826 // Restore return value from a method returning to the return barrier.
3827 // No safepoint in the call to thaw, so even an oop return value should be OK.
3828 __ pop_d(xmm0);
3829 __ pop(rax);
3830 }
3831
3832 #ifdef ASSERT
3833 {
3834 Label L_good_sp;
3835 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
3836 __ jcc(Assembler::equal, L_good_sp);
3837 __ stop("Incorrect rsp after prepare thaw");
3838 __ BIND(L_good_sp);
3839 }
3840 #endif // ASSERT
3841
3842 // rbx contains the size of the frames to thaw, 0 if overflow or no more frames
3843 Label L_thaw_success;
3844 __ testptr(rbx, rbx);
3845 __ jccb(Assembler::notZero, L_thaw_success);
3846 __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
3847 __ bind(L_thaw_success);
3848
3849 // Make room for the thawed frames and align the stack.
3850 __ subptr(rsp, rbx);
3851 __ andptr(rsp, -StackAlignmentInBytes);
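// (ANDing rsp with -StackAlignmentInBytes rounds it down to the next
// StackAlignmentInBytes boundary.)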
3852
3853 if (return_barrier) {
3854 // Preserve possible return value from a method returning to the return barrier. (Again.)
3855 __ push(rax);
3856 __ push_d(xmm0);
3857 }
3858
3859 // If we want, we can templatize thaw by kind, and have three different entries.
3860 __ movptr(c_rarg0, r15_thread);
3861 __ movptr(c_rarg1, kind);
3862 __ call_VM_leaf(Continuation::thaw_entry(), 2);
3863 __ movptr(rbx, rax);
3864
3865 if (return_barrier) {
3866 // Restore return value from a method returning to the return barrier. (Again.)
3867 // No safepoint in the call to thaw, so even an oop return value should be OK.
3868 __ pop_d(xmm0);
3869 __ pop(rax);
3870 } else {
3871 // Return 0 (success) from doYield.
3872 __ xorptr(rax, rax);
3873 }
3874
3875 // After thawing, rbx is the SP of the yielding frame.
3876 // Move there, and then to saved RBP slot.
3877 __ movptr(rsp, rbx);
3878 __ subptr(rsp, 2*wordSize);
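// rsp now points at the rbp spill slot of the yielding frame, below its
// return address.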
3879
3880 if (return_barrier_exception) {
3881 __ movptr(c_rarg0, r15_thread);
3882 __ movptr(c_rarg1, Address(rsp, wordSize)); // return address
3883
3884 // rax still holds the original exception oop, save it before the call
3885 __ push(rax);
3886
3887 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
3888 __ movptr(rbx, rax);
3889
4056
4057 void StubGenerator::generate_initial_stubs() {
4058 // Generates all stubs and initializes the entry points
4059
4060 // These platform-specific settings are needed by generate_call_stub()
4061 create_control_words();
4062
4063 // Initialize the table for the unsafe copy memory check.
4064 if (UnsafeMemoryAccess::_table == nullptr) {
4065 UnsafeMemoryAccess::create_table(16 + 4); // 16 for copyMemory; 4 for setMemory
4066 }
4067
4068 // Entry points that exist on all platforms. Note: this is code
4069 // that could be shared among different platforms; however, the
4070 // benefit seems smaller than the disadvantage of having a much
4071 // more complicated generator structure. See also the comment in
4072 // stubRoutines.hpp.
4073
4074 StubRoutines::_forward_exception_entry = generate_forward_exception();
4075
4076 StubRoutines::_call_stub_entry =
4077 generate_call_stub(StubRoutines::_call_stub_return_address);
4078
4079 // is referenced by megamorphic call
4080 StubRoutines::_catch_exception_entry = generate_catch_exception();
4081
4082 // platform dependent
4083 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
4084
4085 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
4086
4087 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
4088 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
4089 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
4090 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
4091
4092 StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubGenStubId::float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
4093 StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubGenStubId::float_sign_flip_id, 0x8000000080000000);
4094 StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubGenStubId::double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4095 StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubGenStubId::double_sign_flip_id, 0x8000000000000000);
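
// The *_sign_mask constants clear the IEEE sign bit (used for abs) and the
// *_sign_flip constants toggle it (used for negation); the float variants
// repeat the 32-bit pattern in both halves so they can also be applied to
// packed values.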
4105 StubRoutines::x86::generate_CRC32C_table(supports_clmul);
4106 StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
4107 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
4108 }
4109
4110 if (VM_Version::supports_float16()) {
4111 // For consistent results, both intrinsics should be enabled.
4112 // vmIntrinsics checks the InlineIntrinsics flag; no need to check it here.
4113 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
4114 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
4115 StubRoutines::_hf2f = generate_float16ToFloat();
4116 StubRoutines::_f2hf = generate_floatToFloat16();
4117 }
4118 }
4119
4120 generate_libm_stubs();
4121
4122 StubRoutines::_fmod = generate_libmFmod(); // from stubGenerator_x86_64_fmod.cpp
4123 }
4124
4125 void StubGenerator::generate_continuation_stubs() {
4126 // Continuation stubs:
4127 StubRoutines::_cont_thaw = generate_cont_thaw();
4128 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
4129 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
4130 StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
4131 }
4132
4133 void StubGenerator::generate_final_stubs() {
4134 // Generates the rest of the stubs and initializes the entry points
4135
4136 // support for verify_oop (must happen after universe_init)
4137 if (VerifyOops) {
4138 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
4139 }
4140
4141 // arraycopy stubs used by compilers
4142 generate_arraycopy_stubs();
4143
4144 StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "classfile/vmIntrinsics.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/barrierSetAssembler.hpp"
32 #include "gc/shared/barrierSetNMethod.hpp"
33 #include "gc/shared/gc_globals.hpp"
34 #include "memory/universe.hpp"
35 #include "prims/jvmtiExport.hpp"
36 #include "prims/upcallLinker.hpp"
37 #include "runtime/arguments.hpp"
38 #include "runtime/continuationEntry.hpp"
39 #include "runtime/javaThread.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "utilities/macros.hpp"
43 #include "vmreg_x86.inline.hpp"
44 #include "stubGenerator_x86_64.hpp"
45 #ifdef COMPILER2
46 #include "opto/runtime.hpp"
47 #include "opto/c2_globals.hpp"
48 #endif
49 #if INCLUDE_JVMCI
50 #include "jvmci/jvmci_globals.hpp"
51 #endif
52
53 // For a more detailed description of the stub routine structure,
54 // see the comment in stubRoutines.hpp
55
56 #define __ _masm->
57 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
58
59 #ifdef PRODUCT
60 #define BLOCK_COMMENT(str) /* nothing */
61 #else
62 #define BLOCK_COMMENT(str) __ block_comment(str)
63 #endif // PRODUCT
287 __ BIND(loop);
288 __ movptr(rax, Address(c_rarg2, 0)); // get parameter
289 __ addptr(c_rarg2, wordSize); // advance to next parameter
290 __ decrementl(c_rarg1); // decrement counter
291 __ push(rax); // pass parameter
292 __ jcc(Assembler::notZero, loop);
293
294 // call Java function
295 __ BIND(parameters_done);
296 __ movptr(rbx, method); // get Method*
297 __ movptr(c_rarg1, entry_point); // get entry_point
298 __ mov(r13, rsp); // set sender sp
299 BLOCK_COMMENT("call Java function");
300 __ call(c_rarg1);
301
302 BLOCK_COMMENT("call_stub_return_address:");
303 return_address = __ pc();
304
305 // store result depending on type (everything that is not
306 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
307 __ movptr(r13, result);
308 Label is_long, is_float, is_double, check_prim, exit;
309 __ movl(rbx, result_type);
310 __ cmpl(rbx, T_OBJECT);
311 __ jcc(Assembler::equal, check_prim);
312 __ cmpl(rbx, T_LONG);
313 __ jcc(Assembler::equal, is_long);
314 __ cmpl(rbx, T_FLOAT);
315 __ jcc(Assembler::equal, is_float);
316 __ cmpl(rbx, T_DOUBLE);
317 __ jcc(Assembler::equal, is_double);
318 #ifdef ASSERT
319 // make sure the type is INT
320 {
321 Label L;
322 __ cmpl(rbx, T_INT);
323 __ jcc(Assembler::equal, L);
324 __ stop("StubRoutines::call_stub: unexpected result type");
325 __ bind(L);
326 }
327 #endif
328
329 // handle T_INT case
330 __ movl(Address(r13, 0), rax);
331
332 __ BIND(exit);
333
334 // pop parameters
335 __ lea(rsp, rsp_after_call);
336
337 #ifdef ASSERT
338 // verify that threads correspond
339 {
340 Label L1, L2, L3;
341 __ cmpptr(r15_thread, thread);
342 __ jcc(Assembler::equal, L1);
343 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
344 __ bind(L1);
345 __ get_thread_slow(rbx);
346 __ cmpptr(r15_thread, thread);
347 __ jcc(Assembler::equal, L2);
348 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
349 __ bind(L2);
350 __ cmpptr(r15_thread, rbx);
368 __ movptr(r13, r13_save);
369 __ movptr(r12, r12_save);
370 __ movptr(rbx, rbx_save);
371
372 #ifdef _WIN64
373 __ movptr(rdi, rdi_save);
374 __ movptr(rsi, rsi_save);
375 #else
376 __ ldmxcsr(mxcsr_save);
377 #endif
378
379 // restore rsp
380 __ addptr(rsp, -rsp_after_call_off * wordSize);
381
382 // return
383 __ vzeroupper();
384 __ pop(rbp);
385 __ ret(0);
386
387 // handle return types different from T_INT
388 __ BIND(check_prim);
389 if (InlineTypeReturnedAsFields) {
390 // Check for scalarized return value
391 __ testptr(rax, 1);
392 __ jcc(Assembler::zero, is_long);
393 // Load pack handler address
394 __ andptr(rax, -2);
395 __ movptr(rax, Address(rax, InstanceKlass::adr_inlineklass_fixed_block_offset()));
396 __ movptr(rbx, Address(rax, InlineKlass::pack_handler_jobject_offset()));
397 // Call pack handler to initialize the buffer
398 __ call(rbx);
399 __ jmp(exit);
400 }
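// The scalarized-return protocol above relies on the callee tagging rax with
// its low bit set when an inline type is returned as individual fields: the
// tag is tested, then cleared, and the remaining bits are used as the
// InlineKlass* from which the pack handler is loaded to buffer the fields
// into a heap instance.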
401 __ BIND(is_long);
402 __ movq(Address(r13, 0), rax);
403 __ jmp(exit);
404
405 __ BIND(is_float);
406 __ movflt(Address(r13, 0), xmm0);
407 __ jmp(exit);
408
409 __ BIND(is_double);
410 __ movdbl(Address(r13, 0), xmm0);
411 __ jmp(exit);
412
413 return start;
414 }
415
416 // Return point for a Java call if there's an exception thrown in
417 // Java code. The exception is caught and transformed into a
418 // pending exception stored in JavaThread that can be tested from
419 // within the VM.
420 //
421 // Note: Usually the parameters are removed by the callee. When an
422 // exception crosses an activation frame boundary, that is not the
423 // case if the callee is compiled code, so we need to set up rsp
424 // ourselves.
425 //
426 // rax: exception oop
427
428 address StubGenerator::generate_catch_exception() {
429 StubGenStubId stub_id = StubGenStubId::catch_exception_id;
430 StubCodeMark mark(this, stub_id);
3761 * Output:
3762 * rax - float16 jshort
3763 */
3764 address StubGenerator::generate_floatToFloat16() {
3765 StubGenStubId stub_id = StubGenStubId::f2hf_id;
3766 StubCodeMark mark(this, stub_id);
3767
3768 address start = __ pc();
3769
3770 BLOCK_COMMENT("Entry:");
3771 // No need for a RuntimeStub frame since this is called only during JIT compilation
3772
3773 // Convert and put result into rax
3774 __ flt_to_flt16(rax, xmm0, xmm1);
3775
3776 __ ret(0);
3777
3778 return start;
3779 }
3780
3781 static void save_return_registers(MacroAssembler* masm) {
3782 masm->push(rax);
3783 if (InlineTypeReturnedAsFields) {
3784 masm->push(rdi);
3785 masm->push(rsi);
3786 masm->push(rdx);
3787 masm->push(rcx);
3788 masm->push(r8);
3789 masm->push(r9);
3790 }
3791 masm->push_d(xmm0);
3792 if (InlineTypeReturnedAsFields) {
3793 masm->push_d(xmm1);
3794 masm->push_d(xmm2);
3795 masm->push_d(xmm3);
3796 masm->push_d(xmm4);
3797 masm->push_d(xmm5);
3798 masm->push_d(xmm6);
3799 masm->push_d(xmm7);
3800 }
3801 #ifdef ASSERT
3802 masm->movq(rax, 0xBADC0FFE);
3803 masm->movq(rdi, rax);
3804 masm->movq(rsi, rax);
3805 masm->movq(rdx, rax);
3806 masm->movq(rcx, rax);
3807 masm->movq(r8, rax);
3808 masm->movq(r9, rax);
3809 masm->movq(xmm0, rax);
3810 masm->movq(xmm1, rax);
3811 masm->movq(xmm2, rax);
3812 masm->movq(xmm3, rax);
3813 masm->movq(xmm4, rax);
3814 masm->movq(xmm5, rax);
3815 masm->movq(xmm6, rax);
3816 masm->movq(xmm7, rax);
3817 #endif
3818 }
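
// Under ASSERT the registers just saved are deliberately poisoned with
// 0xBADC0FFE so that any code incorrectly relying on a return value across
// the runtime call fails visibly.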
3819
3820 static void restore_return_registers(MacroAssembler* masm) {
3821 if (InlineTypeReturnedAsFields) {
3822 masm->pop_d(xmm7);
3823 masm->pop_d(xmm6);
3824 masm->pop_d(xmm5);
3825 masm->pop_d(xmm4);
3826 masm->pop_d(xmm3);
3827 masm->pop_d(xmm2);
3828 masm->pop_d(xmm1);
3829 }
3830 masm->pop_d(xmm0);
3831 if (InlineTypeReturnedAsFields) {
3832 masm->pop(r9);
3833 masm->pop(r8);
3834 masm->pop(rcx);
3835 masm->pop(rdx);
3836 masm->pop(rsi);
3837 masm->pop(rdi);
3838 }
3839 masm->pop(rax);
3840 }
3841
3842 address StubGenerator::generate_cont_thaw(StubGenStubId stub_id) {
3843 if (!Continuations::enabled()) return nullptr;
3844
3845 bool return_barrier;
3846 bool return_barrier_exception;
3847 Continuation::thaw_kind kind;
3848
3849 switch (stub_id) {
3850 case cont_thaw_id:
3851 return_barrier = false;
3852 return_barrier_exception = false;
3853 kind = Continuation::thaw_top;
3854 break;
3855 case cont_returnBarrier_id:
3856 return_barrier = true;
3857 return_barrier_exception = false;
3858 kind = Continuation::thaw_return_barrier;
3859 break;
3860 case cont_returnBarrierExc_id:
3861 return_barrier = true;
3873 if (!return_barrier) {
3874 // Pop return address. If we don't do this, we get a drift,
3875 // where the bottom-most frozen frame continuously grows.
3876 __ pop(c_rarg3);
3877 } else {
3878 __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
3879 }
3880
3881 #ifdef ASSERT
3882 {
3883 Label L_good_sp;
3884 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
3885 __ jcc(Assembler::equal, L_good_sp);
3886 __ stop("Incorrect rsp at thaw entry");
3887 __ BIND(L_good_sp);
3888 }
3889 #endif // ASSERT
3890
3891 if (return_barrier) {
3892 // Preserve possible return value from a method returning to the return barrier.
3893 save_return_registers(_masm);
3894 }
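// Unlike the bare rax/xmm0 save in the mainline variant, save_return_registers()
// preserves the full set of possible return registers, since a scalarized
// inline-type return may occupy several gprs and xmms.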
3895
3896 __ movptr(c_rarg0, r15_thread);
3897 __ movptr(c_rarg1, (return_barrier ? 1 : 0));
3898 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), 2);
3899 __ movptr(rbx, rax);
3900
3901 if (return_barrier) {
3902 // Restore return value from a method returning to the return barrier.
3903 // No safepoint in the call to thaw, so even an oop return value should be OK.
3904 restore_return_registers(_masm);
3905 }
3906
3907 #ifdef ASSERT
3908 {
3909 Label L_good_sp;
3910 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
3911 __ jcc(Assembler::equal, L_good_sp);
3912 __ stop("Incorrect rsp after prepare thaw");
3913 __ BIND(L_good_sp);
3914 }
3915 #endif // ASSERT
3916
3917 // rbx contains the size of the frames to thaw, 0 if overflow or no more frames
3918 Label L_thaw_success;
3919 __ testptr(rbx, rbx);
3920 __ jccb(Assembler::notZero, L_thaw_success);
3921 __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
3922 __ bind(L_thaw_success);
3923
3924 // Make room for the thawed frames and align the stack.
3925 __ subptr(rsp, rbx);
3926 __ andptr(rsp, -StackAlignmentInBytes);
3927
3928 if (return_barrier) {
3929 // Preserve possible return value from a method returning to the return barrier. (Again.)
3930 save_return_registers(_masm);
3931 }
3932
3933 // If we want, we can templatize thaw by kind, and have three different entries.
3934 __ movptr(c_rarg0, r15_thread);
3935 __ movptr(c_rarg1, kind);
3936 __ call_VM_leaf(Continuation::thaw_entry(), 2);
3937 __ movptr(rbx, rax);
3938
3939 if (return_barrier) {
3940 // Restore return value from a method returning to the return barrier. (Again.)
3941 // No safepoint in the call to thaw, so even an oop return value should be OK.
3942 restore_return_registers(_masm);
3943 } else {
3944 // Return 0 (success) from doYield.
3945 __ xorptr(rax, rax);
3946 }
3947
3948 // After thawing, rbx is the SP of the yielding frame.
3949 // Move there, and then to saved RBP slot.
3950 __ movptr(rsp, rbx);
3951 __ subptr(rsp, 2*wordSize);
3952
3953 if (return_barrier_exception) {
3954 __ movptr(c_rarg0, r15_thread);
3955 __ movptr(c_rarg1, Address(rsp, wordSize)); // return address
3956
3957 // rax still holds the original exception oop, save it before the call
3958 __ push(rax);
3959
3960 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
3961 __ movptr(rbx, rax);
3962
4129
4130 void StubGenerator::generate_initial_stubs() {
4131 // Generates all stubs and initializes the entry points
4132
4133 // These platform-specific settings are needed by generate_call_stub()
4134 create_control_words();
4135
4136 // Initialize the table for the unsafe copy memory check.
4137 if (UnsafeMemoryAccess::_table == nullptr) {
4138 UnsafeMemoryAccess::create_table(16 + 4); // 16 for copyMemory; 4 for setMemory
4139 }
4140
4141 // Entry points that exist on all platforms. Note: this is code
4142 // that could be shared among different platforms; however, the
4143 // benefit seems smaller than the disadvantage of having a much
4144 // more complicated generator structure. See also the comment in
4145 // stubRoutines.hpp.
4146
4147 StubRoutines::_forward_exception_entry = generate_forward_exception();
4148
4149 // Generate these first because they are called from other stubs
4150 if (InlineTypeReturnedAsFields) {
4151 StubRoutines::_load_inline_type_fields_in_regs =
4152 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs),
4153 "load_inline_type_fields_in_regs", false);
4154 StubRoutines::_store_inline_type_fields_to_buf =
4155 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf),
4156 "store_inline_type_fields_to_buf", true);
4157 }
4158
4159 StubRoutines::_call_stub_entry =
4160 generate_call_stub(StubRoutines::_call_stub_return_address);
4161
4162 // is referenced by megamorphic call
4163 StubRoutines::_catch_exception_entry = generate_catch_exception();
4164
4165 // platform dependent
4166 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
4167
4168 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
4169
4170 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
4171 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
4172 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
4173 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
4174
4175 StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubGenStubId::float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
4176 StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubGenStubId::float_sign_flip_id, 0x8000000080000000);
4177 StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubGenStubId::double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4178 StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubGenStubId::double_sign_flip_id, 0x8000000000000000);
4188 StubRoutines::x86::generate_CRC32C_table(supports_clmul);
4189 StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
4190 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
4191 }
4192
4193 if (VM_Version::supports_float16()) {
4194 // For consistent results, both intrinsics should be enabled.
4195 // vmIntrinsics checks the InlineIntrinsics flag; no need to check it here.
4196 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
4197 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
4198 StubRoutines::_hf2f = generate_float16ToFloat();
4199 StubRoutines::_f2hf = generate_floatToFloat16();
4200 }
4201 }
4202
4203 generate_libm_stubs();
4204
4205 StubRoutines::_fmod = generate_libmFmod(); // from stubGenerator_x86_64_fmod.cpp
4206 }
4207
4208 // Called from the interpreter or from compiled code, either to load
4209 // the multiple returned values of an inline type instance into
4210 // registers or to store returned values into a newly allocated
4211 // inline type instance.
4212 // Register is a class, but it is assigned a numerical value; "0" is
4213 // assigned for xmm0, so we need to ignore -Wnonnull.
4214 PRAGMA_DIAG_PUSH
4215 PRAGMA_NONNULL_IGNORED
4216 address StubGenerator::generate_return_value_stub(address destination, const char* name, bool has_res) {
4217 // We need to save all registers the calling convention may use so
4218 // the runtime calls read or update those registers. This needs to
4219 // be in sync with SharedRuntime::java_return_convention().
4220 enum layout {
4221 pad_off = frame::arg_reg_save_area_bytes/BytesPerInt, pad_off_2,
4222 rax_off, rax_off_2,
4223 j_rarg5_off, j_rarg5_2,
4224 j_rarg4_off, j_rarg4_2,
4225 j_rarg3_off, j_rarg3_2,
4226 j_rarg2_off, j_rarg2_2,
4227 j_rarg1_off, j_rarg1_2,
4228 j_rarg0_off, j_rarg0_2,
4229 j_farg0_off, j_farg0_2,
4230 j_farg1_off, j_farg1_2,
4231 j_farg2_off, j_farg2_2,
4232 j_farg3_off, j_farg3_2,
4233 j_farg4_off, j_farg4_2,
4234 j_farg5_off, j_farg5_2,
4235 j_farg6_off, j_farg6_2,
4236 j_farg7_off, j_farg7_2,
4237 rbp_off, rbp_off_2,
4238 return_off, return_off_2,
4239
4240 framesize
4241 };
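
// Each *_off/*_2 pair above names the two 32-bit slots (BytesPerInt units)
// occupied by one 64-bit saved register; the layout must stay in sync with
// SharedRuntime::java_return_convention(), as noted above.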
4242
4243 CodeBuffer buffer(name, 1000, 512);
4244 MacroAssembler* _masm = new MacroAssembler(&buffer);
4245
4246 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
4247 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
4248 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
4249 int frame_size_in_words = frame_size_in_bytes / wordSize;
4250
4251 OopMapSet *oop_maps = new OopMapSet();
4252 OopMap* map = new OopMap(frame_size_in_slots, 0);
4253
4254 map->set_callee_saved(VMRegImpl::stack2reg(rax_off), rax->as_VMReg());
4255 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
4256 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
4257 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
4258 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
4259 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
4260 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
4261 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
4262 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
4263 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
4264 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
4265 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
4266 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
4267 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
4268 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
4269
4270 int start = __ offset();
4271
4272 __ subptr(rsp, frame_size_in_bytes - 8 /* return address */);
4273
4274 __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp);
4275 __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7);
4276 __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6);
4277 __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5);
4278 __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4);
4279 __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3);
4280 __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2);
4281 __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1);
4282 __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0);
4283
4284 __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0);
4285 __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1);
4286 __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2);
4287 __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3);
4288 __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4);
4289 __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5);
4290 __ movptr(Address(rsp, rax_off * BytesPerInt), rax);
4291
4292 int frame_complete = __ offset();
4293
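// Publish a walkable frame anchor in the thread before calling into the
// runtime; reset_last_Java_frame() below clears it again after the call.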
4294 __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
4295
4296 __ mov(c_rarg0, r15_thread);
4297 __ mov(c_rarg1, rax);
4298
4299 __ call(RuntimeAddress(destination));
4300
4301 // Set an oopmap for the call site.
4302
4303 oop_maps->add_gc_map( __ offset() - start, map);
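// The oop map records which stack slots hold the saved j_rarg/j_farg values
// so that, should a GC occur during the runtime call, any oops among them
// can be located and updated.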
4304
4305 // clear last_Java_sp
4306 __ reset_last_Java_frame(false);
4307
4308 __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt));
4309 __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt));
4310 __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt));
4311 __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt));
4312 __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt));
4313 __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt));
4314 __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt));
4315 __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt));
4316 __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt));
4317
4318 __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt));
4319 __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt));
4320 __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt));
4321 __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt));
4322 __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt));
4323 __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt));
4324 __ movptr(rax, Address(rsp, rax_off * BytesPerInt));
4325
4326 __ addptr(rsp, frame_size_in_bytes - 8);
4327
4328 // check for pending exceptions
4329 Label pending;
4330 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
4331 __ jcc(Assembler::notEqual, pending);
4332
4333 if (has_res) {
4334 __ get_vm_result_oop(rax);
4335 }
4336
4337 __ ret(0);
4338
4339 __ bind(pending);
4340
4341 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
4342 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
4343
4344 // -------------
4345 // make sure all code is generated
4346 _masm->flush();
4347
4348 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false);
4349 return stub->entry_point();
4350 }
4351
4352 void StubGenerator::generate_continuation_stubs() {
4353 // Continuation stubs:
4354 StubRoutines::_cont_thaw = generate_cont_thaw();
4355 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
4356 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
4357 StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
4358 }
4359
4360 void StubGenerator::generate_final_stubs() {
4361 // Generates the rest of the stubs and initializes the entry points
4362
4363 // support for verify_oop (must happen after universe_init)
4364 if (VerifyOops) {
4365 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
4366 }
4367
4368 // arraycopy stubs used by compilers
4369 generate_arraycopy_stubs();
4370
4371 StubRoutines::_method_entry_barrier = generate_method_entry_barrier();