5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/vmIntrinsics.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "gc/shared/barrierSetNMethod.hpp"
32 #include "gc/shared/gc_globals.hpp"
33 #include "memory/universe.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "prims/upcallLinker.hpp"
36 #include "runtime/arguments.hpp"
37 #include "runtime/continuationEntry.hpp"
38 #include "runtime/javaThread.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "stubGenerator_x86_64.hpp"
42 #ifdef COMPILER2
43 #include "opto/runtime.hpp"
44 #include "opto/c2_globals.hpp"
45 #endif
46 #if INCLUDE_JVMCI
47 #include "jvmci/jvmci_globals.hpp"
48 #endif
49
50 // For a more detailed description of the stub routine structure
51 // see the comment in stubRoutines.hpp
52
53 #define __ _masm->
54 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
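// TIMES_OOP is the Address scale factor for stepping through arrays of oops:
// 4-byte elements when compressed oops are in use, 8-byte elements otherwise
// (illustrative use: Address(array, index, TIMES_OOP, base_offset), where
// base_offset stands in for the array's element base offset).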
55
56 #ifdef PRODUCT
57 #define BLOCK_COMMENT(str) /* nothing */
58 #else
59 #define BLOCK_COMMENT(str) __ block_comment(str)
60 #endif // PRODUCT
295 __ movptr(rax, Address(c_rarg2, 0));// get parameter
296 __ addptr(c_rarg2, wordSize); // advance to next parameter
297 __ decrementl(c_rarg1); // decrement counter
298 __ push(rax); // pass parameter
299 __ jcc(Assembler::notZero, loop);
300
301 // call Java function
302 __ BIND(parameters_done);
303 __ movptr(rbx, method); // get Method*
304 __ movptr(c_rarg1, entry_point); // get entry_point
305 __ mov(r13, rsp); // set sender sp
306 BLOCK_COMMENT("call Java function");
307 __ call(c_rarg1);
308
309 BLOCK_COMMENT("call_stub_return_address:");
310 return_address = __ pc();
311 entries.append(return_address);
312
313 // store result depending on type (everything that is not
314 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
315 __ movptr(c_rarg0, result);
316 Label is_long, is_float, is_double, exit;
317 __ movl(c_rarg1, result_type);
318 __ cmpl(c_rarg1, T_OBJECT);
319 __ jcc(Assembler::equal, is_long);
320 __ cmpl(c_rarg1, T_LONG);
321 __ jcc(Assembler::equal, is_long);
322 __ cmpl(c_rarg1, T_FLOAT);
323 __ jcc(Assembler::equal, is_float);
324 __ cmpl(c_rarg1, T_DOUBLE);
325 __ jcc(Assembler::equal, is_double);
326 #ifdef ASSERT
327 // make sure the type is INT
328 {
329 Label L;
330 __ cmpl(c_rarg1, T_INT);
331 __ jcc(Assembler::equal, L);
332 __ stop("StubRoutines::call_stub: unexpected result type");
333 __ bind(L);
334 }
335 #endif
336
337 // handle T_INT case
338 __ movl(Address(c_rarg0, 0), rax);
339
340 __ BIND(exit);
341
342 // pop parameters
343 __ lea(rsp, rsp_after_call);
344
345 #ifdef ASSERT
346 // verify that threads correspond
347 {
348 Label L1, L2, L3;
349 __ cmpptr(r15_thread, thread);
350 __ jcc(Assembler::equal, L1);
351 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
352 __ bind(L1);
353 __ get_thread_slow(rbx);
354 __ cmpptr(r15_thread, thread);
355 __ jcc(Assembler::equal, L2);
356 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
357 __ bind(L2);
358 __ cmpptr(r15_thread, rbx);
376 __ movptr(r13, r13_save);
377 __ movptr(r12, r12_save);
378 __ movptr(rbx, rbx_save);
379
380 #ifdef _WIN64
381 __ movptr(rdi, rdi_save);
382 __ movptr(rsi, rsi_save);
383 #else
384 __ ldmxcsr(mxcsr_save);
385 #endif
386
387 // restore rsp
388 __ addptr(rsp, -rsp_after_call_off * wordSize);
389
390 // return
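// vzeroupper clears the upper bits of the AVX registers so the caller does not
// pay SSE/AVX transition penalties after this stub returns.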
391 __ vzeroupper();
392 __ pop(rbp);
393 __ ret(0);
394
395 // handle return types different from T_INT
396 __ BIND(is_long);
397 __ movq(Address(c_rarg0, 0), rax);
398 __ jmp(exit);
399
400 __ BIND(is_float);
401 __ movflt(Address(c_rarg0, 0), xmm0);
402 __ jmp(exit);
403
404 __ BIND(is_double);
405 __ movdbl(Address(c_rarg0, 0), xmm0);
406 __ jmp(exit);
407
408 // record the stub entry and end plus the auxiliary entry
409 store_archive_data(stub_id, start, __ pc(), &entries);
410
411 return start;
412 }
413
414 // Return point for a Java call if there's an exception thrown in
415 // Java code. The exception is caught and transformed into a
416 // pending exception stored in JavaThread that can be tested from
417 // within the VM.
418 //
419 // Note: Usually the parameters are removed by the callee. In case
420 // of an exception crossing an activation frame boundary, that is
421 // not the case if the callee is compiled code => need to set up the
422 // rsp.
423 //
424 // rax: exception oop
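// Illustrative sketch (not part of this stub): after control returns here, the
// exception has been recorded as the thread's pending exception, so VM code
// can consume it roughly like
//   if (thread->has_pending_exception()) {
//     oop ex = thread->pending_exception();
//     thread->clear_pending_exception();
//     // ... handle or rethrow ex
//   }
// where 'thread' and the handling step are placeholders.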
425
4356 return start;
4357 }
4358 StubCodeMark mark(this, stub_id);
4359
4360 start = __ pc();
4361
4362 BLOCK_COMMENT("Entry:");
4363 // No need for RuntimeStub frame since it is called only during JIT compilation
4364
4365 // Convert and put result into rax
4366 __ flt_to_flt16(rax, xmm0, xmm1);
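// flt_to_flt16 converts the float in xmm0 to a half-precision value in rax;
// xmm1 is only a temporary used by the conversion sequence.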
4367
4368 __ ret(0);
4369
4370 // record the stub entry and end
4371 store_archive_data(stub_id, start, __ pc());
4372
4373 return start;
4374 }
4375
4376 address StubGenerator::generate_cont_thaw(StubId stub_id) {
4377 if (!Continuations::enabled()) return nullptr;
4378
4379 bool return_barrier;
4380 bool return_barrier_exception;
4381 Continuation::thaw_kind kind;
4382
4383 switch (stub_id) {
4384 case StubId::stubgen_cont_thaw_id:
4385 return_barrier = false;
4386 return_barrier_exception = false;
4387 kind = Continuation::thaw_top;
4388 break;
4389 case StubId::stubgen_cont_returnBarrier_id:
4390 return_barrier = true;
4391 return_barrier_exception = false;
4392 kind = Continuation::thaw_return_barrier;
4393 break;
4394 case StubId::stubgen_cont_returnBarrierExc_id:
4395 return_barrier = true;
4413 if (!return_barrier) {
4414 // Pop return address. If we don't do this, we get a drift,
4415 // where the bottom-most frozen frame continuously grows.
4416 __ pop(c_rarg3);
4417 } else {
4418 __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4419 }
4420
4421 #ifdef ASSERT
4422 {
4423 Label L_good_sp;
4424 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4425 __ jcc(Assembler::equal, L_good_sp);
4426 __ stop("Incorrect rsp at thaw entry");
4427 __ BIND(L_good_sp);
4428 }
4429 #endif // ASSERT
4430
4431 if (return_barrier) {
4432 // Preserve possible return value from a method returning to the return barrier.
4433 __ push_ppx(rax);
4434 __ push_d(xmm0);
4435 }
4436
4437 __ movptr(c_rarg0, r15_thread);
4438 __ movptr(c_rarg1, (return_barrier ? 1 : 0));
4439 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), 2);
4440 __ movptr(rbx, rax);
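// Stash the result of prepare_thaw in rbx so it survives the code below,
// which reuses rax.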
4441
4442 if (return_barrier) {
4443 // Restore return value from a method returning to the return barrier.
4444 // No safepoint in the call to thaw, so even an oop return value should be OK.
4445 __ pop_d(xmm0);
4446 __ pop_ppx(rax);
4447 }
4448
4449 #ifdef ASSERT
4450 {
4451 Label L_good_sp;
4452 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4453 __ jcc(Assembler::equal, L_good_sp);
4454 __ stop("Incorrect rsp after prepare thaw");
4455 __ BIND(L_good_sp);
4456 }
4457 #endif // ASSERT
4458
4459 // rbx contains the size of the frames to thaw, 0 if overflow or no more frames
4460 Label L_thaw_success;
4461 __ testptr(rbx, rbx);
4462 __ jccb(Assembler::notZero, L_thaw_success);
4463 __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
4464 __ bind(L_thaw_success);
4465
4466 // Make room for the thawed frames and align the stack.
4467 __ subptr(rsp, rbx);
4468 __ andptr(rsp, -StackAlignmentInBytes);
4469
4470 if (return_barrier) {
4471 // Preserve possible return value from a method returning to the return barrier. (Again.)
4472 __ push_ppx(rax);
4473 __ push_d(xmm0);
4474 }
4475
4476 // If we want, we can templatize thaw by kind, and have three different entries.
4477 __ movptr(c_rarg0, r15_thread);
4478 __ movptr(c_rarg1, kind);
4479 __ call_VM_leaf(Continuation::thaw_entry(), 2);
4480 __ movptr(rbx, rax);
4481
4482 if (return_barrier) {
4483 // Restore return value from a method returning to the return barrier. (Again.)
4484 // No safepoint in the call to thaw, so even an oop return value should be OK.
4485 __ pop_d(xmm0);
4486 __ pop_ppx(rax);
4487 } else {
4488 // Return 0 (success) from doYield.
4489 __ xorptr(rax, rax);
4490 }
4491
4492 // After thawing, rbx is the SP of the yielding frame.
4493 // Move there, and then to saved RBP slot.
4494 __ movptr(rsp, rbx);
4495 __ subptr(rsp, 2*wordSize);
4496
4497 if (return_barrier_exception) {
4498 __ movptr(c_rarg0, r15_thread);
4499 __ movptr(c_rarg1, Address(rsp, wordSize)); // return address
4500
4501 // rax still holds the original exception oop, save it before the call
4502 __ push_ppx(rax);
4503
4504 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
4505 __ movptr(rbx, rax);
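// rbx now holds the exception handler to continue at for the caller's return address.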
4506
4733
4734 void StubGenerator::generate_initial_stubs() {
4735 // Generates the initial stubs and initializes the entry points
4736
4737 // These platform-specific settings are needed by generate_call_stub()
4738 create_control_words();
4739
4740 // Initialize table for unsafe copy memory check.
4741 if (UnsafeMemoryAccess::_table == nullptr) {
4742 UnsafeMemoryAccess::create_table(16 + 4); // 16 for copyMemory; 4 for setMemory
4743 }
4744
4745 // Entry points that exist on all platforms. Note: This is code
4746 // that could be shared among different platforms - however the
4747 // benefit seems to be smaller than the disadvantage of having a
4748 // much more complicated generator structure. See also the comment in
4749 // stubRoutines.hpp.
4750
4751 StubRoutines::_forward_exception_entry = generate_forward_exception();
4752
4753 StubRoutines::_call_stub_entry =
4754 generate_call_stub(StubRoutines::_call_stub_return_address);
4755
4756 // is referenced by megamorphic call
4757 StubRoutines::_catch_exception_entry = generate_catch_exception();
4758
4759 // platform dependent
4760 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
4761
4762 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
4763 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
4764 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
4765 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
4766
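// Floating-point masks used by compiled code: the *_sign_mask constants clear
// the sign bit (abs), the *_sign_flip constants toggle it (negate), for float
// and double lanes respectively.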
4767 StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubId::stubgen_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
4768 StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubId::stubgen_float_sign_flip_id, 0x8000000080000000);
4769 StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubId::stubgen_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4770 StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubId::stubgen_double_sign_flip_id, 0x8000000000000000);
4771
4772 if (UseCRC32Intrinsics) {
4776 if (UseCRC32CIntrinsics) {
4777 bool supports_clmul = VM_Version::supports_clmul();
4778 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
4779 }
4780
4781 if (VM_Version::supports_float16()) {
4782 // For consistency of results, both intrinsics should be enabled.
4783 // vmIntrinsics checks the InlineIntrinsics flag, no need to check it here.
4784 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
4785 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
4786 StubRoutines::_hf2f = generate_float16ToFloat();
4787 StubRoutines::_f2hf = generate_floatToFloat16();
4788 }
4789 }
4790
4791 generate_libm_stubs();
4792
4793 StubRoutines::_fmod = generate_libmFmod(); // from stubGenerator_x86_64_fmod.cpp
4794 }
4795
4796 void StubGenerator::generate_continuation_stubs() {
4797 // Continuation stubs:
4798 StubRoutines::_cont_thaw = generate_cont_thaw();
4799 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
4800 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
4801 StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
4802 }
4803
4804 void StubGenerator::generate_final_stubs() {
4805 // Generates the rest of the stubs and initializes the entry points
4806
4807 // support for verify_oop (must happen after universe_init)
4808 if (VerifyOops) {
4809 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
4810 }
4811
4812 // arraycopy stubs used by compilers
4813 generate_arraycopy_stubs();
4814
4815 StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "classfile/vmIntrinsics.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/barrierSetAssembler.hpp"
32 #include "gc/shared/barrierSetNMethod.hpp"
33 #include "gc/shared/gc_globals.hpp"
34 #include "memory/universe.hpp"
35 #include "prims/jvmtiExport.hpp"
36 #include "prims/upcallLinker.hpp"
37 #include "runtime/arguments.hpp"
38 #include "runtime/continuationEntry.hpp"
39 #include "runtime/javaThread.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "utilities/macros.hpp"
43 #include "vmreg_x86.inline.hpp"
44 #include "stubGenerator_x86_64.hpp"
45 #ifdef COMPILER2
46 #include "opto/runtime.hpp"
47 #include "opto/c2_globals.hpp"
48 #endif
49 #if INCLUDE_JVMCI
50 #include "jvmci/jvmci_globals.hpp"
51 #endif
52
53 // For a more detailed description of the stub routine structure
54 // see the comment in stubRoutines.hpp
55
56 #define __ _masm->
57 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
58
59 #ifdef PRODUCT
60 #define BLOCK_COMMENT(str) /* nothing */
61 #else
62 #define BLOCK_COMMENT(str) __ block_comment(str)
63 #endif // PRODUCT
298 __ movptr(rax, Address(c_rarg2, 0));// get parameter
299 __ addptr(c_rarg2, wordSize); // advance to next parameter
300 __ decrementl(c_rarg1); // decrement counter
301 __ push(rax); // pass parameter
302 __ jcc(Assembler::notZero, loop);
303
304 // call Java function
305 __ BIND(parameters_done);
306 __ movptr(rbx, method); // get Method*
307 __ movptr(c_rarg1, entry_point); // get entry_point
308 __ mov(r13, rsp); // set sender sp
309 BLOCK_COMMENT("call Java function");
310 __ call(c_rarg1);
311
312 BLOCK_COMMENT("call_stub_return_address:");
313 return_address = __ pc();
314 entries.append(return_address);
315
316 // store result depending on type (everything that is not
317 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
318 __ movptr(r13, result);
319 Label is_long, is_float, is_double, check_prim, exit;
320 __ movl(rbx, result_type);
321 __ cmpl(rbx, T_OBJECT);
322 __ jcc(Assembler::equal, check_prim);
323 __ cmpl(rbx, T_LONG);
324 __ jcc(Assembler::equal, is_long);
325 __ cmpl(rbx, T_FLOAT);
326 __ jcc(Assembler::equal, is_float);
327 __ cmpl(rbx, T_DOUBLE);
328 __ jcc(Assembler::equal, is_double);
329 #ifdef ASSERT
330 // make sure the type is INT
331 {
332 Label L;
333 __ cmpl(rbx, T_INT);
334 __ jcc(Assembler::equal, L);
335 __ stop("StubRoutines::call_stub: unexpected result type");
336 __ bind(L);
337 }
338 #endif
339
340 // handle T_INT case
341 __ movl(Address(r13, 0), rax);
342
343 __ BIND(exit);
344
345 // pop parameters
346 __ lea(rsp, rsp_after_call);
347
348 #ifdef ASSERT
349 // verify that threads correspond
350 {
351 Label L1, L2, L3;
352 __ cmpptr(r15_thread, thread);
353 __ jcc(Assembler::equal, L1);
354 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
355 __ bind(L1);
356 __ get_thread_slow(rbx);
357 __ cmpptr(r15_thread, thread);
358 __ jcc(Assembler::equal, L2);
359 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
360 __ bind(L2);
361 __ cmpptr(r15_thread, rbx);
379 __ movptr(r13, r13_save);
380 __ movptr(r12, r12_save);
381 __ movptr(rbx, rbx_save);
382
383 #ifdef _WIN64
384 __ movptr(rdi, rdi_save);
385 __ movptr(rsi, rsi_save);
386 #else
387 __ ldmxcsr(mxcsr_save);
388 #endif
389
390 // restore rsp
391 __ addptr(rsp, -rsp_after_call_off * wordSize);
392
393 // return
394 __ vzeroupper();
395 __ pop(rbp);
396 __ ret(0);
397
398 // handle return types different from T_INT
399 __ BIND(check_prim);
400 if (InlineTypeReturnedAsFields) {
401 // Check for scalarized return value
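// When inline type fields are returned in registers, rax carries an
// InlineKlass* tagged with its low bit set instead of an ordinary oop.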
402 __ testptr(rax, 1);
403 __ jcc(Assembler::zero, is_long);
404 // Load pack handler address
405 __ andptr(rax, -2);
406 __ movptr(rax, Address(rax, InlineKlass::adr_members_offset()));
407 __ movptr(rbx, Address(rax, InlineKlass::pack_handler_jobject_offset()));
408 // Call pack handler to initialize the buffer
409 __ call(rbx);
410 __ jmp(exit);
411 }
412 __ BIND(is_long);
413 __ movq(Address(r13, 0), rax);
414 __ jmp(exit);
415
416 __ BIND(is_float);
417 __ movflt(Address(r13, 0), xmm0);
418 __ jmp(exit);
419
420 __ BIND(is_double);
421 __ movdbl(Address(r13, 0), xmm0);
422 __ jmp(exit);
423
424 // record the stub entry and end plus the auxiliary entry
425 store_archive_data(stub_id, start, __ pc(), &entries);
426
427 return start;
428 }
429
430 // Return point for a Java call if there's an exception thrown in
431 // Java code. The exception is caught and transformed into a
432 // pending exception stored in JavaThread that can be tested from
433 // within the VM.
434 //
435 // Note: Usually the parameters are removed by the callee. In case
436 // of an exception crossing an activation frame boundary, that is
437 // not the case if the callee is compiled code => need to set up the
438 // rsp.
439 //
440 // rax: exception oop
441
4372 return start;
4373 }
4374 StubCodeMark mark(this, stub_id);
4375
4376 start = __ pc();
4377
4378 BLOCK_COMMENT("Entry:");
4379 // No need for RuntimeStub frame since it is called only during JIT compilation
4380
4381 // Convert and put result into rax
4382 __ flt_to_flt16(rax, xmm0, xmm1);
4383
4384 __ ret(0);
4385
4386 // record the stub entry and end
4387 store_archive_data(stub_id, start, __ pc());
4388
4389 return start;
4390 }
4391
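// Helpers for generate_cont_thaw: save and restore every register the Java
// return convention can use for a return value (rax/xmm0 always, plus the full
// field-return set when InlineTypeReturnedAsFields), so the runtime calls made
// while thawing do not clobber a pending return value.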
4392 static void save_return_registers(MacroAssembler* masm) {
4393 masm->push_ppx(rax);
4394 if (InlineTypeReturnedAsFields) {
4395 masm->push(rdi);
4396 masm->push(rsi);
4397 masm->push(rdx);
4398 masm->push(rcx);
4399 masm->push(r8);
4400 masm->push(r9);
4401 }
4402 masm->push_d(xmm0);
4403 if (InlineTypeReturnedAsFields) {
4404 masm->push_d(xmm1);
4405 masm->push_d(xmm2);
4406 masm->push_d(xmm3);
4407 masm->push_d(xmm4);
4408 masm->push_d(xmm5);
4409 masm->push_d(xmm6);
4410 masm->push_d(xmm7);
4411 }
4412 #ifdef ASSERT
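// In debug builds, poison the just-saved registers with a marker value so any
// accidental use of stale return-register contents is easy to spot.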
4413 masm->movq(rax, 0xBADC0FFE);
4414 masm->movq(rdi, rax);
4415 masm->movq(rsi, rax);
4416 masm->movq(rdx, rax);
4417 masm->movq(rcx, rax);
4418 masm->movq(r8, rax);
4419 masm->movq(r9, rax);
4420 masm->movq(xmm0, rax);
4421 masm->movq(xmm1, rax);
4422 masm->movq(xmm2, rax);
4423 masm->movq(xmm3, rax);
4424 masm->movq(xmm4, rax);
4425 masm->movq(xmm5, rax);
4426 masm->movq(xmm6, rax);
4427 masm->movq(xmm7, rax);
4428 #endif
4429 }
4430
4431 static void restore_return_registers(MacroAssembler* masm) {
4432 if (InlineTypeReturnedAsFields) {
4433 masm->pop_d(xmm7);
4434 masm->pop_d(xmm6);
4435 masm->pop_d(xmm5);
4436 masm->pop_d(xmm4);
4437 masm->pop_d(xmm3);
4438 masm->pop_d(xmm2);
4439 masm->pop_d(xmm1);
4440 }
4441 masm->pop_d(xmm0);
4442 if (InlineTypeReturnedAsFields) {
4443 masm->pop(r9);
4444 masm->pop(r8);
4445 masm->pop(rcx);
4446 masm->pop(rdx);
4447 masm->pop(rsi);
4448 masm->pop(rdi);
4449 }
4450 masm->pop_ppx(rax);
4451 }
4452
4453 address StubGenerator::generate_cont_thaw(StubId stub_id) {
4454 if (!Continuations::enabled()) return nullptr;
4455
4456 bool return_barrier;
4457 bool return_barrier_exception;
4458 Continuation::thaw_kind kind;
4459
4460 switch (stub_id) {
4461 case StubId::stubgen_cont_thaw_id:
4462 return_barrier = false;
4463 return_barrier_exception = false;
4464 kind = Continuation::thaw_top;
4465 break;
4466 case StubId::stubgen_cont_returnBarrier_id:
4467 return_barrier = true;
4468 return_barrier_exception = false;
4469 kind = Continuation::thaw_return_barrier;
4470 break;
4471 case StubId::stubgen_cont_returnBarrierExc_id:
4472 return_barrier = true;
4490 if (!return_barrier) {
4491 // Pop return address. If we don't do this, we get a drift,
4492 // where the bottom-most frozen frame continuously grows.
4493 __ pop(c_rarg3);
4494 } else {
4495 __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4496 }
4497
4498 #ifdef ASSERT
4499 {
4500 Label L_good_sp;
4501 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4502 __ jcc(Assembler::equal, L_good_sp);
4503 __ stop("Incorrect rsp at thaw entry");
4504 __ BIND(L_good_sp);
4505 }
4506 #endif // ASSERT
4507
4508 if (return_barrier) {
4509 // Preserve possible return value from a method returning to the return barrier.
4510 save_return_registers(_masm);
4511 }
4512
4513 __ movptr(c_rarg0, r15_thread);
4514 __ movptr(c_rarg1, (return_barrier ? 1 : 0));
4515 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), 2);
4516 __ movptr(rbx, rax);
4517
4518 if (return_barrier) {
4519 // Restore return value from a method returning to the return barrier.
4520 // No safepoint in the call to thaw, so even an oop return value should be OK.
4521 restore_return_registers(_masm);
4522 }
4523
4524 #ifdef ASSERT
4525 {
4526 Label L_good_sp;
4527 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
4528 __ jcc(Assembler::equal, L_good_sp);
4529 __ stop("Incorrect rsp after prepare thaw");
4530 __ BIND(L_good_sp);
4531 }
4532 #endif // ASSERT
4533
4534 // rbx contains the size of the frames to thaw, 0 if overflow or no more frames
4535 Label L_thaw_success;
4536 __ testptr(rbx, rbx);
4537 __ jccb(Assembler::notZero, L_thaw_success);
4538 __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
4539 __ bind(L_thaw_success);
4540
4541 // Make room for the thawed frames and align the stack.
4542 __ subptr(rsp, rbx);
4543 __ andptr(rsp, -StackAlignmentInBytes);
4544
4545 if (return_barrier) {
4546 // Preserve possible return value from a method returning to the return barrier. (Again.)
4547 save_return_registers(_masm);
4548 }
4549
4550 // If we want, we can templatize thaw by kind, and have three different entries.
4551 __ movptr(c_rarg0, r15_thread);
4552 __ movptr(c_rarg1, kind);
4553 __ call_VM_leaf(Continuation::thaw_entry(), 2);
4554 __ movptr(rbx, rax);
4555
4556 if (return_barrier) {
4557 // Restore return value from a method returning to the return barrier. (Again.)
4558 // No safepoint in the call to thaw, so even an oop return value should be OK.
4559 restore_return_registers(_masm);
4560 } else {
4561 // Return 0 (success) from doYield.
4562 __ xorptr(rax, rax);
4563 }
4564
4565 // After thawing, rbx is the SP of the yielding frame.
4566 // Move there, and then to saved RBP slot.
4567 __ movptr(rsp, rbx);
4568 __ subptr(rsp, 2*wordSize);
4569
4570 if (return_barrier_exception) {
4571 __ movptr(c_rarg0, r15_thread);
4572 __ movptr(c_rarg1, Address(rsp, wordSize)); // return address
4573
4574 // rax still holds the original exception oop, save it before the call
4575 __ push_ppx(rax);
4576
4577 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
4578 __ movptr(rbx, rax);
4579
4806
4807 void StubGenerator::generate_initial_stubs() {
4808 // Generates the initial stubs and initializes the entry points
4809
4810 // These platform-specific settings are needed by generate_call_stub()
4811 create_control_words();
4812
4813 // Initialize table for unsafe copy memory check.
4814 if (UnsafeMemoryAccess::_table == nullptr) {
4815 UnsafeMemoryAccess::create_table(16 + 4); // 16 for copyMemory; 4 for setMemory
4816 }
4817
4818 // Entry points that exist on all platforms. Note: This is code
4819 // that could be shared among different platforms - however the
4820 // benefit seems to be smaller than the disadvantage of having a
4821 // much more complicated generator structure. See also the comment in
4822 // stubRoutines.hpp.
4823
4824 StubRoutines::_forward_exception_entry = generate_forward_exception();
4825
4826 // Generate these first because they are called from other stubs
4827 if (InlineTypeReturnedAsFields) {
4828 StubRoutines::_load_inline_type_fields_in_regs =
4829 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs),
4830 "load_inline_type_fields_in_regs", false);
4831 StubRoutines::_store_inline_type_fields_to_buf =
4832 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf),
4833 "store_inline_type_fields_to_buf", true);
4834 }
4835
4836 StubRoutines::_call_stub_entry =
4837 generate_call_stub(StubRoutines::_call_stub_return_address);
4838
4839 // is referenced by megamorphic call
4840 StubRoutines::_catch_exception_entry = generate_catch_exception();
4841
4842 // platform dependent
4843 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
4844
4845 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
4846 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
4847 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
4848 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
4849
4850 StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubId::stubgen_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
4851 StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubId::stubgen_float_sign_flip_id, 0x8000000080000000);
4852 StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubId::stubgen_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
4853 StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubId::stubgen_double_sign_flip_id, 0x8000000000000000);
4854
4855 if (UseCRC32Intrinsics) {
4859 if (UseCRC32CIntrinsics) {
4860 bool supports_clmul = VM_Version::supports_clmul();
4861 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
4862 }
4863
4864 if (VM_Version::supports_float16()) {
4865 // For consistency of results, both intrinsics should be enabled.
4866 // vmIntrinsics checks the InlineIntrinsics flag, no need to check it here.
4867 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
4868 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
4869 StubRoutines::_hf2f = generate_float16ToFloat();
4870 StubRoutines::_f2hf = generate_floatToFloat16();
4871 }
4872 }
4873
4874 generate_libm_stubs();
4875
4876 StubRoutines::_fmod = generate_libmFmod(); // from stubGenerator_x86_64_fmod.cpp
4877 }
4878
4879 // Call here from the interpreter or compiled code to either load
4880 // multiple returned values from the inline type instance being
4881 // returned into registers, or to store returned values to a newly
4882 // allocated inline type instance.
4883 // Register is a class, but it is assigned a numerical value;
4884 // "0" is the value assigned for xmm0, so we need to ignore -Wnonnull.
4885 PRAGMA_DIAG_PUSH
4886 PRAGMA_NONNULL_IGNORED
4887 address StubGenerator::generate_return_value_stub(address destination, const char* name, bool has_res) {
4888 // We need to save all registers the calling convention may use so
4889 // the runtime calls read or update those registers. This needs to
4890 // be in sync with SharedRuntime::java_return_convention().
4891 enum layout {
4892 pad_off = frame::arg_reg_save_area_bytes/BytesPerInt, pad_off_2,
4893 rax_off, rax_off_2,
4894 j_rarg5_off, j_rarg5_2,
4895 j_rarg4_off, j_rarg4_2,
4896 j_rarg3_off, j_rarg3_2,
4897 j_rarg2_off, j_rarg2_2,
4898 j_rarg1_off, j_rarg1_2,
4899 j_rarg0_off, j_rarg0_2,
4900 j_farg0_off, j_farg0_2,
4901 j_farg1_off, j_farg1_2,
4902 j_farg2_off, j_farg2_2,
4903 j_farg3_off, j_farg3_2,
4904 j_farg4_off, j_farg4_2,
4905 j_farg5_off, j_farg5_2,
4906 j_farg6_off, j_farg6_2,
4907 j_farg7_off, j_farg7_2,
4908 rbp_off, rbp_off_2,
4909 return_off, return_off_2,
4910
4911 framesize
4912 };
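// Each *_off/*_2 pair above is one 64-bit save slot; the offsets are counted
// in 32-bit (BytesPerInt) units to match the Address computations below.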
4913
4914 CodeBuffer buffer(name, 1000, 512);
4915 MacroAssembler* _masm = new MacroAssembler(&buffer);
4916
4917 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
4918 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
4919 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
4920 int frame_size_in_words = frame_size_in_bytes / wordSize;
4921
4922 OopMapSet *oop_maps = new OopMapSet();
4923 OopMap* map = new OopMap(frame_size_in_slots, 0);
4924
4925 map->set_callee_saved(VMRegImpl::stack2reg(rax_off), rax->as_VMReg());
4926 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
4927 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
4928 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
4929 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
4930 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
4931 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
4932 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
4933 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
4934 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
4935 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
4936 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
4937 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
4938 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
4939 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
4940
4941 int start = __ offset();
4942
4943 __ subptr(rsp, frame_size_in_bytes - 8 /* return address*/);
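// The return address pushed by the caller's call instruction accounts for the
// remaining 8 bytes of the frame.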
4944
4945 __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp);
4946 __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7);
4947 __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6);
4948 __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5);
4949 __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4);
4950 __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3);
4951 __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2);
4952 __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1);
4953 __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0);
4954
4955 __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0);
4956 __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1);
4957 __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2);
4958 __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3);
4959 __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4);
4960 __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5);
4961 __ movptr(Address(rsp, rax_off * BytesPerInt), rax);
4962
4963 int frame_complete = __ offset();
4964
4965 __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
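// Publish a last Java frame so the stack can be walked (e.g. by GC) during the
// runtime call below.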
4966
4967 __ mov(c_rarg0, r15_thread);
4968 __ mov(c_rarg1, rax);
4969
4970 __ call(RuntimeAddress(destination));
4971
4972 // Set an oopmap for the call site.
4973
4974 oop_maps->add_gc_map( __ offset() - start, map);
4975
4976 // clear last_Java_sp
4977 __ reset_last_Java_frame(false);
4978
4979 __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt));
4980 __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt));
4981 __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt));
4982 __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt));
4983 __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt));
4984 __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt));
4985 __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt));
4986 __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt));
4987 __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt));
4988
4989 __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt));
4990 __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt));
4991 __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt));
4992 __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt));
4993 __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt));
4994 __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt));
4995 __ movptr(rax, Address(rsp, rax_off * BytesPerInt));
4996
4997 __ addptr(rsp, frame_size_in_bytes-8);
4998
4999 // check for pending exceptions
5000 Label pending;
5001 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
5002 __ jcc(Assembler::notEqual, pending);
5003
5004 if (has_res) {
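// The runtime call left its oop result in the thread-local vm_result slot;
// fetch it into rax as the stub's return value.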
5005 __ get_vm_result_oop(rax);
5006 }
5007
5008 __ ret(0);
5009
5010 __ bind(pending);
5011
5012 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
5013 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
5014
5015 // -------------
5016 // make sure all code is generated
5017 _masm->flush();
5018
5019 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false);
5020 return stub->entry_point();
5021 }
5022
5023 void StubGenerator::generate_continuation_stubs() {
5024 // Continuation stubs:
5025 StubRoutines::_cont_thaw = generate_cont_thaw();
5026 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
5027 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
5028 StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
5029 }
5030
5031 void StubGenerator::generate_final_stubs() {
5032 // Generates the rest of the stubs and initializes the entry points
5033
5034 // support for verify_oop (must happen after universe_init)
5035 if (VerifyOops) {
5036 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
5037 }
5038
5039 // arraycopy stubs used by compilers
5040 generate_arraycopy_stubs();
5041
5042 StubRoutines::_method_entry_barrier = generate_method_entry_barrier();