    __ mov(r19_sender_sp, sp);
    __ blr(c_rarg4);

    // we do this here because the notify will already have been done
    // if we get to the next instruction via an exception
    //
    // n.b. adding this instruction here affects the calculation of
    // whether or not a routine returns to the call stub (used when
    // doing stack walks) since the normal test is to check the return
    // pc against the address saved below. so we may need to allow for
    // this extra instruction in the check.

    // save current address for use by exception handling code

    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    // n.b. this assumes Java returns an integral result in r0
    // and a floating result in j_farg0
    // All of j_rargN may be used to return inline type fields, so be careful
    // not to clobber those.
    // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
    // assignment of Rresult below.
    Register Rresult = r14, Rresult_type = r15;
    __ ldr(Rresult, result);
    Label is_long, is_float, is_double, check_prim, exit;
    __ ldr(Rresult_type, result_type);
    __ cmp(Rresult_type, (u1)T_OBJECT);
    __ br(Assembler::EQ, check_prim);
    __ cmp(Rresult_type, (u1)T_LONG);
    __ br(Assembler::EQ, is_long);
    __ cmp(Rresult_type, (u1)T_FLOAT);
    __ br(Assembler::EQ, is_float);
    __ cmp(Rresult_type, (u1)T_DOUBLE);
    __ br(Assembler::EQ, is_double);

    // handle T_INT case
    __ strw(r0, Address(Rresult));

    __ BIND(exit);

    // pop parameters
    __ sub(esp, rfp, -sp_after_call_off * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ ldr(rscratch1, thread);
      __ cmp(rthread, rscratch1);
      __ br(Assembler::NE, S);
      __ get_thread(rscratch1);
      __ cmp(rthread, rscratch1);
      __ br(Assembler::EQ, L);
      __ BIND(S);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ BIND(L);
    }
#endif
    // ...
    __ ldp(r26, r25, r26_save);
    __ ldp(r24, r23, r24_save);
    __ ldp(r22, r21, r22_save);
    __ ldp(r20, r19, r20_save);

    // restore fpcr
    __ ldr(rscratch1, fpcr_save);
    __ set_fpcr(rscratch1);

    __ ldp(c_rarg0, c_rarg1, call_wrapper);
    __ ldrw(c_rarg2, result_type);
    __ ldr(c_rarg3, method);
    __ ldp(c_rarg4, c_rarg5, entry_point);
    __ ldp(c_rarg6, c_rarg7, parameter_size);

    // leave frame and return to caller
    __ leave();
    __ ret(lr);

    // handle return types different from T_INT
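    // Note on the T_OBJECT path (a description inferred from the code below,
    // not taken from an authoritative source): r0 either holds an ordinary
    // oop - always word-aligned, so its low bit is clear and it is stored
    // like a T_LONG - or, when the callee returned an inline type scalarized
    // into registers, the corresponding InlineKlass* tagged with its low bit
    // set. In the tagged case the tag is stripped and the klass' pack
    // handler is called to buffer the field values into a heap instance,
    // leaving its oop in r0.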
    __ BIND(check_prim);
    if (InlineTypeReturnedAsFields) {
      // Check for scalarized return value
      __ tbz(r0, 0, is_long);
      // Load pack handler address
      __ andr(rscratch1, r0, -2);
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::adr_members_offset()));
      __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
      __ blr(rscratch1);
      __ b(exit);
    }

    __ BIND(is_long);
    __ str(r0, Address(Rresult, 0));
    __ br(Assembler::AL, exit);

    __ BIND(is_float);
    __ strs(j_farg0, Address(Rresult, 0));
    __ br(Assembler::AL, exit);

    __ BIND(is_double);
    __ strd(j_farg0, Address(Rresult, 0));
    __ br(Assembler::AL, exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code. The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // r0: exception oop

  address generate_catch_exception() {
    StubId stub_id = StubId::stubgen_catch_exception_id;
    StubCodeMark mark(this, stub_id);
    // ...

    //   |array_tag|     | header_size | element_type |     |log2_element_size|
    //   32        30    24            16             8     2                 0
    //
    // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //
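    // For reference (a sketch of Klass::array_layout_helper; see klass.hpp
    // for the authoritative encoding), a layout helper is packed roughly as
    //   (tag << _lh_array_tag_shift)
    //     | (header_size_in_bytes << _lh_header_size_shift)
    //     | (element_type << _lh_element_type_shift)
    //     | (log2_element_size << _lh_log2_element_size_shift)
    // which is why a single 32-bit xor/compare against objArray_lh below is
    // enough to recognize an objArray klass.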

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ ldrw(lh, Address(scratch_src_klass, lh_offset));
    __ movw(rscratch1, objArray_lh);
    __ eorw(rscratch2, lh, rscratch1);
    __ cbzw(rscratch2, L_objArray);

    // if (src->klass() != dst->klass()) return -1;
    __ load_klass(rscratch2, dst);
    __ eor(rscratch2, rscratch2, scratch_src_klass);
    __ cbnz(rscratch2, L_failed);

    // Check for flat inline type array -> return -1
    __ test_flat_array_oop(src, rscratch2, L_failed);

    // Check for null-free (non-flat) inline type array -> handle as object array
    __ test_null_free_array_oop(src, rscratch2, L_objArray);

    // if (!src->is_Array()) return -1;
    __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)

    // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
      __ cmpw(lh, rscratch2);
      __ br(Assembler::GE, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
                           rscratch2, L_failed);

    // ...

    gen_cas_entry(MacroAssembler::xword, memory_order_relaxed);

    AtomicStubMark mark_cmpxchg_4_release
      (_masm, &aarch64_atomic_cmpxchg_4_release_impl);
    gen_cas_entry(MacroAssembler::word, memory_order_release);
    AtomicStubMark mark_cmpxchg_8_release
      (_masm, &aarch64_atomic_cmpxchg_8_release_impl);
    gen_cas_entry(MacroAssembler::xword, memory_order_release);

    AtomicStubMark mark_cmpxchg_4_seq_cst
      (_masm, &aarch64_atomic_cmpxchg_4_seq_cst_impl);
    gen_cas_entry(MacroAssembler::word, memory_order_seq_cst);
    AtomicStubMark mark_cmpxchg_8_seq_cst
      (_masm, &aarch64_atomic_cmpxchg_8_seq_cst_impl);
    gen_cas_entry(MacroAssembler::xword, memory_order_seq_cst);

    ICache::invalidate_range(first_entry, __ pc() - first_entry);
  }
#endif // LINUX

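  // With InlineTypeReturnedAsFields, a method may return an inline type
  // scalarized into up to all of r0-r7 and v0-v7 (cf.
  // SharedRuntime::java_return_convention()), so the thaw code below has to
  // preserve the whole set instead of just r0/v0. The two helpers must
  // mirror each other exactly: the FP registers are popped before the
  // integer registers because they were pushed after them.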
  static void save_return_registers(MacroAssembler* masm) {
    if (InlineTypeReturnedAsFields) {
      masm->push(RegSet::range(r0, r7), sp);
      masm->sub(sp, sp, 4 * wordSize);
      masm->st1(v0, v1, v2, v3, masm->T1D, Address(sp));
      masm->sub(sp, sp, 4 * wordSize);
      masm->st1(v4, v5, v6, v7, masm->T1D, Address(sp));
    } else {
      masm->fmovd(rscratch1, v0);
      masm->stp(rscratch1, r0, Address(masm->pre(sp, -2 * wordSize)));
    }
  }

  static void restore_return_registers(MacroAssembler* masm) {
    if (InlineTypeReturnedAsFields) {
      masm->ld1(v4, v5, v6, v7, masm->T1D, Address(masm->post(sp, 4 * wordSize)));
      masm->ld1(v0, v1, v2, v3, masm->T1D, Address(masm->post(sp, 4 * wordSize)));
      masm->pop(RegSet::range(r0, r7), sp);
    } else {
      masm->ldp(rscratch1, r0, Address(masm->post(sp, 2 * wordSize)));
      masm->fmovd(v0, rscratch1);
    }
  }

  address generate_cont_thaw(Continuation::thaw_kind kind) {
    bool return_barrier = Continuation::is_thaw_return_barrier(kind);
    bool return_barrier_exception = Continuation::is_thaw_return_barrier_exception(kind);

    address start = __ pc();

    if (return_barrier) {
      __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
      __ mov(sp, rscratch1);
    }
    assert_asm(_masm, (__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset())), __ cmp(sp, rscratch1)), Assembler::EQ, "incorrect sp");

    if (return_barrier) {
      // preserve possible return value from a method returning to the return barrier
      save_return_registers(_masm);
    }

    __ movw(c_rarg1, (return_barrier ? 1 : 0));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Continuation::prepare_thaw), rthread, c_rarg1);
    __ mov(rscratch2, r0); // r0 contains the size of the frames to thaw, 0 if overflow or no more frames

    if (return_barrier) {
      // restore return value (no safepoint in the call to thaw, so even an oop return value should be OK)
      restore_return_registers(_masm);
    }
    assert_asm(_masm, (__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset())), __ cmp(sp, rscratch1)), Assembler::EQ, "incorrect sp");

    Label thaw_success;
    // rscratch2 contains the size of the frames to thaw, 0 if overflow or no more frames
    __ cbnz(rscratch2, thaw_success);
    __ lea(rscratch1, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
    __ br(rscratch1);
    __ bind(thaw_success);

    // make room for the thawed frames
    __ sub(rscratch1, sp, rscratch2);
    __ andr(rscratch1, rscratch1, -16); // align
    __ mov(sp, rscratch1);
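    // (AAPCS64 requires sp to stay 16-byte aligned; the andr with -16 above
    // rounds the new top of stack down to the next 16-byte boundary.)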

    if (return_barrier) {
      // save original return value -- again
      save_return_registers(_masm);
    }

    // If we want, we can templatize thaw by kind, and have three different entries
    __ movw(c_rarg1, (uint32_t)kind);

    __ call_VM_leaf(Continuation::thaw_entry(), rthread, c_rarg1);
    __ mov(rscratch2, r0); // r0 is the sp of the yielding frame

    if (return_barrier) {
      // restore return value (no safepoint in the call to thaw, so even an oop return value should be OK)
      restore_return_registers(_masm);
    } else {
      __ mov(r0, zr); // return 0 (success) from doYield
    }

    // we're now on the yield frame (which is in an address above us b/c rsp has been pushed down)
    __ sub(sp, rscratch2, 2*wordSize); // now pointing to rfp spill
    __ mov(rfp, sp);

    if (return_barrier_exception) {
      __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
      __ authenticate_return_address(c_rarg1);
      __ verify_oop(r0);
      // save return value containing the exception oop in callee-saved R19
      __ mov(r19, r0);

      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);

      // Reinitialize the ptrue predicate register, in case the external
      // runtime call clobbers ptrue reg, as we may return to SVE compiled code.
      __ reinitialize_ptrue();

    // ...

    //   assert(Ra == Pa_base[j], "must be");
    //   MACC(Ra, Ra, t0, t1, t2);
    // }
    // iters = (2*len-i)/2;
    // assert(iters == len-j, "must be");
    // for (; iters--; j++) {
    //   assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
    //   MACC(Rm, Rn, t0, t1, t2);
    //   Rm = *++Pm;
    //   Rn = *--Pn;
    // }
    // Pm_base[i-len] = t0;
    // t0 = t1; t1 = t2; t2 = 0;
    // }

    // while (t0)
    //   t0 = sub(Pm_base, Pn_base, t0, len);
    // }
  };

  // Call here from the interpreter or compiled code to either load
  // multiple returned values from the inline type instance being
  // returned to registers or to store returned values to a newly
  // allocated inline type instance.
  address generate_return_value_stub(address destination, const char* name, bool has_res) {
    // We need to save all registers the calling convention may use so
    // that the runtime call can read or update those registers. This
    // needs to be in sync with SharedRuntime::java_return_convention().
    // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
    enum layout {
      j_rarg7_off = 0, j_rarg7_2,    // j_rarg7 is r0
      j_rarg6_off, j_rarg6_2,
      j_rarg5_off, j_rarg5_2,
      j_rarg4_off, j_rarg4_2,
      j_rarg3_off, j_rarg3_2,
      j_rarg2_off, j_rarg2_2,
      j_rarg1_off, j_rarg1_2,
      j_rarg0_off, j_rarg0_2,

      j_farg7_off, j_farg7_2,
      j_farg6_off, j_farg6_2,
      j_farg5_off, j_farg5_2,
      j_farg4_off, j_farg4_2,
      j_farg3_off, j_farg3_2,
      j_farg2_off, j_farg2_2,
      j_farg1_off, j_farg1_2,
      j_farg0_off, j_farg0_2,

      rfp_off, rfp_off2,
      return_off, return_off2,

      framesize // inclusive of return address
    };
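    // 8 integer pairs + 8 FP pairs + rfp + lr = 36 slots of BytesPerInt
    // = 144 bytes, which is already 16-byte aligned (18 words), so the
    // align_up below is a no-op and the assert that follows always holds.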

    CodeBuffer code(name, 512, 64);
    MacroAssembler* masm = new MacroAssembler(&code);

    int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
    assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
    int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
    int frame_size_in_words = frame_size_in_bytes / wordSize;

    OopMapSet* oop_maps = new OopMapSet();
    OopMap* map = new OopMap(frame_size_in_slots, 0);

    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());

    map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());

    address start = __ pc();

    __ enter(); // Save FP and LR before call

    __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
    __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
    __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
    __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));

    __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
    __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
    __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
    __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
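    // The push order above (FP argument registers first, then the integer
    // argument registers, each pair stored with pre-decrement) leaves
    // j_rarg7 at sp+0 and j_farg0 just below the rfp/lr pair saved by
    // enter(), matching the layout enum and the oop-map offsets above.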

    int frame_complete = __ offset();

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);

    // Call runtime
    __ mov(c_rarg1, r0);
    __ mov(c_rarg0, rthread);

    __ mov(rscratch1, destination);
    __ blr(rscratch1);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(false);

    __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
    __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
    __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
    __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));

    __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
    __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
    __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
    __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));

    __ leave();

    // check for pending exceptions
    Label pending;
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ cbnz(rscratch1, pending);

    if (has_res) {
      __ get_vm_result_oop(r0, rthread);
    }

    __ ret(lr);

    __ bind(pending);
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // -------------
    // make sure all code is generated
    masm->flush();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
    return stub->entry_point();
  }
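
  // The two flavors of this stub are registered in generate_initial_stubs()
  // below: load_inline_type_fields_in_regs (has_res = false) and
  // store_inline_type_fields_to_buf (has_res = true).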

  // Initialization
  void generate_preuniverse_stubs() {
    // preuniverse stubs are not needed for aarch64
  }

  void generate_initial_stubs() {
    // Generate initial stubs and initialize the entry points.

    // Entry points that exist on all platforms. Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    // ...

    if (UseCRC32Intrinsics) {
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
    }

    if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
      StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
    }

    if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
      StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
    }

    if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
        vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
      StubRoutines::_hf2f = generate_float16ToFloat();
      StubRoutines::_f2hf = generate_floatToFloat16();
    }

    if (InlineTypeReturnedAsFields) {
      StubRoutines::_load_inline_type_fields_in_regs =
        generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
      StubRoutines::_store_inline_type_fields_to_buf =
        generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
    }
  }

  void generate_continuation_stubs() {
    // Continuation stubs:
    StubRoutines::_cont_thaw = generate_cont_thaw();
    StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
    StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
    StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
  }

  void generate_final_stubs() {
    // support for verify_oop (must happen after universe_init)
    if (VerifyOops) {
      StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
    }

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    StubRoutines::_method_entry_barrier = generate_method_entry_barrier();