295 // rmethod: Method*
296 // r19_sender_sp: sender sp
297 BLOCK_COMMENT("call Java function");
298 __ mov(r19_sender_sp, sp);
299 __ blr(c_rarg4);
300
301 // we do this here because the notify will already have been done
302 // if we get to the next instruction via an exception
303 //
304 // n.b. adding this instruction here affects the calculation of
305 // whether or not a routine returns to the call stub (used when
306 // doing stack walks) since the normal test is to check the return
307 // pc against the address saved below. so we may need to allow for
308 // this extra instruction in the check.
309
310 // save current address for use by exception handling code
311
312 return_address = __ pc();
313
314 // store result depending on type (everything that is not
315 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
316 // n.b. this assumes Java returns an integral result in r0
317 // and a floating result in j_farg0
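// As a rough C sketch of the dispatch below (T_OBJECT is stored
// like T_LONG because oops are 64 bits on aarch64):
//
//   switch (result_type) {
//   case T_OBJECT: case T_LONG: *(jlong*)result = r0;   break;
//   case T_FLOAT:  *(jfloat*)result  = j_farg0;         break;
//   case T_DOUBLE: *(jdouble*)result = j_farg0;         break;
//   default:       *(jint*)result   = (jint)r0;         break;
//   }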
318 __ ldr(j_rarg2, result);
319 Label is_long, is_float, is_double, exit;
320 __ ldr(j_rarg1, result_type);
321 __ cmp(j_rarg1, (u1)T_OBJECT);
322 __ br(Assembler::EQ, is_long);
323 __ cmp(j_rarg1, (u1)T_LONG);
324 __ br(Assembler::EQ, is_long);
325 __ cmp(j_rarg1, (u1)T_FLOAT);
326 __ br(Assembler::EQ, is_float);
327 __ cmp(j_rarg1, (u1)T_DOUBLE);
328 __ br(Assembler::EQ, is_double);
329
330 // handle T_INT case
331 __ strw(r0, Address(j_rarg2));
332
333 __ BIND(exit);
334
335 // pop parameters
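// sp_after_call_off is a negative offset from rfp, so this is
// esp = rfp + sp_after_call_off * wordSize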
336 __ sub(esp, rfp, -sp_after_call_off * wordSize);
337
338 #ifdef ASSERT
339 // verify that threads correspond
340 {
341 Label L, S;
342 __ ldr(rscratch1, thread);
343 __ cmp(rthread, rscratch1);
344 __ br(Assembler::NE, S);
345 __ get_thread(rscratch1);
346 __ cmp(rthread, rscratch1);
347 __ br(Assembler::EQ, L);
348 __ BIND(S);
349 __ stop("StubRoutines::call_stub: threads must correspond");
350 __ BIND(L);
351 }
359 __ ldpd(v11, v10, d11_save);
360 __ ldpd(v9, v8, d9_save);
361
362 __ ldp(r28, r27, r28_save);
363 __ ldp(r26, r25, r26_save);
364 __ ldp(r24, r23, r24_save);
365 __ ldp(r22, r21, r22_save);
366 __ ldp(r20, r19, r20_save);
367
368 __ ldp(c_rarg0, c_rarg1, call_wrapper);
369 __ ldrw(c_rarg2, result_type);
370 __ ldr(c_rarg3, method);
371 __ ldp(c_rarg4, c_rarg5, entry_point);
372 __ ldp(c_rarg6, c_rarg7, parameter_size);
373
374 // leave frame and return to caller
375 __ leave();
376 __ ret(lr);
377
378 // handle return types different from T_INT
379
380 __ BIND(is_long);
381 __ str(r0, Address(j_rarg2, 0));
382 __ br(Assembler::AL, exit);
383
384 __ BIND(is_float);
385 __ strs(j_farg0, Address(j_rarg2, 0));
386 __ br(Assembler::AL, exit);
387
388 __ BIND(is_double);
389 __ strd(j_farg0, Address(j_rarg2, 0));
390 __ br(Assembler::AL, exit);
391
392 return start;
393 }
394
395 // Return point for a Java call if there's an exception thrown in
396 // Java code. The exception is caught and transformed into a
397 // pending exception stored in JavaThread that can be tested from
398 // within the VM.
399 //
400 // Note: Usually the parameters are removed by the callee. In case
401 // of an exception crossing an activation frame boundary, that is
402 // not the case if the callee is compiled code => need to set up
403 // the sp.
404 //
405 // r0: exception oop
406
407 address generate_catch_exception() {
408 StubCodeMark mark(this, "StubRoutines", "catch_exception");
409 address start = __ pc();
2092 //   |array_tag|     | header_size | element_type |     |log2_element_size|
2093 //    32        30    24            16              8     2                  0
2094 //
2095 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2096 //
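// Rough C equivalent of the objArray fast-path check below (the
// XOR-and-compare-to-zero is just an equality test):
//
//   if (src->klass()->layout_helper() == Klass::array_layout_helper(T_OBJECT))
//     goto L_objArray;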
2097
2098 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2099
2100 // Handle objArrays completely differently...
2101 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2102 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2103 __ movw(rscratch1, objArray_lh);
2104 __ eorw(rscratch2, lh, rscratch1);
2105 __ cbzw(rscratch2, L_objArray);
2106
2107 // if (src->klass() != dst->klass()) return -1;
2108 __ load_klass(rscratch2, dst);
2109 __ eor(rscratch2, rscratch2, scratch_src_klass);
2110 __ cbnz(rscratch2, L_failed);
2111
2112 // if (!src->is_Array()) return -1;
2113 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2114
2115 // At this point, it is known to be a typeArray (array_tag 0x3).
2116 #ifdef ASSERT
2117 {
2118 BLOCK_COMMENT("assert primitive array {");
2119 Label L;
2120 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2121 __ cmpw(lh, rscratch2);
2122 __ br(Assembler::GE, L);
2123 __ stop("must be a primitive array");
2124 __ bind(L);
2125 BLOCK_COMMENT("} assert primitive array done");
2126 }
2127 #endif
2128
2129 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2130 rscratch2, L_failed);
2131
7885 // MACC(Ra, Ra, t0, t1, t2);
7886 // }
7887 // iters = (2*len-i)/2;
7888 // assert(iters == len-j, "must be");
7889 // for (; iters--; j++) {
7890 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
7891 // MACC(Rm, Rn, t0, t1, t2);
7892 // Rm = *++Pm;
7893 // Rn = *--Pn;
7894 // }
7895 // Pm_base[i-len] = t0;
7896 // t0 = t1; t1 = t2; t2 = 0;
7897 // }
7898
7899 // while (t0)
7900 // t0 = sub(Pm_base, Pn_base, t0, len);
7901 // }
7902 };
7903
7904
7905 // Initialization
7906 void generate_initial() {
7907 // Generate initial stubs and initialize the entry points
7908
7909 // entry points that exist in all platforms. Note: This is code
7910 // that could be shared among different platforms - however the
7911 // benefit seems to be smaller than the disadvantage of having a
7912 // much more complicated generator structure. See also comment in
7913 // stubRoutines.hpp.
7914
7915 StubRoutines::_forward_exception_entry = generate_forward_exception();
7916
7917 StubRoutines::_call_stub_entry =
7918 generate_call_stub(StubRoutines::_call_stub_return_address);
7919
7920 // is referenced by megamorphic call
7921 StubRoutines::_catch_exception_entry = generate_catch_exception();
7922
7923 // Build this early so it's available for the interpreter.
7924 StubRoutines::_throw_StackOverflowError_entry =
7934 StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
7935 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
7936 }
7937
7938 if (UseCRC32CIntrinsics) {
7939 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
7940 }
7941
7942 // Disabled until JDK-8210858 is fixed
7943 // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
7944 // StubRoutines::_dlog = generate_dlog();
7945 // }
7946
7947 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
7948 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
7949 }
7950
7951 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
7952 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
7953 }
7954 }
7955
7956 void generate_phase1() {
7957 // Continuation stubs:
7958 StubRoutines::_cont_thaw = generate_cont_thaw();
7959 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
7960 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
7961
7962 JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
7963 JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)
7964 }
7965
7966 void generate_all() {
7967 // support for verify_oop (must happen after universe_init)
7968 if (VerifyOops) {
7969 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
7970 }
7971 StubRoutines::_throw_AbstractMethodError_entry =
7972 generate_throw_exception("AbstractMethodError throw_exception",
7973 CAST_FROM_FN_PTR(address,
295 // rmethod: Method*
296 // r19_sender_sp: sender sp
297 BLOCK_COMMENT("call Java function");
298 __ mov(r19_sender_sp, sp);
299 __ blr(c_rarg4);
300
301 // we do this here because the notify will already have been done
302 // if we get to the next instruction via an exception
303 //
304 // n.b. adding this instruction here affects the calculation of
305 // whether or not a routine returns to the call stub (used when
306 // doing stack walks) since the normal test is to check the return
307 // pc against the address saved below. so we may need to allow for
308 // this extra instruction in the check.
309
310 // save current address for use by exception handling code
311
312 return_address = __ pc();
313
314 // store result depending on type (everything that is not
315 // T_OBJECT, T_PRIMITIVE_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
316 // n.b. this assumes Java returns an integral result in r0
317 // and a floating result in j_farg0
318 // All of the j_rargN registers may be used to return inline type
319 // fields, so be careful not to clobber them.
320 // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
321 // assignment of Rresult below.
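// As a rough C sketch of the dispatch below: T_OBJECT and
// T_PRIMITIVE_OBJECT first go to check_prim, where a set bit 0 in
// r0 marks a scalarized inline-type return that must be buffered
// via the pack handler; otherwise r0 is an ordinary oop stored
// like T_LONG:
//
//   if (r0 & 1)  call_pack_handler(r0);     // buffer the field registers
//   else if (is_64bit(result_type)) *(jlong*)Rresult = r0;
//   else ...                                // float/double/int as above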
322 Register Rresult = r14, Rresult_type = r15;
323 __ ldr(Rresult, result);
324 Label is_long, is_float, is_double, check_prim, exit;
325 __ ldr(Rresult_type, result_type);
326 __ cmp(Rresult_type, (u1)T_OBJECT);
327 __ br(Assembler::EQ, check_prim);
328 __ cmp(Rresult_type, (u1)T_PRIMITIVE_OBJECT);
329 __ br(Assembler::EQ, check_prim);
330 __ cmp(Rresult_type, (u1)T_LONG);
331 __ br(Assembler::EQ, is_long);
332 __ cmp(Rresult_type, (u1)T_FLOAT);
333 __ br(Assembler::EQ, is_float);
334 __ cmp(Rresult_type, (u1)T_DOUBLE);
335 __ br(Assembler::EQ, is_double);
336
337 // handle T_INT case
338 __ strw(r0, Address(Rresult));
339
340 __ BIND(exit);
341
342 // pop parameters
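// sp_after_call_off is a negative offset from rfp, so this is
// esp = rfp + sp_after_call_off * wordSize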
343 __ sub(esp, rfp, -sp_after_call_off * wordSize);
344
345 #ifdef ASSERT
346 // verify that threads correspond
347 {
348 Label L, S;
349 __ ldr(rscratch1, thread);
350 __ cmp(rthread, rscratch1);
351 __ br(Assembler::NE, S);
352 __ get_thread(rscratch1);
353 __ cmp(rthread, rscratch1);
354 __ br(Assembler::EQ, L);
355 __ BIND(S);
356 __ stop("StubRoutines::call_stub: threads must correspond");
357 __ BIND(L);
358 }
366 __ ldpd(v11, v10, d11_save);
367 __ ldpd(v9, v8, d9_save);
368
369 __ ldp(r28, r27, r28_save);
370 __ ldp(r26, r25, r26_save);
371 __ ldp(r24, r23, r24_save);
372 __ ldp(r22, r21, r22_save);
373 __ ldp(r20, r19, r20_save);
374
375 __ ldp(c_rarg0, c_rarg1, call_wrapper);
376 __ ldrw(c_rarg2, result_type);
377 __ ldr(c_rarg3, method);
378 __ ldp(c_rarg4, c_rarg5, entry_point);
379 __ ldp(c_rarg6, c_rarg7, parameter_size);
380
381 // leave frame and return to caller
382 __ leave();
383 __ ret(lr);
384
385 // handle return types different from T_INT
386 __ BIND(check_prim);
387 if (InlineTypeReturnedAsFields) {
388 // Check for scalarized return value
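// Bit 0 of r0 is the scalarization tag: clear means r0 is an
// ordinary oop, set means the fields came back in registers and
// the remaining bits of r0 are the InlineKlass*.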
389 __ tbz(r0, 0, is_long);
390 // Load pack handler address
391 __ andr(rscratch1, r0, -2);
392 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
393 __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
394 __ blr(rscratch1);
395 __ b(exit);
396 }
397
398 __ BIND(is_long);
399 __ str(r0, Address(Rresult, 0));
400 __ br(Assembler::AL, exit);
401
402 __ BIND(is_float);
403 __ strs(j_farg0, Address(Rresult, 0));
404 __ br(Assembler::AL, exit);
405
406 __ BIND(is_double);
407 __ strd(j_farg0, Address(Rresult, 0));
408 __ br(Assembler::AL, exit);
409
410 return start;
411 }
412
413 // Return point for a Java call if there's an exception thrown in
414 // Java code. The exception is caught and transformed into a
415 // pending exception stored in JavaThread that can be tested from
416 // within the VM.
417 //
418 // Note: Usually the parameters are removed by the callee. In case
419 // of an exception crossing an activation frame boundary, that is
420 // not the case if the callee is compiled code => need to set up
421 // the sp.
422 //
423 // r0: exception oop
424
425 address generate_catch_exception() {
426 StubCodeMark mark(this, "StubRoutines", "catch_exception");
427 address start = __ pc();
2110 //   |array_tag|     | header_size | element_type |     |log2_element_size|
2111 //    32        30    24            16              8     2                  0
2112 //
2113 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2114 //
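// Rough C equivalent of the checks below (bit names abbreviated):
//
//   if (lh == Klass::array_layout_helper(T_OBJECT)) goto L_objArray;
//   if (src->klass() != dst->klass()) return -1;
//   if (lh & flat_value_bit)      return -1;        // flat inline type array
//   if (lh & null_free_array_bit) goto L_objArray;  // copy as an object array
//   if (lh >= 0)                  return -1;        // not an array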
2115
2116 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2117
2118 // Handle objArrays completely differently...
2119 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2120 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2121 __ movw(rscratch1, objArray_lh);
2122 __ eorw(rscratch2, lh, rscratch1);
2123 __ cbzw(rscratch2, L_objArray);
2124
2125 // if (src->klass() != dst->klass()) return -1;
2126 __ load_klass(rscratch2, dst);
2127 __ eor(rscratch2, rscratch2, scratch_src_klass);
2128 __ cbnz(rscratch2, L_failed);
2129
2130 // Check for flat inline type array -> return -1
2131 __ tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2132 __ br(Assembler::NE, L_failed);
2133
2134 // Check for null-free (non-flat) inline type array -> handle as object array
2135 __ tst(lh, Klass::_lh_null_free_array_bit_inplace);
2136 __ br(Assembler::NE, L_objArray);
2137
2138 // if (!src->is_Array()) return -1;
2139 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2140
2141 // At this point, it is known to be a typeArray (array_tag 0x3).
2142 #ifdef ASSERT
2143 {
2144 BLOCK_COMMENT("assert primitive array {");
2145 Label L;
2146 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2147 __ cmpw(lh, rscratch2);
2148 __ br(Assembler::GE, L);
2149 __ stop("must be a primitive array");
2150 __ bind(L);
2151 BLOCK_COMMENT("} assert primitive array done");
2152 }
2153 #endif
2154
2155 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2156 rscratch2, L_failed);
2157
7911 // MACC(Ra, Ra, t0, t1, t2);
7912 // }
7913 // iters = (2*len-i)/2;
7914 // assert(iters == len-j, "must be");
7915 // for (; iters--; j++) {
7916 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
7917 // MACC(Rm, Rn, t0, t1, t2);
7918 // Rm = *++Pm;
7919 // Rn = *--Pn;
7920 // }
7921 // Pm_base[i-len] = t0;
7922 // t0 = t1; t1 = t2; t2 = 0;
7923 // }
7924
7925 // while (t0)
7926 // t0 = sub(Pm_base, Pn_base, t0, len);
7927 // }
7928 };
7929
7930
7931 // Called from the interpreter or compiled code either to load
7932 // multiple returned values from the inline type instance being
7933 // returned into registers, or to store returned values into a
7934 // newly allocated inline type instance.
7935 address generate_return_value_stub(address destination, const char* name, bool has_res) {
7936 // We need to save all registers the calling convention may use
7937 // so that the runtime call can read or update those registers.
7938 // This needs to be in sync with SharedRuntime::java_return_convention().
7939 // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
7940 enum layout {
7941 j_rarg7_off = 0, j_rarg7_2, // j_rarg7 is r0
7942 j_rarg6_off, j_rarg6_2,
7943 j_rarg5_off, j_rarg5_2,
7944 j_rarg4_off, j_rarg4_2,
7945 j_rarg3_off, j_rarg3_2,
7946 j_rarg2_off, j_rarg2_2,
7947 j_rarg1_off, j_rarg1_2,
7948 j_rarg0_off, j_rarg0_2,
7949
7950 j_farg7_off, j_farg7_2,
7951 j_farg6_off, j_farg6_2,
7952 j_farg5_off, j_farg5_2,
7953 j_farg4_off, j_farg4_2,
7954 j_farg3_off, j_farg3_2,
7955 j_farg2_off, j_farg2_2,
7956 j_farg1_off, j_farg1_2,
7957 j_farg0_off, j_farg0_2,
7958
7959 rfp_off, rfp_off2,
7960 return_off, return_off2,
7961
7962 framesize // inclusive of return address
7963 };
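// Each 64-bit register occupies two 32-bit stack slots (the
// *_off/*_2 pairs); the OopMap entries below are indexed by these
// slots, with j_rarg7 in slot 0 at the lowest address of the save area.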
7964
7965 CodeBuffer code(name, 512, 64);
7966 MacroAssembler* masm = new MacroAssembler(&code);
7967
7968 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
7969 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
7970 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
7971 int frame_size_in_words = frame_size_in_bytes / wordSize;
7972
7973 OopMapSet* oop_maps = new OopMapSet();
7974 OopMap* map = new OopMap(frame_size_in_slots, 0);
7975
7976 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
7977 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
7978 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
7979 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
7980 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
7981 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
7982 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
7983 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
7984
7985 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
7986 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
7987 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
7988 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
7989 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
7990 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
7991 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
7992 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
7993
7994 address start = __ pc();
7995
7996 __ enter(); // Save FP and LR before call
7997
7998 __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
7999 __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
8000 __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
8001 __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
8002
8003 __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
8004 __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
8005 __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
8006 __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
8007
8008 int frame_complete = __ offset();
8009
8010 // Set up last_Java_sp and last_Java_fp
8011 address the_pc = __ pc();
8012 __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
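// Publishing last_Java_sp/pc makes this frame walkable during the
// runtime call; the GC map added after the call is keyed by the
// same pc and describes the saved j_rarg/j_farg slots.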
8013
8014 // Call runtime
8015 __ mov(c_rarg1, r0);
8016 __ mov(c_rarg0, rthread);
8017
8018 __ mov(rscratch1, destination);
8019 __ blr(rscratch1);
8020
8021 oop_maps->add_gc_map(the_pc - start, map);
8022
8023 __ reset_last_Java_frame(false);
8024
8025 __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
8026 __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
8027 __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
8028 __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
8029
8030 __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
8031 __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
8032 __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
8033 __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
8034
8035 __ leave();
8036
8037 // check for pending exceptions
8038 Label pending;
8039 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
8040 __ cbnz(rscratch1, pending);
8041
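// On the success path the runtime leaves its oop result (the
// buffered inline type, when has_res) in JavaThread::vm_result;
// fetch it into r0.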
8042 if (has_res) {
8043 __ get_vm_result(r0, rthread);
8044 }
8045
8046 __ ret(lr);
8047
8048 __ bind(pending);
8049 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
8050
8051 // -------------
8052 // make sure all code is generated
8053 masm->flush();
8054
8055 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
8056 return stub->entry_point();
8057 }
8058
8059 // Initialization
8060 void generate_initial() {
8061 // Generate initial stubs and initialize the entry points
8062
8063 // entry points that exist in all platforms. Note: This is code
8064 // that could be shared among different platforms - however the
8065 // benefit seems to be smaller than the disadvantage of having a
8066 // much more complicated generator structure. See also comment in
8067 // stubRoutines.hpp.
8068
8069 StubRoutines::_forward_exception_entry = generate_forward_exception();
8070
8071 StubRoutines::_call_stub_entry =
8072 generate_call_stub(StubRoutines::_call_stub_return_address);
8073
8074 // is referenced by megamorphic call
8075 StubRoutines::_catch_exception_entry = generate_catch_exception();
8076
8077 // Build this early so it's available for the interpreter.
8078 StubRoutines::_throw_StackOverflowError_entry =
8088 StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
8089 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8090 }
8091
8092 if (UseCRC32CIntrinsics) {
8093 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8094 }
8095
8096 // Disabled until JDK-8210858 is fixed
8097 // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
8098 // StubRoutines::_dlog = generate_dlog();
8099 // }
8100
8101 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8102 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8103 }
8104
8105 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8106 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8107 }
8108
8109 if (InlineTypeReturnedAsFields) {
8110 StubRoutines::_load_inline_type_fields_in_regs =
8111 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
8112 StubRoutines::_store_inline_type_fields_to_buf =
8113 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
8114 }
8115 }
8116
8117 void generate_phase1() {
8118 // Continuation stubs:
8119 StubRoutines::_cont_thaw = generate_cont_thaw();
8120 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8121 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8122
8123 JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
8124 JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)
8125 }
8126
8127 void generate_all() {
8128 // support for verify_oop (must happen after universe_init)
8129 if (VerifyOops) {
8130 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
8131 }
8132 StubRoutines::_throw_AbstractMethodError_entry =
8133 generate_throw_exception("AbstractMethodError throw_exception",
8134 CAST_FROM_FN_PTR(address,