295 // rmethod: Method*
296 // r19_sender_sp: sender sp
297 BLOCK_COMMENT("call Java function");
298 __ mov(r19_sender_sp, sp);
299 __ blr(c_rarg4);
300
301 // we do this here because the notify will already have been done
302 // if we get to the next instruction via an exception
303 //
304 // n.b. adding this instruction here affects the calculation of
305 // whether or not a routine returns to the call stub (used when
306 // doing stack walks) since the normal test is to check the return
307 // pc against the address saved below. so we may need to allow for
308 // this extra instruction in the check.
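// (concretely: StubRoutines::returns_to_call_stub(pc) just compares pc with
// _call_stub_return_address, i.e. the address captured below, so anything
// emitted between the blr above and that capture shifts the tested address)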
309
310 // save current address for use by exception handling code
311
312 return_address = __ pc();
313
314 // store result depending on type (everything that is not
315 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
316 // n.b. this assumes Java returns an integral result in r0
317 // and a floating result in j_farg0
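// in C terms the dispatch below is roughly (a sketch, not generated code):
//   switch (result_type) {
//     case T_OBJECT:
//     case T_LONG:   *(jlong*)result   = r0;       break;
//     case T_FLOAT:  *(jfloat*)result  = j_farg0;  break;
//     case T_DOUBLE: *(jdouble*)result = j_farg0;  break;
//     default:       *(jint*)result    = (jint)r0; // T_INT
//   }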
318 __ ldr(j_rarg2, result);
319 Label is_long, is_float, is_double, exit;
320 __ ldr(j_rarg1, result_type);
321 __ cmp(j_rarg1, (u1)T_OBJECT);
322 __ br(Assembler::EQ, is_long);
323 __ cmp(j_rarg1, (u1)T_LONG);
324 __ br(Assembler::EQ, is_long);
325 __ cmp(j_rarg1, (u1)T_FLOAT);
326 __ br(Assembler::EQ, is_float);
327 __ cmp(j_rarg1, (u1)T_DOUBLE);
328 __ br(Assembler::EQ, is_double);
329
330 // handle T_INT case
331 __ strw(r0, Address(j_rarg2));
332
333 __ BIND(exit);
334
335 // pop parameters
336 __ sub(esp, rfp, -sp_after_call_off * wordSize);
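// (sp_after_call_off is a negative rfp-relative slot offset, so negating it
// gives the positive byte distance below the saved frame pointer where esp
// must end up)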
337
338 #ifdef ASSERT
339 // verify that threads correspond
340 {
341 Label L, S;
342 __ ldr(rscratch1, thread);
343 __ cmp(rthread, rscratch1);
344 __ br(Assembler::NE, S);
345 __ get_thread(rscratch1);
346 __ cmp(rthread, rscratch1);
347 __ br(Assembler::EQ, L);
348 __ BIND(S);
349 __ stop("StubRoutines::call_stub: threads must correspond");
350 __ BIND(L);
351 }
359 __ ldpd(v11, v10, d11_save);
360 __ ldpd(v9, v8, d9_save);
361
362 __ ldp(r28, r27, r28_save);
363 __ ldp(r26, r25, r26_save);
364 __ ldp(r24, r23, r24_save);
365 __ ldp(r22, r21, r22_save);
366 __ ldp(r20, r19, r20_save);
367
368 __ ldp(c_rarg0, c_rarg1, call_wrapper);
369 __ ldrw(c_rarg2, result_type);
370 __ ldr(c_rarg3, method);
371 __ ldp(c_rarg4, c_rarg5, entry_point);
372 __ ldp(c_rarg6, c_rarg7, parameter_size);
373
374 // leave frame and return to caller
375 __ leave();
376 __ ret(lr);
377
378 // handle return types different from T_INT
379
380 __ BIND(is_long);
381 __ str(r0, Address(j_rarg2, 0));
382 __ br(Assembler::AL, exit);
383
384 __ BIND(is_float);
385 __ strs(j_farg0, Address(j_rarg2, 0));
386 __ br(Assembler::AL, exit);
387
388 __ BIND(is_double);
389 __ strd(j_farg0, Address(j_rarg2, 0));
390 __ br(Assembler::AL, exit);
391
392 return start;
393 }
394
395 // Return point for a Java call if there's an exception thrown in
396 // Java code. The exception is caught and transformed into a
397 // pending exception stored in JavaThread that can be tested from
398 // within the VM.
399 //
400 // Note: Usually the parameters are removed by the callee. In case
401 // of an exception crossing an activation frame boundary, that is
402 // not the case if the callee is compiled code => need to setup the
403 // rsp.
404 //
405 // r0: exception oop
406
407 address generate_catch_exception() {
408 StubCodeMark mark(this, "StubRoutines", "catch_exception");
409 address start = __ pc();
2182 //   |array_tag|  | header_size | element_type |     |log2_element_size|
2183 //    32       30  24           16             8     2                 0
2184 //
2185 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2186 //
2187
2188 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2189
2190 // Handle objArrays completely differently...
2191 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2192 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2193 __ movw(rscratch1, objArray_lh);
2194 __ eorw(rscratch2, lh, rscratch1);
2195 __ cbzw(rscratch2, L_objArray);
2196
2197 // if (src->klass() != dst->klass()) return -1;
2198 __ load_klass(rscratch2, dst);
2199 __ eor(rscratch2, rscratch2, scratch_src_klass);
2200 __ cbnz(rscratch2, L_failed);
2201
2202 // if (!src->is_Array()) return -1;
2203 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2204
2205 // At this point, it is known to be a typeArray (array_tag 0x3).
2206 #ifdef ASSERT
2207 {
2208 BLOCK_COMMENT("assert primitive array {");
2209 Label L;
2210 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2211 __ cmpw(lh, rscratch2);
2212 __ br(Assembler::GE, L);
2213 __ stop("must be a primitive array");
2214 __ bind(L);
2215 BLOCK_COMMENT("} assert primitive array done");
2216 }
2217 #endif
2218
2219 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2220 rscratch2, L_failed);
2221
8016 // MACC(Ra, Ra, t0, t1, t2);
8017 // }
8018 // iters = (2*len-i)/2;
8019 // assert(iters == len-j, "must be");
8020 // for (; iters--; j++) {
8021 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
8022 // MACC(Rm, Rn, t0, t1, t2);
8023 // Rm = *++Pm;
8024 // Rn = *--Pn;
8025 // }
8026 // Pm_base[i-len] = t0;
8027 // t0 = t1; t1 = t2; t2 = 0;
8028 // }
8029
8030 // while (t0)
8031 // t0 = sub(Pm_base, Pn_base, t0, len);
8032 // }
8033 };
8034
8035
8036 // Initialization
8037 void generate_initial_stubs() {
8038 // Generates the initial stubs and initializes the entry points
8039
8040 // entry points that exist on all platforms. Note: this is code
8041 // that could be shared among different platforms - however the
8042 // benefit seems to be smaller than the disadvantage of having a
8043 // much more complicated generator structure. See also comment in
8044 // stubRoutines.hpp.
8045
8046 StubRoutines::_forward_exception_entry = generate_forward_exception();
8047
8048 StubRoutines::_call_stub_entry =
8049 generate_call_stub(StubRoutines::_call_stub_return_address);
8050
8051 // is referenced by megamorphic call
8052 StubRoutines::_catch_exception_entry = generate_catch_exception();
8053
8054 // Build this early so it's available for the interpreter.
8055 StubRoutines::_throw_StackOverflowError_entry =
8071 StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
8072 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8073 }
8074
8075 if (UseCRC32CIntrinsics) {
8076 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8077 }
8078
8079 // Disabled until JDK-8210858 is fixed
8080 // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
8081 // StubRoutines::_dlog = generate_dlog();
8082 // }
8083
8084 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8085 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8086 }
8087
8088 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8089 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8090 }
8091 }
8092
8093 void generate_continuation_stubs() {
8094 // Continuation stubs:
8095 StubRoutines::_cont_thaw = generate_cont_thaw();
8096 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8097 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8098
8099 JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
8100 JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)
8101 }
8102
8103 void generate_final_stubs() {
8104 // support for verify_oop (must happen after universe_init)
8105 if (VerifyOops) {
8106 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
8107 }
8108 StubRoutines::_throw_AbstractMethodError_entry =
8109 generate_throw_exception("AbstractMethodError throw_exception",
8110 CAST_FROM_FN_PTR(address,
295 // rmethod: Method*
296 // r19_sender_sp: sender sp
297 BLOCK_COMMENT("call Java function");
298 __ mov(r19_sender_sp, sp);
299 __ blr(c_rarg4);
300
301 // we do this here because the notify will already have been done
302 // if we get to the next instruction via an exception
303 //
304 // n.b. adding this instruction here affects the calculation of
305 // whether or not a routine returns to the call stub (used when
306 // doing stack walks) since the normal test is to check the return
307 // pc against the address saved below. so we may need to allow for
308 // this extra instruction in the check.
309
310 // save current address for use by exception handling code
311
312 return_address = __ pc();
313
314 // store result depending on type (everything that is not
315 // T_OBJECT, T_PRIMITIVE_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
316 // n.b. this assumes Java returns an integral result in r0
317 // and a floating result in j_farg0
318 // All of j_rargN may be used to return inline type fields so be careful
319 // not to clobber those.
320 // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
321 // assignment of Rresult below.
322 Register Rresult = r14, Rresult_type = r15;
323 __ ldr(Rresult, result);
324 Label is_long, is_float, is_double, check_prim, exit;
325 __ ldr(Rresult_type, result_type);
326 __ cmp(Rresult_type, (u1)T_OBJECT);
327 __ br(Assembler::EQ, check_prim);
328 __ cmp(Rresult_type, (u1)T_PRIMITIVE_OBJECT);
329 __ br(Assembler::EQ, check_prim);
330 __ cmp(Rresult_type, (u1)T_LONG);
331 __ br(Assembler::EQ, is_long);
332 __ cmp(Rresult_type, (u1)T_FLOAT);
333 __ br(Assembler::EQ, is_float);
334 __ cmp(Rresult_type, (u1)T_DOUBLE);
335 __ br(Assembler::EQ, is_double);
336
337 // handle T_INT case
338 __ strw(r0, Address(Rresult));
339
340 __ BIND(exit);
341
342 // pop parameters
343 __ sub(esp, rfp, -sp_after_call_off * wordSize);
344
345 #ifdef ASSERT
346 // verify that threads correspond
347 {
348 Label L, S;
349 __ ldr(rscratch1, thread);
350 __ cmp(rthread, rscratch1);
351 __ br(Assembler::NE, S);
352 __ get_thread(rscratch1);
353 __ cmp(rthread, rscratch1);
354 __ br(Assembler::EQ, L);
355 __ BIND(S);
356 __ stop("StubRoutines::call_stub: threads must correspond");
357 __ BIND(L);
358 }
366 __ ldpd(v11, v10, d11_save);
367 __ ldpd(v9, v8, d9_save);
368
369 __ ldp(r28, r27, r28_save);
370 __ ldp(r26, r25, r26_save);
371 __ ldp(r24, r23, r24_save);
372 __ ldp(r22, r21, r22_save);
373 __ ldp(r20, r19, r20_save);
374
375 __ ldp(c_rarg0, c_rarg1, call_wrapper);
376 __ ldrw(c_rarg2, result_type);
377 __ ldr(c_rarg3, method);
378 __ ldp(c_rarg4, c_rarg5, entry_point);
379 __ ldp(c_rarg6, c_rarg7, parameter_size);
380
381 // leave frame and return to caller
382 __ leave();
383 __ ret(lr);
384
385 // handle return types different from T_INT
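// for T_OBJECT/T_PRIMITIVE_OBJECT, check_prim first tests the scalarized
// return convention (a sketch of the convention, see
// InlineTypeReturnedAsFields): bit 0 of r0 set means r0 & ~1 is the klass
// whose pack handler buffers the field registers into a heap instance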
386 __ BIND(check_prim);
387 if (InlineTypeReturnedAsFields) {
388 // Check for scalarized return value
389 __ tbz(r0, 0, is_long);
390 // Load pack handler address
391 __ andr(rscratch1, r0, -2);
392 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
393 __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
394 __ blr(rscratch1);
395 __ b(exit);
396 }
397
398 __ BIND(is_long);
399 __ str(r0, Address(Rresult, 0));
400 __ br(Assembler::AL, exit);
401
402 __ BIND(is_float);
403 __ strs(j_farg0, Address(Rresult, 0));
404 __ br(Assembler::AL, exit);
405
406 __ BIND(is_double);
407 __ strd(j_farg0, Address(Rresult, 0));
408 __ br(Assembler::AL, exit);
409
410 return start;
411 }
412
413 // Return point for a Java call if there's an exception thrown in
414 // Java code. The exception is caught and transformed into a
415 // pending exception stored in JavaThread that can be tested from
416 // within the VM.
417 //
418 // Note: Usually the parameters are removed by the callee. In case
419 // of an exception crossing an activation frame boundary, that is
420 // not the case if the callee is compiled code => need to setup the
421 // rsp.
422 //
423 // r0: exception oop
424
425 address generate_catch_exception() {
426 StubCodeMark mark(this, "StubRoutines", "catch_exception");
427 address start = __ pc();
2200 //   |array_tag|  | header_size | element_type |     |log2_element_size|
2201 //    32       30  24           16             8     2                 0
2202 //
2203 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2204 //
2205
2206 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2207
2208 // Handle objArrays completely differently...
2209 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2210 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2211 __ movw(rscratch1, objArray_lh);
2212 __ eorw(rscratch2, lh, rscratch1);
2213 __ cbzw(rscratch2, L_objArray);
2214
2215 // if (src->klass() != dst->klass()) return -1;
2216 __ load_klass(rscratch2, dst);
2217 __ eor(rscratch2, rscratch2, scratch_src_klass);
2218 __ cbnz(rscratch2, L_failed);
2219
2220 // Check for flat inline type array -> return -1
2221 __ tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2222 __ br(Assembler::NE, L_failed);
2223
2224 // Check for null-free (non-flat) inline type array -> handle as object array
2225 __ tst(lh, Klass::_lh_null_free_array_bit_inplace);
2226 __ br(Assembler::NE, L_objArray);
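// both tests read bits cached in the layout helper: a flat array cannot be
// copied as an array of oops, while a null-free non-flat array still holds
// ordinary oops and can take the objArray path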
2227
2228 // if (!src->is_Array()) return -1;
2229 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2230
2231 // At this point, it is known to be a typeArray (array_tag 0x3).
2232 #ifdef ASSERT
2233 {
2234 BLOCK_COMMENT("assert primitive array {");
2235 Label L;
2236 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2237 __ cmpw(lh, rscratch2);
2238 __ br(Assembler::GE, L);
2239 __ stop("must be a primitive array");
2240 __ bind(L);
2241 BLOCK_COMMENT("} assert primitive array done");
2242 }
2243 #endif
2244
2245 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2246 rscratch2, L_failed);
2247
8042 // MACC(Ra, Ra, t0, t1, t2);
8043 // }
8044 // iters = (2*len-i)/2;
8045 // assert(iters == len-j, "must be");
8046 // for (; iters--; j++) {
8047 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
8048 // MACC(Rm, Rn, t0, t1, t2);
8049 // Rm = *++Pm;
8050 // Rn = *--Pn;
8051 // }
8052 // Pm_base[i-len] = t0;
8053 // t0 = t1; t1 = t2; t2 = 0;
8054 // }
8055
8056 // while (t0)
8057 // t0 = sub(Pm_base, Pn_base, t0, len);
8058 // }
8059 };
8060
8061
8062 // Called from the interpreter or compiled code to either load the
8063 // fields of an inline type instance being returned into registers,
8064 // or to store returned field values into a newly allocated inline
8065 // type instance.
8066 address generate_return_value_stub(address destination, const char* name, bool has_res) {
8067 // We need to save all registers the calling convention may use so
8068 // that the runtime calls can read or update those registers. This needs to
8069 // be in sync with SharedRuntime::java_return_convention().
8070 // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
8071 enum layout {
8072 j_rarg7_off = 0, j_rarg7_2, // j_rarg7 is r0
8073 j_rarg6_off, j_rarg6_2,
8074 j_rarg5_off, j_rarg5_2,
8075 j_rarg4_off, j_rarg4_2,
8076 j_rarg3_off, j_rarg3_2,
8077 j_rarg2_off, j_rarg2_2,
8078 j_rarg1_off, j_rarg1_2,
8079 j_rarg0_off, j_rarg0_2,
8080
8081 j_farg7_off, j_farg7_2,
8082 j_farg6_off, j_farg6_2,
8083 j_farg5_off, j_farg5_2,
8084 j_farg4_off, j_farg4_2,
8085 j_farg3_off, j_farg3_2,
8086 j_farg2_off, j_farg2_2,
8087 j_farg1_off, j_farg1_2,
8088 j_farg0_off, j_farg0_2,
8089
8090 rfp_off, rfp_off2,
8091 return_off, return_off2,
8092
8093 framesize // inclusive of return address
8094 };
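// each *_off above is a 32-bit VMReg stack slot; every 64-bit register takes
// two slots (the *_2 entries), and framesize includes the saved rfp and
// return address, so framesize * BytesPerInt is the whole frame in bytes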
8095
8096 CodeBuffer code(name, 512, 64);
8097 MacroAssembler* masm = new MacroAssembler(&code);
8098
8099 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
8100 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
8101 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
8102 int frame_size_in_words = frame_size_in_bytes / wordSize;
8103
8104 OopMapSet* oop_maps = new OopMapSet();
8105 OopMap* map = new OopMap(frame_size_in_slots, 0);
8106
8107 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
8108 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
8109 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
8110 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
8111 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
8112 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
8113 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
8114 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
8115
8116 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
8117 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
8118 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
8119 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
8120 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
8121 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
8122 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
8123 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
8124
8125 address start = __ pc();
8126
8127 __ enter(); // Save FP and LR before call
8128
8129 __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
8130 __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
8131 __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
8132 __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
8133
8134 __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
8135 __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
8136 __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
8137 __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
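// the pushes mirror the layout enum: the last pair pushed (j_rarg7/j_rarg6)
// ends at the lowest address, matching j_rarg7_off == 0 in the OopMap slots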
8138
8139 int frame_complete = __ offset();
8140
8141 // Set up last_Java_sp and last_Java_fp
8142 address the_pc = __ pc();
8143 __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
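// the anchor frame lets the VM walk the stack from this stub during the
// runtime call; the_pc doubles as the gc-map offset registered below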
8144
8145 // Call runtime
8146 __ mov(c_rarg1, r0);
8147 __ mov(c_rarg0, rthread);
8148
8149 __ mov(rscratch1, destination);
8150 __ blr(rscratch1);
8151
8152 oop_maps->add_gc_map(the_pc - start, map);
8153
8154 __ reset_last_Java_frame(false);
8155
8156 __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
8157 __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
8158 __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
8159 __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
8160
8161 __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
8162 __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
8163 __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
8164 __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
8165
8166 __ leave();
8167
8168 // check for pending exceptions
8169 Label pending;
8170 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
8171 __ cbnz(rscratch1, pending);
8172
8173 if (has_res) {
8174 __ get_vm_result(r0, rthread);
8175 }
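// get_vm_result() loads the oop the callee left in JavaThread::_vm_result
// (here, the freshly buffered inline type) and clears that field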
8176
8177 __ ret(lr);
8178
8179 __ bind(pending);
8180 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
8181
8182 // -------------
8183 // make sure all code is generated
8184 masm->flush();
8185
8186 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
8187 return stub->entry_point();
8188 }
8189
8190 // Initialization
8191 void generate_initial_stubs() {
8192 // Generates the initial stubs and initializes the entry points
8193
8194 // entry points that exist on all platforms. Note: this is code
8195 // that could be shared among different platforms - however the
8196 // benefit seems to be smaller than the disadvantage of having a
8197 // much more complicated generator structure. See also comment in
8198 // stubRoutines.hpp.
8199
8200 StubRoutines::_forward_exception_entry = generate_forward_exception();
8201
8202 StubRoutines::_call_stub_entry =
8203 generate_call_stub(StubRoutines::_call_stub_return_address);
8204
8205 // is referenced by megamorphic call
8206 StubRoutines::_catch_exception_entry = generate_catch_exception();
8207
8208 // Build this early so it's available for the interpreter.
8209 StubRoutines::_throw_StackOverflowError_entry =
8225 StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
8226 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8227 }
8228
8229 if (UseCRC32CIntrinsics) {
8230 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8231 }
8232
8233 // Disabled until JDK-8210858 is fixed
8234 // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
8235 // StubRoutines::_dlog = generate_dlog();
8236 // }
8237
8238 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8239 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8240 }
8241
8242 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8243 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8244 }
8245
8246 if (InlineTypeReturnedAsFields) {
8247 StubRoutines::_load_inline_type_fields_in_regs =
8248 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
8249 StubRoutines::_store_inline_type_fields_to_buf =
8250 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
8251 }
8252 }
8253
8254 void generate_continuation_stubs() {
8255 // Continuation stubs:
8256 StubRoutines::_cont_thaw = generate_cont_thaw();
8257 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8258 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8259
8260 JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
8261 JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)
8262 }
8263
8264 void generate_final_stubs() {
8265 // support for verify_oop (must happen after universe_init)
8266 if (VerifyOops) {
8267 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
8268 }
8269 StubRoutines::_throw_AbstractMethodError_entry =
8270 generate_throw_exception("AbstractMethodError throw_exception",
8271 CAST_FROM_FN_PTR(address,