// rmethod: Method*
// r19_sender_sp: sender sp
BLOCK_COMMENT("call Java function");
__ mov(r19_sender_sp, sp);
__ blr(c_rarg4);

// we do this here because the notify will already have been done
// if we get to the next instruction via an exception
//
// n.b. adding this instruction here affects the calculation of
// whether or not a routine returns to the call stub (used when
// doing stack walks) since the normal test is to check the return
// pc against the address saved below. so we may need to allow for
// this extra instruction in the check.
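//
// For reference, the check referred to is a sketch of
// StubRoutines::returns_to_call_stub() in the shared code (quoted from
// memory, so treat the exact shape as an assumption):
//
//   static bool returns_to_call_stub(address return_pc) {
//     return return_pc == _call_stub_return_address;
//   }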

// save current address for use by exception handling code

return_address = __ pc();

// store result depending on type (everything that is not
// T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
// n.b. this assumes Java returns an integral result in r0
// and a floating result in j_farg0
__ ldr(j_rarg2, result);
Label is_long, is_float, is_double, exit;
__ ldr(j_rarg1, result_type);
__ cmp(j_rarg1, (u1)T_OBJECT);
__ br(Assembler::EQ, is_long);
__ cmp(j_rarg1, (u1)T_LONG);
__ br(Assembler::EQ, is_long);
__ cmp(j_rarg1, (u1)T_FLOAT);
__ br(Assembler::EQ, is_float);
__ cmp(j_rarg1, (u1)T_DOUBLE);
__ br(Assembler::EQ, is_double);

// handle T_INT case
__ strw(r0, Address(j_rarg2));

__ BIND(exit);

// pop parameters
__ sub(esp, rfp, -sp_after_call_off * wordSize);
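// n.b. sp_after_call_off is a negative offset from rfp, so subtracting
// its negation is effectively rfp + sp_after_call_off; the sign reading
// is inferred from this expression, as the offset's definition is not
// part of this excerpt.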

#ifdef ASSERT
// verify that threads correspond
{
  Label L, S;
  __ ldr(rscratch1, thread);
  __ cmp(rthread, rscratch1);
  __ br(Assembler::NE, S);
  __ get_thread(rscratch1);
  __ cmp(rthread, rscratch1);
  __ br(Assembler::EQ, L);
  __ BIND(S);
  __ stop("StubRoutines::call_stub: threads must correspond");
  __ BIND(L);
}
// ...

__ ldpd(v11, v10, d11_save);
__ ldpd(v9, v8, d9_save);

__ ldp(r28, r27, r28_save);
__ ldp(r26, r25, r26_save);
__ ldp(r24, r23, r24_save);
__ ldp(r22, r21, r22_save);
__ ldp(r20, r19, r20_save);

__ ldp(c_rarg0, c_rarg1, call_wrapper);
__ ldrw(c_rarg2, result_type);
__ ldr(c_rarg3, method);
__ ldp(c_rarg4, c_rarg5, entry_point);
__ ldp(c_rarg6, c_rarg7, parameter_size);

// leave frame and return to caller
__ leave();
__ ret(lr);

// handle return types different from T_INT

__ BIND(is_long);
__ str(r0, Address(j_rarg2, 0));
__ br(Assembler::AL, exit);

__ BIND(is_float);
__ strs(j_farg0, Address(j_rarg2, 0));
__ br(Assembler::AL, exit);

__ BIND(is_double);
__ strd(j_farg0, Address(j_rarg2, 0));
__ br(Assembler::AL, exit);

return start;
}

// Return point for a Java call if there's an exception thrown in
// Java code. The exception is caught and transformed into a
// pending exception stored in JavaThread that can be tested from
// within the VM.
//
// Note: Usually the parameters are removed by the callee. In case
// of an exception crossing an activation frame boundary, that is
// not the case if the callee is compiled code => need to set up
// the sp.
//
// r0: exception oop

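// A minimal sketch of how the VM side then observes the pending
// exception, assuming the standard Thread accessors (illustrative, not
// code from this file):
//
//   if (thread->has_pending_exception()) {
//     oop ex = thread->pending_exception();
//     thread->clear_pending_exception();
//     // ... handle or propagate ex ...
//   }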
address generate_catch_exception() {
  StubCodeMark mark(this, "StubRoutines", "catch_exception");
  address start = __ pc();

// ...

//   |array_tag|     | header_size | element_type |     |log2_element_size|
//   32        30    24            16              8     2                0
//
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//

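// A hedged decode of the encoding sketched above, using the shift
// positions from the diagram (the authoritative values are the
// Klass::_lh_*_shift constants):
//
//   int tag   = ((juint)lh) >> 30; // 0x3 = typeArray, 0x2 = objArray
//   int hsize = (lh >> 16) & 0xFF; // header size in bytes
//   int etype = (lh >> 8) & 0xFF;  // BasicType of the elements
//   int l2es  = lh & 0xFF;         // log2(element size in bytes)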
const int lh_offset = in_bytes(Klass::layout_helper_offset());

// Handle objArrays completely differently...
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ ldrw(lh, Address(scratch_src_klass, lh_offset));
__ movw(rscratch1, objArray_lh);
__ eorw(rscratch2, lh, rscratch1);
__ cbzw(rscratch2, L_objArray);

// if (src->klass() != dst->klass()) return -1;
__ load_klass(rscratch2, dst);
__ eor(rscratch2, rscratch2, scratch_src_klass);
__ cbnz(rscratch2, L_failed);

// if (!src->is_Array()) return -1;
__ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)

// At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
{
  BLOCK_COMMENT("assert primitive array {");
  Label L;
  __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
  __ cmpw(lh, rscratch2);
  __ br(Assembler::GE, L);
  __ stop("must be a primitive array");
  __ bind(L);
  BLOCK_COMMENT("} assert primitive array done");
}
#endif

arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
                       rscratch2, L_failed);

// ...

//     MACC(Ra, Ra, t0, t1, t2);
//   }
//   iters = (2*len-i)/2;
//   assert(iters == len-j, "must be");
//   for (; iters--; j++) {
//     assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
//     MACC(Rm, Rn, t0, t1, t2);
//     Rm = *++Pm;
//     Rn = *--Pn;
//   }
//   Pm_base[i-len] = t0;
//   t0 = t1; t1 = t2; t2 = 0;
// }

// while (t0)
//   t0 = sub(Pm_base, Pn_base, t0, len);
// }
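//
// n.b. the commented loop above is the tail of a word-by-word Montgomery
// multiplication: for len-word operands it accumulates a*b*R^-1 mod n
// with R = 2^(64*len), and the final `while (t0) t0 = sub(...)` is the
// conditional subtraction that brings the accumulated value back below
// the modulus (reading based on the pseudocode fragment; the loop head
// is elided above).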
};


// Initialization
void generate_initial_stubs() {
  // Generate the initial stubs and initialize the entry points.

  // Entry points that exist on all platforms. Note: this is code
  // that could be shared among different platforms - however the
  // benefit seems to be smaller than the disadvantage of having a
  // much more complicated generator structure. See also the comment
  // in stubRoutines.hpp.

  StubRoutines::_forward_exception_entry = generate_forward_exception();

  StubRoutines::_call_stub_entry =
    generate_call_stub(StubRoutines::_call_stub_return_address);

  // is referenced by megamorphic call
  StubRoutines::_catch_exception_entry = generate_catch_exception();

  // Build this early so it's available for the interpreter.
  StubRoutines::_throw_StackOverflowError_entry =
  // ...
    StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
    StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
  }

  if (UseCRC32CIntrinsics) {
    StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
  }

  // Disabled until JDK-8210858 is fixed
  // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
  //   StubRoutines::_dlog = generate_dlog();
  // }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
    StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
  }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
    StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
  }
}

void generate_continuation_stubs() {
  // Continuation stubs:
  StubRoutines::_cont_thaw = generate_cont_thaw();
  StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
  StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();

  JFR_ONLY(generate_jfr_stubs();)
}

#if INCLUDE_JFR
void generate_jfr_stubs() {
  StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
  StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
  StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
  StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR

// ---------------------------------------------------------------------
// The same section as revised for inline types (Valhalla): the call stub
// additionally handles T_PRIMITIVE_OBJECT and scalarized returns, the
// generic arraycopy rejects flat inline-type arrays, and
// generate_return_value_stub is new.
// ---------------------------------------------------------------------

// rmethod: Method*
// r19_sender_sp: sender sp
BLOCK_COMMENT("call Java function");
__ mov(r19_sender_sp, sp);
__ blr(c_rarg4);

// we do this here because the notify will already have been done
// if we get to the next instruction via an exception
//
// n.b. adding this instruction here affects the calculation of
// whether or not a routine returns to the call stub (used when
// doing stack walks) since the normal test is to check the return
// pc against the address saved below. so we may need to allow for
// this extra instruction in the check.

// save current address for use by exception handling code

return_address = __ pc();

// store result depending on type (everything that is not
// T_OBJECT, T_PRIMITIVE_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is
// treated as T_INT)
// n.b. this assumes Java returns an integral result in r0
// and a floating result in j_farg0
// All of j_rarg0..j_rarg7 may be used to return inline type fields,
// so be careful not to clobber those.
// SharedRuntime::generate_buffered_inline_type_adapter() knows the
// register assignment of Rresult below.
Register Rresult = r14, Rresult_type = r15;
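// n.b. r14 and r15 are outside the j_rarg set (j_rarg0..j_rarg7 map onto
// r1..r7 plus r0, as the save layout in generate_return_value_stub below
// also records), so the result address and type survive a scalarized
// return that fills the argument registers.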
__ ldr(Rresult, result);
Label is_long, is_float, is_double, check_prim, exit;
__ ldr(Rresult_type, result_type);
__ cmp(Rresult_type, (u1)T_OBJECT);
__ br(Assembler::EQ, check_prim);
__ cmp(Rresult_type, (u1)T_PRIMITIVE_OBJECT);
__ br(Assembler::EQ, check_prim);
__ cmp(Rresult_type, (u1)T_LONG);
__ br(Assembler::EQ, is_long);
__ cmp(Rresult_type, (u1)T_FLOAT);
__ br(Assembler::EQ, is_float);
__ cmp(Rresult_type, (u1)T_DOUBLE);
__ br(Assembler::EQ, is_double);

// handle T_INT case
__ strw(r0, Address(Rresult));

__ BIND(exit);

// pop parameters
__ sub(esp, rfp, -sp_after_call_off * wordSize);

#ifdef ASSERT
// verify that threads correspond
{
  Label L, S;
  __ ldr(rscratch1, thread);
  __ cmp(rthread, rscratch1);
  __ br(Assembler::NE, S);
  __ get_thread(rscratch1);
  __ cmp(rthread, rscratch1);
  __ br(Assembler::EQ, L);
  __ BIND(S);
  __ stop("StubRoutines::call_stub: threads must correspond");
  __ BIND(L);
}
// ...

__ ldpd(v11, v10, d11_save);
__ ldpd(v9, v8, d9_save);

__ ldp(r28, r27, r28_save);
__ ldp(r26, r25, r26_save);
__ ldp(r24, r23, r24_save);
__ ldp(r22, r21, r22_save);
__ ldp(r20, r19, r20_save);

__ ldp(c_rarg0, c_rarg1, call_wrapper);
__ ldrw(c_rarg2, result_type);
__ ldr(c_rarg3, method);
__ ldp(c_rarg4, c_rarg5, entry_point);
__ ldp(c_rarg6, c_rarg7, parameter_size);

// leave frame and return to caller
__ leave();
__ ret(lr);

// handle return types different from T_INT
__ BIND(check_prim);
if (InlineTypeReturnedAsFields) {
  // Check for scalarized return value
  __ tbz(r0, 0, is_long);
  // Load pack handler address
  __ andr(rscratch1, r0, -2);
  __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
  __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
  __ blr(rscratch1);
  __ b(exit);
}
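// A sketch of the convention the tbz/andr sequence above assumes (the
// tagging scheme is inferred from this code, not restated elsewhere in
// the excerpt):
//
//   if (r0 & 1) {                  // low bit set => scalarized return
//     k = (InlineKlass*)(r0 & ~1); // clear the tag to get the klass
//     call k->pack_handler();      // buffers field regs into an instance
//   }                              // else r0 is an ordinary oop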

__ BIND(is_long);
__ str(r0, Address(Rresult, 0));
__ br(Assembler::AL, exit);

__ BIND(is_float);
__ strs(j_farg0, Address(Rresult, 0));
__ br(Assembler::AL, exit);

__ BIND(is_double);
__ strd(j_farg0, Address(Rresult, 0));
__ br(Assembler::AL, exit);

return start;
}

// Return point for a Java call if there's an exception thrown in
// Java code. The exception is caught and transformed into a
// pending exception stored in JavaThread that can be tested from
// within the VM.
//
// Note: Usually the parameters are removed by the callee. In case
// of an exception crossing an activation frame boundary, that is
// not the case if the callee is compiled code => need to set up
// the sp.
//
// r0: exception oop

address generate_catch_exception() {
  StubCodeMark mark(this, "StubRoutines", "catch_exception");
  address start = __ pc();

// ...

//   |array_tag|     | header_size | element_type |     |log2_element_size|
//   32        30    24            16              8     2                0
//
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//

const int lh_offset = in_bytes(Klass::layout_helper_offset());

// Handle objArrays completely differently...
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ ldrw(lh, Address(scratch_src_klass, lh_offset));
__ movw(rscratch1, objArray_lh);
__ eorw(rscratch2, lh, rscratch1);
__ cbzw(rscratch2, L_objArray);

// if (src->klass() != dst->klass()) return -1;
__ load_klass(rscratch2, dst);
__ eor(rscratch2, rscratch2, scratch_src_klass);
__ cbnz(rscratch2, L_failed);

// Check for flat inline type array -> return -1
__ tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
__ br(Assembler::NE, L_failed);

// Check for null-free (non-flat) inline type array -> handle as object array
__ tst(lh, Klass::_lh_null_free_array_bit_inplace);
__ br(Assembler::NE, L_objArray);
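// n.b. both tests read Valhalla's extra layout-helper bits: a flat
// array cannot be handled by this generic stub at all, while a
// null-free but non-flat array still has ordinary object-array layout
// and can be copied via the L_objArray path.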

// if (!src->is_Array()) return -1;
__ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)

// At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
{
  BLOCK_COMMENT("assert primitive array {");
  Label L;
  __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
  __ cmpw(lh, rscratch2);
  __ br(Assembler::GE, L);
  __ stop("must be a primitive array");
  __ bind(L);
  BLOCK_COMMENT("} assert primitive array done");
}
#endif

arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
                       rscratch2, L_failed);

// ...

//     MACC(Ra, Ra, t0, t1, t2);
//   }
//   iters = (2*len-i)/2;
//   assert(iters == len-j, "must be");
//   for (; iters--; j++) {
//     assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
//     MACC(Rm, Rn, t0, t1, t2);
//     Rm = *++Pm;
//     Rn = *--Pn;
//   }
//   Pm_base[i-len] = t0;
//   t0 = t1; t1 = t2; t2 = 0;
// }

// while (t0)
//   t0 = sub(Pm_base, Pn_base, t0, len);
// }
};


// Called from the interpreter or from compiled code, either to load the
// multiple returned values of an inline type instance into registers or
// to store the returned values into a newly allocated inline type
// instance.
address generate_return_value_stub(address destination, const char* name, bool has_res) {
  // We need to save all registers the calling convention may use, so
  // that the runtime call can read or update them. This needs to be in
  // sync with SharedRuntime::java_return_convention().
  // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
  enum layout {
    j_rarg7_off = 0, j_rarg7_2,    // j_rarg7 is r0
    j_rarg6_off, j_rarg6_2,
    j_rarg5_off, j_rarg5_2,
    j_rarg4_off, j_rarg4_2,
    j_rarg3_off, j_rarg3_2,
    j_rarg2_off, j_rarg2_2,
    j_rarg1_off, j_rarg1_2,
    j_rarg0_off, j_rarg0_2,

    j_farg7_off, j_farg7_2,
    j_farg6_off, j_farg6_2,
    j_farg5_off, j_farg5_2,
    j_farg4_off, j_farg4_2,
    j_farg3_off, j_farg3_2,
    j_farg2_off, j_farg2_2,
    j_farg1_off, j_farg1_2,
    j_farg0_off, j_farg0_2,

    rfp_off, rfp_off2,
    return_off, return_off2,

    framesize // inclusive of return address
  };
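
  // Worked size check for the layout above (arithmetic only, derived
  // from the enum): 8 GP pairs + 8 FP pairs = 32 slots, plus rfp and
  // the return address at 2 slots each gives framesize = 36;
  // 36 * BytesPerInt = 144 bytes, already 16-byte aligned, so the
  // align_up below changes nothing and the "misaligned" assert holds.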

  CodeBuffer code(name, 512, 64);
  MacroAssembler* masm = new MacroAssembler(&code);

  int frame_size_in_bytes = align_up(framesize * BytesPerInt, 16);
  assert(frame_size_in_bytes == framesize * BytesPerInt, "misaligned");
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  int frame_size_in_words = frame_size_in_bytes / wordSize;

  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());

  map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());

  address start = __ pc();

  __ enter(); // Save FP and LR before call

  __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
  __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
  __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
  __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));

  __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
  __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
  __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
  __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));

  int frame_complete = __ offset();

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);

  // Call runtime
  __ mov(c_rarg1, r0);
  __ mov(c_rarg0, rthread);

  __ mov(rscratch1, destination);
  __ blr(rscratch1);

  oop_maps->add_gc_map(the_pc - start, map);
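  // n.b. the map is registered at the_pc, the same pc stored into the
  // last Java frame above, so a stack walk during the runtime call can
  // locate the saved j_rarg/j_farg slots recorded in the oop map.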

  __ reset_last_Java_frame(false);

  __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
  __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
  __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
  __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));

  __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
  __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
  __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
  __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));

  __ leave();

  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  __ cbnz(rscratch1, pending);

  if (has_res) {
    __ get_vm_result(r0, rthread);
  }

  __ ret(lr);

  __ bind(pending);
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
  return stub->entry_point();
}

// Initialization
void generate_initial_stubs() {
  // Generate the initial stubs and initialize the entry points.

  // Entry points that exist on all platforms. Note: this is code
  // that could be shared among different platforms - however the
  // benefit seems to be smaller than the disadvantage of having a
  // much more complicated generator structure. See also the comment
  // in stubRoutines.hpp.

  StubRoutines::_forward_exception_entry = generate_forward_exception();

  StubRoutines::_call_stub_entry =
    generate_call_stub(StubRoutines::_call_stub_return_address);

  // is referenced by megamorphic call
  StubRoutines::_catch_exception_entry = generate_catch_exception();

  // Build this early so it's available for the interpreter.
  StubRoutines::_throw_StackOverflowError_entry =
  // ...
    StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
    StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
  }

  if (UseCRC32CIntrinsics) {
    StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
  }

  // Disabled until JDK-8210858 is fixed
  // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
  //   StubRoutines::_dlog = generate_dlog();
  // }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
    StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
  }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
    StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
  }

  if (InlineTypeReturnedAsFields) {
    StubRoutines::_load_inline_type_fields_in_regs =
      generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
    StubRoutines::_store_inline_type_fields_to_buf =
      generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
  }
}

void generate_continuation_stubs() {
  // Continuation stubs:
  StubRoutines::_cont_thaw = generate_cont_thaw();
  StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
  StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();

  JFR_ONLY(generate_jfr_stubs();)
}

#if INCLUDE_JFR
void generate_jfr_stubs() {
  StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
  StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
  StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
  StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR