  __ mov(r19_sender_sp, sp);
  __ blr(c_rarg4);

  // we do this here because the notify will already have been done
  // if we get to the next instruction via an exception
  //
  // n.b. adding this instruction here affects the calculation of
  // whether or not a routine returns to the call stub (used when
  // doing stack walks) since the normal test is to check the return
  // pc against the address saved below. so we may need to allow for
  // this extra instruction in the check.

  // save current address for use by exception handling code

  return_address = __ pc();

  // store result depending on type (everything that is not
  // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
  // n.b. this assumes Java returns an integral result in r0
  // and a floating result in j_farg0
  // All of j_rargN may be used to return inline type fields so be careful
  // not to clobber those.
  // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
  // assignment of Rresult below.
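  // As a rough pseudo-C sketch (a reading aid for the dispatch below, not
  // the authoritative convention), the stores amount to:
  //
  //   switch (result_type) {
  //     case T_OBJECT: goto check_prim;                  // may be scalarized
  //     case T_LONG:   *(jlong*)result   = (jlong)r0; break;
  //     case T_FLOAT:  *(jfloat*)result  = j_farg0;   break;
  //     case T_DOUBLE: *(jdouble*)result = j_farg0;   break;
  //     default:       *(jint*)result    = (jint)r0;  break; // T_INT et al.
  //   }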
  Register Rresult = r14, Rresult_type = r15;
  __ ldr(Rresult, result);
  Label is_long, is_float, is_double, check_prim, exit;
  __ ldr(Rresult_type, result_type);
  __ cmp(Rresult_type, (u1)T_OBJECT);
  __ br(Assembler::EQ, check_prim);
  __ cmp(Rresult_type, (u1)T_LONG);
  __ br(Assembler::EQ, is_long);
  __ cmp(Rresult_type, (u1)T_FLOAT);
  __ br(Assembler::EQ, is_float);
  __ cmp(Rresult_type, (u1)T_DOUBLE);
  __ br(Assembler::EQ, is_double);

  // handle T_INT case
  __ strw(r0, Address(Rresult));

  __ BIND(exit);

  // pop parameters
  __ sub(esp, rfp, -sp_after_call_off * wordSize);
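  // (sp_after_call_off is a negative frame offset, so subtracting its
  // negation computes rfp + sp_after_call_off * wordSize.)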

#ifdef ASSERT
  // verify that threads correspond
  {
    Label L, S;
    __ ldr(rscratch1, thread);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::NE, S);
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::EQ, L);
    __ BIND(S);
    __ stop("StubRoutines::call_stub: threads must correspond");
    __ BIND(L);
  }
#endif
  __ ldp(r26, r25, r26_save);
  __ ldp(r24, r23, r24_save);
  __ ldp(r22, r21, r22_save);
  __ ldp(r20, r19, r20_save);

  // restore fpcr
  __ ldr(rscratch1, fpcr_save);
  __ set_fpcr(rscratch1);

  __ ldp(c_rarg0, c_rarg1, call_wrapper);
  __ ldrw(c_rarg2, result_type);
  __ ldr(c_rarg3, method);
  __ ldp(c_rarg4, c_rarg5, entry_point);
  __ ldp(c_rarg6, c_rarg7, parameter_size);

  // leave frame and return to caller
  __ leave();
  __ ret(lr);

  // handle return types different from T_INT
  __ BIND(check_prim);
  if (InlineTypeReturnedAsFields) {
    // Check for scalarized return value
    __ tbz(r0, 0, is_long);
    // Load pack handler address
    __ andr(rscratch1, r0, -2);
    __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
    __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
    __ blr(rscratch1);
    __ b(exit);
  }
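  // The tbz above relies on the scalarized-return tagging convention: when an
  // inline type is returned as fields, r0 carries the InlineKlass* with its
  // low bit set (klass pointers are aligned, so bit 0 is free as a tag).
  // Roughly, in pseudo-C:
  //
  //   if (r0 & 1) {                             // tagged => fields in registers
  //     InlineKlass* vk = (InlineKlass*)(r0 & ~1);
  //     call vk->pack_handler_jobject();        // buffers fields, leaves oop in r0
  //   }
  //   // untagged r0 is a plain oop; fall through to the is_long store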

  __ BIND(is_long);
  __ str(r0, Address(Rresult, 0));
  __ br(Assembler::AL, exit);

  __ BIND(is_float);
  __ strs(j_farg0, Address(Rresult, 0));
  __ br(Assembler::AL, exit);

  __ BIND(is_double);
  __ strd(j_farg0, Address(Rresult, 0));
  __ br(Assembler::AL, exit);

  return start;
}

// Return point for a Java call if there's an exception thrown in
// Java code. The exception is caught and transformed into a
// pending exception stored in JavaThread that can be tested from
// within the VM.
//
// Note: Usually the parameters are removed by the callee. In case
// of an exception crossing an activation frame boundary, that is
// not the case if the callee is compiled code => need to set up
// the sp.
//
// r0: exception oop

address generate_catch_exception() {
  StubCodeMark mark(this, "StubRoutines", "catch_exception");
  address start = __ pc();
  //  |array_tag|     | header_size | element_type |     |log2_element_size|
  // 32        30    24            16              8     2                 0
  //
  //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
  //

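  // Decoding the fields, roughly (field positions per the diagram above;
  // a reading aid, not the authoritative Klass::layout_helper_* helpers):
  //
  //   int tag   = (lh >> 30) & 0x3;    // typeArray = 0x3, objArray = 0x2
  //   int hsize = (lh >> 16) & 0xFF;   // array header size in bytes
  //   int etype = (lh >> 8)  & 0xFF;   // BasicType of the elements
  //   int l2esz = lh & 0x3F;           // log2(element size in bytes)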
  const int lh_offset = in_bytes(Klass::layout_helper_offset());

  // Handle objArrays completely differently...
  const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
  __ ldrw(lh, Address(scratch_src_klass, lh_offset));
  __ movw(rscratch1, objArray_lh);
  __ eorw(rscratch2, lh, rscratch1);
  __ cbzw(rscratch2, L_objArray);

  // if (src->klass() != dst->klass()) return -1;
  __ load_klass(rscratch2, dst);
  __ eor(rscratch2, rscratch2, scratch_src_klass);
  __ cbnz(rscratch2, L_failed);

  // Check for flat inline type array -> return -1
  __ test_flat_array_oop(src, rscratch2, L_failed);

  // Check for null-free (non-flat) inline type array -> handle as object array
  __ test_null_free_array_oop(src, rscratch2, L_objArray);

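  // The two Valhalla checks above amount to, in pseudo-C (predicate names
  // are descriptive, not actual oopDesc API):
  //
  //   if (is_flat_array(src))      return -1;       // flat layouts need per-field copies
  //   if (is_null_free_array(src)) goto L_objArray; // non-flat => copy like an objArray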
  // if (!src->is_Array()) return -1;
  __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)

  // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
  {
    BLOCK_COMMENT("assert primitive array {");
    Label L;
    __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
    __ cmpw(lh, rscratch2);
    __ br(Assembler::GE, L);
    __ stop("must be a primitive array");
    __ bind(L);
    BLOCK_COMMENT("} assert primitive array done");
  }
#endif

  arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
                         rscratch2, L_failed);

  //     MACC(Ra, Ra, t0, t1, t2);
  //   }
  //   iters = (2*len-i)/2;
  //   assert(iters == len-j, "must be");
  //   for (; iters--; j++) {
  //     assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
  //     MACC(Rm, Rn, t0, t1, t2);
  //     Rm = *++Pm;
  //     Rn = *--Pn;
  //   }
  //   Pm_base[i-len] = t0;
  //   t0 = t1; t1 = t2; t2 = 0;
  // }

  // while (t0)
  //   t0 = sub(Pm_base, Pn_base, t0, len);
  // }
};


// Call here from the interpreter or compiled code to either load multiple
// returned values from the inline type instance being returned into
// registers, or to store returned values into a newly allocated inline
// type instance.
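// As a rough sketch of the two uses (shapes simplified, not the actual
// signatures; see the entry points installed in generate_initial_stubs):
//
//   load_inline_type_fields_in_regs(thread, buffered_oop) // fields -> j_rarg*/j_farg*
//   store_inline_type_fields_to_buf(thread, tagged_r0)    // registers -> fresh buffer, oop in r0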
address generate_return_value_stub(address destination, const char* name, bool has_res) {
  // We need to save all registers the calling convention may use so that
  // the runtime call can read or update those registers. This needs to
  // be in sync with SharedRuntime::java_return_convention().
  // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
  enum layout {
    j_rarg7_off = 0, j_rarg7_2,    // j_rarg7 is r0
    j_rarg6_off, j_rarg6_2,
    j_rarg5_off, j_rarg5_2,
    j_rarg4_off, j_rarg4_2,
    j_rarg3_off, j_rarg3_2,
    j_rarg2_off, j_rarg2_2,
    j_rarg1_off, j_rarg1_2,
    j_rarg0_off, j_rarg0_2,

    j_farg7_off, j_farg7_2,
    j_farg6_off, j_farg6_2,
    j_farg5_off, j_farg5_2,
    j_farg4_off, j_farg4_2,
    j_farg3_off, j_farg3_2,
    j_farg2_off, j_farg2_2,
    j_farg1_off, j_farg1_2,
    j_farg0_off, j_farg0_2,

    rfp_off, rfp_off2,
    return_off, return_off2,

    framesize // inclusive of return address
  };

  CodeBuffer code(name, 512, 64);
  MacroAssembler* masm = new MacroAssembler(&code);

  int frame_size_in_bytes = align_up(framesize * BytesPerInt, 16);
  assert(frame_size_in_bytes == framesize * BytesPerInt, "misaligned");
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  int frame_size_in_words = frame_size_in_bytes / wordSize;

  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());

  map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
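  // These entries record, for the GC map attached to the call below, where
  // the Java return registers were saved in this frame, so any oops among
  // them can be located and updated if a safepoint occurs during the call.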

  address start = __ pc();

  __ enter(); // Save FP and LR before call

  __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
  __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
  __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
  __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));

  __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
  __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
  __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
  __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
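  // The frame is filled from high to low addresses, so the last pair pushed
  // (j_rarg7, j_rarg6) lands in the lowest slots, matching j_rarg7_off = 0
  // in the layout enum and the stack2reg offsets recorded above.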

  int frame_complete = __ offset();

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
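  // (The recorded anchor keeps this frame walkable during the call below;
  // the same pc is used as the offset for the GC map added afterwards.)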

  // Call runtime
  __ mov(c_rarg1, r0);
  __ mov(c_rarg0, rthread);

  __ mov(rscratch1, destination);
  __ blr(rscratch1);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(false);

  __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
  __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
  __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
  __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));

  __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
  __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
  __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
  __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));

  __ leave();

  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  __ cbnz(rscratch1, pending);

  if (has_res) {
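    // The runtime entry left its oop result in JavaThread::_vm_result;
    // fetch it into r0 (clearing the field) as this stub's return value.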
    __ get_vm_result(r0, rthread);
  }

  __ ret(lr);

  __ bind(pending);
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
  return stub->entry_point();
}

// Initialization
void generate_initial_stubs() {
  // Generates the initial stubs and initializes the entry points.

  // Entry points that exist on all platforms. Note: this is code
  // that could be shared among different platforms - however the
  // benefit seems to be smaller than the disadvantage of having a
  // much more complicated generator structure. See also the comment
  // in stubRoutines.hpp.

  StubRoutines::_forward_exception_entry = generate_forward_exception();

  StubRoutines::_call_stub_entry =
    generate_call_stub(StubRoutines::_call_stub_return_address);

  // is referenced by megamorphic call
  StubRoutines::_catch_exception_entry = generate_catch_exception();

  // Build this early so it's available for the interpreter.
  StubRoutines::_throw_StackOverflowError_entry =
    generate_throw_exception("StackOverflowError throw_exception",
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));

  if (UseCRC32Intrinsics) {
    StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
  }

  if (UseCRC32CIntrinsics) {
    StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
  }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
    StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
  }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
    StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
  }

  if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
      vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
    StubRoutines::_hf2f = generate_float16ToFloat();
    StubRoutines::_f2hf = generate_floatToFloat16();
  }

  if (InlineTypeReturnedAsFields) {
    StubRoutines::_load_inline_type_fields_in_regs =
      generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
    StubRoutines::_store_inline_type_fields_to_buf =
      generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
  }
}

void generate_continuation_stubs() {
  // Continuation stubs:
  StubRoutines::_cont_thaw             = generate_cont_thaw();
  StubRoutines::_cont_returnBarrier    = generate_cont_returnBarrier();
  StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();

  JFR_ONLY(generate_jfr_stubs();)
}

#if INCLUDE_JFR
void generate_jfr_stubs() {
  StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
  StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
  StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
  StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR