311 __ mov(r19_sender_sp, sp);
312 __ blr(c_rarg4);
313
314 // we do this here because the notify will already have been done
315 // if we get to the next instruction via an exception
316 //
317 // n.b. adding this instruction here affects the calculation of
318 // whether or not a routine returns to the call stub (used when
319 // doing stack walks) since the normal test is to check the return
320 // pc against the address saved below. so we may need to allow for
321 // this extra instruction in the check.
322
323 // save current address for use by exception handling code
324
325 return_address = __ pc();
326
327 // store result depending on type (everything that is not
328 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
329 // n.b. this assumes Java returns an integral result in r0
330 // and a floating result in j_farg0
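// In C-like pseudocode the dispatch below is roughly (a sketch using
// this stub's locals, not a VM API):
//   switch (result_type) {
//     case T_OBJECT:
//     case T_LONG:   *(jlong*)result   = r0;      break;
//     case T_FLOAT:  *(jfloat*)result  = j_farg0; break;
//     case T_DOUBLE: *(jdouble*)result = j_farg0; break;
//     default:       *(jint*)result    = (jint)r0; break; // T_INT et al.
//   }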
331 __ ldr(j_rarg2, result);
332 Label is_long, is_float, is_double, exit;
333 __ ldr(j_rarg1, result_type);
334 __ cmp(j_rarg1, (u1)T_OBJECT);
335 __ br(Assembler::EQ, is_long);
336 __ cmp(j_rarg1, (u1)T_LONG);
337 __ br(Assembler::EQ, is_long);
338 __ cmp(j_rarg1, (u1)T_FLOAT);
339 __ br(Assembler::EQ, is_float);
340 __ cmp(j_rarg1, (u1)T_DOUBLE);
341 __ br(Assembler::EQ, is_double);
342
343 // handle T_INT case
344 __ strw(r0, Address(j_rarg2));
345
346 __ BIND(exit);
347
348 // pop parameters
349 __ sub(esp, rfp, -sp_after_call_off * wordSize);
350
351 #ifdef ASSERT
352 // verify that threads correspond
353 {
354 Label L, S;
355 __ ldr(rscratch1, thread);
356 __ cmp(rthread, rscratch1);
357 __ br(Assembler::NE, S);
358 __ get_thread(rscratch1);
359 __ cmp(rthread, rscratch1);
360 __ br(Assembler::EQ, L);
361 __ BIND(S);
362 __ stop("StubRoutines::call_stub: threads must correspond");
363 __ BIND(L);
364 }
376 __ ldp(r26, r25, r26_save);
377 __ ldp(r24, r23, r24_save);
378 __ ldp(r22, r21, r22_save);
379 __ ldp(r20, r19, r20_save);
380
381 // restore fpcr
382 __ ldr(rscratch1, fpcr_save);
383 __ set_fpcr(rscratch1);
384
385 __ ldp(c_rarg0, c_rarg1, call_wrapper);
386 __ ldrw(c_rarg2, result_type);
387 __ ldr(c_rarg3, method);
388 __ ldp(c_rarg4, c_rarg5, entry_point);
389 __ ldp(c_rarg6, c_rarg7, parameter_size);
390
391 // leave frame and return to caller
392 __ leave();
393 __ ret(lr);
394
395 // handle return types different from T_INT
396
397 __ BIND(is_long);
398 __ str(r0, Address(j_rarg2, 0));
399 __ br(Assembler::AL, exit);
400
401 __ BIND(is_float);
402 __ strs(j_farg0, Address(j_rarg2, 0));
403 __ br(Assembler::AL, exit);
404
405 __ BIND(is_double);
406 __ strd(j_farg0, Address(j_rarg2, 0));
407 __ br(Assembler::AL, exit);
408
409 return start;
410 }
411
412 // Return point for a Java call if there's an exception thrown in
413 // Java code. The exception is caught and transformed into a
414 // pending exception stored in JavaThread that can be tested from
415 // within the VM.
416 //
417 // Note: Usually the parameters are removed by the callee. In case
418 // of an exception crossing an activation frame boundary, that is
419 // not the case if the callee is compiled code => need to setup the
420 // rsp.
421 //
422 // r0: exception oop
423
424 address generate_catch_exception() {
425 StubCodeMark mark(this, "StubRoutines", "catch_exception");
426 address start = __ pc();
2199 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2200 // 32          30    24            16              8     2                 0
2201 //
2202 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2203 //
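// For example, with compressed oops an Object[] klass typically has
// lh == 0x80100C02: array_tag 0x2, header_size 16, element_type
// T_OBJECT (0x0c), log2_element_size 2. (Illustrative only; the exact
// header size depends on the build's object header layout.)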
2204
2205 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2206
2207 // Handle objArrays completely differently...
2208 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2209 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2210 __ movw(rscratch1, objArray_lh);
2211 __ eorw(rscratch2, lh, rscratch1);
2212 __ cbzw(rscratch2, L_objArray);
2213
2214 // if (src->klass() != dst->klass()) return -1;
2215 __ load_klass(rscratch2, dst);
2216 __ eor(rscratch2, rscratch2, scratch_src_klass);
2217 __ cbnz(rscratch2, L_failed);
2218
2219 // if (!src->is_Array()) return -1;
2220 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2221
2222 // At this point, it is known to be a typeArray (array_tag 0x3).
2223 #ifdef ASSERT
2224 {
2225 BLOCK_COMMENT("assert primitive array {");
2226 Label L;
2227 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2228 __ cmpw(lh, rscratch2);
2229 __ br(Assembler::GE, L);
2230 __ stop("must be a primitive array");
2231 __ bind(L);
2232 BLOCK_COMMENT("} assert primitive array done");
2233 }
2234 #endif
2235
2236 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2237 rscratch2, L_failed);
2238
8273 // MACC(Ra, Ra, t0, t1, t2);
8274 // }
8275 // iters = (2*len-i)/2;
8276 // assert(iters == len-j, "must be");
8277 // for (; iters--; j++) {
8278 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
8279 // MACC(Rm, Rn, t0, t1, t2);
8280 // Rm = *++Pm;
8281 // Rn = *--Pn;
8282 // }
8283 // Pm_base[i-len] = t0;
8284 // t0 = t1; t1 = t2; t2 = 0;
8285 // }
8286
8287 // while (t0)
8288 // t0 = sub(Pm_base, Pn_base, t0, len);
8289 // }
8290 };
8291
8292
8293 // Initialization
8294 void generate_initial_stubs() {
8295 // Generates the initial stubs and initializes the entry points
8296
8297 // Entry points that exist on all platforms. Note: this is code
8298 // that could be shared among different platforms; however, the
8299 // benefit seems smaller than the disadvantage of having a much
8300 // more complicated generator structure. See also the comment in
8301 // stubRoutines.hpp.
8302
8303 StubRoutines::_forward_exception_entry = generate_forward_exception();
8304
8305 StubRoutines::_call_stub_entry =
8306 generate_call_stub(StubRoutines::_call_stub_return_address);
8307
8308 // is referenced by megamorphic call
8309 StubRoutines::_catch_exception_entry = generate_catch_exception();
8310
8311 // Build this early so it's available for the interpreter.
8312 StubRoutines::_throw_StackOverflowError_entry =
8328 StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
8329 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8330 }
8331
8332 if (UseCRC32CIntrinsics) {
8333 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8334 }
8335
8336 // Disabled until JDK-8210858 is fixed
8337 // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
8338 // StubRoutines::_dlog = generate_dlog();
8339 // }
8340
8341 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8342 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8343 }
8344
8345 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8346 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8347 }
8348 }
8349
8350 void generate_continuation_stubs() {
8351 // Continuation stubs:
8352 StubRoutines::_cont_thaw = generate_cont_thaw();
8353 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8354 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8355
8356 JFR_ONLY(generate_jfr_stubs();)
8357 }
8358
8359 #if INCLUDE_JFR
8360 void generate_jfr_stubs() {
8361 StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
8362 StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
8363 StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
8364 StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
8365 }
8366 #endif // INCLUDE_JFR
8367
|
311 __ mov(r19_sender_sp, sp);
312 __ blr(c_rarg4);
313
314 // we do this here because the notify will already have been done
315 // if we get to the next instruction via an exception
316 //
317 // n.b. adding this instruction here affects the calculation of
318 // whether or not a routine returns to the call stub (used when
319 // doing stack walks) since the normal test is to check the return
320 // pc against the address saved below. so we may need to allow for
321 // this extra instruction in the check.
322
323 // save current address for use by exception handling code
324
325 return_address = __ pc();
326
327 // store result depending on type (everything that is not
328 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
329 // n.b. this assumes Java returns an integral result in r0
330 // and a floating result in j_farg0
331 // All of the j_rargN registers may be used to return inline type
332 // fields, so be careful not to clobber them.
333 // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
334 // assignment of Rresult below.
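// Roughly, the dispatch below is (a sketch; check_prim covers the case
// where a T_OBJECT return is actually a scalarized inline type):
//   if      (result_type == T_OBJECT) goto check_prim;
//   else if (result_type == T_LONG)   goto is_long;
//   else if (result_type == T_FLOAT)  goto is_float;
//   else if (result_type == T_DOUBLE) goto is_double;
//   else    *(jint*)Rresult = (jint)r0;  // T_INT et al.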
335 Register Rresult = r14, Rresult_type = r15;
336 __ ldr(Rresult, result);
337 Label is_long, is_float, is_double, check_prim, exit;
338 __ ldr(Rresult_type, result_type);
339 __ cmp(Rresult_type, (u1)T_OBJECT);
340 __ br(Assembler::EQ, check_prim);
341 __ cmp(Rresult_type, (u1)T_LONG);
342 __ br(Assembler::EQ, is_long);
343 __ cmp(Rresult_type, (u1)T_FLOAT);
344 __ br(Assembler::EQ, is_float);
345 __ cmp(Rresult_type, (u1)T_DOUBLE);
346 __ br(Assembler::EQ, is_double);
347
348 // handle T_INT case
349 __ strw(r0, Address(Rresult));
350
351 __ BIND(exit);
352
353 // pop parameters
354 __ sub(esp, rfp, -sp_after_call_off * wordSize);
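// (sp_after_call_off is a negative frame offset, so this computes
// esp = rfp + sp_after_call_off * wordSize.)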
355
356 #ifdef ASSERT
357 // verify that threads correspond
358 {
359 Label L, S;
360 __ ldr(rscratch1, thread);
361 __ cmp(rthread, rscratch1);
362 __ br(Assembler::NE, S);
363 __ get_thread(rscratch1);
364 __ cmp(rthread, rscratch1);
365 __ br(Assembler::EQ, L);
366 __ BIND(S);
367 __ stop("StubRoutines::call_stub: threads must correspond");
368 __ BIND(L);
369 }
381 __ ldp(r26, r25, r26_save);
382 __ ldp(r24, r23, r24_save);
383 __ ldp(r22, r21, r22_save);
384 __ ldp(r20, r19, r20_save);
385
386 // restore fpcr
387 __ ldr(rscratch1, fpcr_save);
388 __ set_fpcr(rscratch1);
389
390 __ ldp(c_rarg0, c_rarg1, call_wrapper);
391 __ ldrw(c_rarg2, result_type);
392 __ ldr(c_rarg3, method);
393 __ ldp(c_rarg4, c_rarg5, entry_point);
394 __ ldp(c_rarg6, c_rarg7, parameter_size);
395
396 // leave frame and return to caller
397 __ leave();
398 __ ret(lr);
399
400 // handle return types different from T_INT
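// When InlineTypeReturnedAsFields is enabled, a callee returning an
// inline type may return the field values in registers, tagging r0
// with the corresponding InlineKlass* (low bit set). The tag check
// below distinguishes that case from an ordinary oop in r0, which is
// stored just like a T_LONG result.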
401 __ BIND(check_prim);
402 if (InlineTypeReturnedAsFields) {
403 // Check for scalarized return value
404 __ tbz(r0, 0, is_long);
405 // Load pack handler address
406 __ andr(rscratch1, r0, -2);
407 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
408 __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
409 __ blr(rscratch1);
410 __ b(exit);
411 }
412
413 __ BIND(is_long);
414 __ str(r0, Address(Rresult, 0));
415 __ br(Assembler::AL, exit);
416
417 __ BIND(is_float);
418 __ strs(j_farg0, Address(Rresult, 0));
419 __ br(Assembler::AL, exit);
420
421 __ BIND(is_double);
422 __ strd(j_farg0, Address(Rresult, 0));
423 __ br(Assembler::AL, exit);
424
425 return start;
426 }
427
428 // Return point for a Java call if there's an exception thrown in
429 // Java code. The exception is caught and transformed into a
430 // pending exception stored in JavaThread that can be tested from
431 // within the VM.
432 //
433 // Note: Usually the parameters are removed by the callee. In case
434 // of an exception crossing an activation frame boundary, that is
435 // not the case if the callee is compiled code => need to setup the
436 // rsp.
437 //
438 // r0: exception oop
439
440 address generate_catch_exception() {
441 StubCodeMark mark(this, "StubRoutines", "catch_exception");
442 address start = __ pc();
2215 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2216 // 32          30    24            16              8     2                 0
2217 //
2218 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2219 //
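// For example, an int[] klass typically has lh == 0xC0100A02:
// array_tag 0x3, header_size 16, element_type T_INT (0x0a),
// log2_element_size 2. (Illustrative only; the exact header size
// depends on the build's object header layout.)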
2220
2221 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2222
2223 // Handle objArrays completely differently...
2224 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2225 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2226 __ movw(rscratch1, objArray_lh);
2227 __ eorw(rscratch2, lh, rscratch1);
2228 __ cbzw(rscratch2, L_objArray);
2229
2230 // if (src->klass() != dst->klass()) return -1;
2231 __ load_klass(rscratch2, dst);
2232 __ eor(rscratch2, rscratch2, scratch_src_klass);
2233 __ cbnz(rscratch2, L_failed);
2234
2235 // Check for flat inline type array -> return -1
2236 __ tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2237 __ br(Assembler::NE, L_failed);
2238
2239 // Check for null-free (non-flat) inline type array -> handle as object array
2240 __ tst(lh, Klass::_lh_null_free_array_bit_inplace);
2241 __ br(Assembler::NE, L_objArray);
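// In pseudocode, the two checks above are roughly:
//   if (lh & flat_bit)      return -1;        // flat: layout needs the runtime
//   if (lh & null_free_bit) goto L_objArray;  // reference array, store-checked copy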
2242
2243 // if (!src->is_Array()) return -1;
2244 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2245
2246 // At this point, it is known to be a typeArray (array_tag 0x3).
2247 #ifdef ASSERT
2248 {
2249 BLOCK_COMMENT("assert primitive array {");
2250 Label L;
2251 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2252 __ cmpw(lh, rscratch2);
2253 __ br(Assembler::GE, L);
2254 __ stop("must be a primitive array");
2255 __ bind(L);
2256 BLOCK_COMMENT("} assert primitive array done");
2257 }
2258 #endif
2259
2260 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2261 rscratch2, L_failed);
2262
8297 // MACC(Ra, Ra, t0, t1, t2);
8298 // }
8299 // iters = (2*len-i)/2;
8300 // assert(iters == len-j, "must be");
8301 // for (; iters--; j++) {
8302 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
8303 // MACC(Rm, Rn, t0, t1, t2);
8304 // Rm = *++Pm;
8305 // Rn = *--Pn;
8306 // }
8307 // Pm_base[i-len] = t0;
8308 // t0 = t1; t1 = t2; t2 = 0;
8309 // }
8310
8311 // while (t0)
8312 // t0 = sub(Pm_base, Pn_base, t0, len);
8313 // }
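// (In the pseudocode above, MACC(A, B, t0, t1, t2) denotes a
// multiply-accumulate into the three-word accumulator {t2:t1:t0},
// roughly {t2:t1:t0} += (unsigned __int128)A * B with carries
// propagating from t0 up to t2.)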
8314 };
8315
8316
8317 // Called from the interpreter or from compiled code either to load
8318 // the field values of a returned inline type instance into registers,
8319 // or to store values returned in registers into a newly allocated
8320 // inline type instance.
8321 address generate_return_value_stub(address destination, const char* name, bool has_res) {
8322 // We need to save all registers the calling convention may use so
8323 // that the runtime call can read or update those registers. This
8324 // needs to be in sync with SharedRuntime::java_return_convention().
8325 // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
8326 enum layout {
8327 j_rarg7_off = 0, j_rarg7_2, // j_rarg7 is r0
8328 j_rarg6_off, j_rarg6_2,
8329 j_rarg5_off, j_rarg5_2,
8330 j_rarg4_off, j_rarg4_2,
8331 j_rarg3_off, j_rarg3_2,
8332 j_rarg2_off, j_rarg2_2,
8333 j_rarg1_off, j_rarg1_2,
8334 j_rarg0_off, j_rarg0_2,
8335
8336 j_farg7_off, j_farg7_2,
8337 j_farg6_off, j_farg6_2,
8338 j_farg5_off, j_farg5_2,
8339 j_farg4_off, j_farg4_2,
8340 j_farg3_off, j_farg3_2,
8341 j_farg2_off, j_farg2_2,
8342 j_farg1_off, j_farg1_2,
8343 j_farg0_off, j_farg0_2,
8344
8345 rfp_off, rfp_off2,
8346 return_off, return_off2,
8347
8348 framesize // inclusive of return address
8349 };
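// The enum above gives 32-bit slot indices into the save area built by
// the pushes below; a sketch of the resulting frame, lowest address
// first (two slots per 64-bit register):
//   sp + 0   : j_rarg7 .. j_rarg0
//   sp + 64  : j_farg7 .. j_farg0
//   sp + 128 : saved rfp, saved lr   (pushed by enter())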
8350
8351 CodeBuffer code(name, 512, 64);
8352 MacroAssembler* masm = new MacroAssembler(&code);
8353
8354 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
8355 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
8356 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
8357 int frame_size_in_words = frame_size_in_bytes / wordSize;
8358
8359 OopMapSet* oop_maps = new OopMapSet();
8360 OopMap* map = new OopMap(frame_size_in_slots, 0);
8361
8362 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
8363 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
8364 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
8365 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
8366 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
8367 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
8368 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
8369 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
8370
8371 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
8372 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
8373 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
8374 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
8375 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
8376 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
8377 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
8378 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
8379
8380 address start = __ pc();
8381
8382 __ enter(); // Save FP and LR before call
8383
8384 __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
8385 __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
8386 __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
8387 __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
8388
8389 __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
8390 __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
8391 __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
8392 __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
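// Note: the push order above must mirror the enum offsets (j_rarg7
// ends up in slot 0, the lowest address) so the oop map entries
// recorded above line up with the actual stack slots.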
8393
8394 int frame_complete = __ offset();
8395
8396 // Set up last_Java_sp and last_Java_fp
8397 address the_pc = __ pc();
8398 __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
8399
8400 // Call runtime
8401 __ mov(c_rarg1, r0);
8402 __ mov(c_rarg0, rthread);
8403
8404 __ mov(rscratch1, destination);
8405 __ blr(rscratch1);
8406
8407 oop_maps->add_gc_map(the_pc - start, map);
8408
8409 __ reset_last_Java_frame(false);
8410
8411 __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
8412 __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
8413 __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
8414 __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
8415
8416 __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
8417 __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
8418 __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
8419 __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
8420
8421 __ leave();
8422
8423 // check for pending exceptions
8424 Label pending;
8425 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
8426 __ cbnz(rscratch1, pending);
8427
8428 if (has_res) {
8429 __ get_vm_result(r0, rthread);
8430 }
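// get_vm_result() loads the oop the runtime stashed in the thread-local
// vm_result field (clearing the field), so a buffered inline type
// instance comes back in r0.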
8431
8432 __ ret(lr);
8433
8434 __ bind(pending);
8435 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
8436
8437 // -------------
8438 // make sure all code is generated
8439 masm->flush();
8440
8441 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
8442 return stub->entry_point();
8443 }
8444
8445 // Initialization
8446 void generate_initial_stubs() {
8447 // Generates the initial stubs and initializes the entry points
8448
8449 // Entry points that exist on all platforms. Note: this is code
8450 // that could be shared among different platforms; however, the
8451 // benefit seems smaller than the disadvantage of having a much
8452 // more complicated generator structure. See also the comment in
8453 // stubRoutines.hpp.
8454
8455 StubRoutines::_forward_exception_entry = generate_forward_exception();
8456
8457 StubRoutines::_call_stub_entry =
8458 generate_call_stub(StubRoutines::_call_stub_return_address);
8459
8460 // is referenced by megamorphic call
8461 StubRoutines::_catch_exception_entry = generate_catch_exception();
8462
8463 // Build this early so it's available for the interpreter.
8464 StubRoutines::_throw_StackOverflowError_entry =
8480 StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
8481 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8482 }
8483
8484 if (UseCRC32CIntrinsics) {
8485 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8486 }
8487
8488 // Disabled until JDK-8210858 is fixed
8489 // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
8490 // StubRoutines::_dlog = generate_dlog();
8491 // }
8492
8493 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8494 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8495 }
8496
8497 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8498 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8499 }
8500
8501 if (InlineTypeReturnedAsFields) {
8502 StubRoutines::_load_inline_type_fields_in_regs =
8503 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
8504 StubRoutines::_store_inline_type_fields_to_buf =
8505 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
8506 }
8507 }
8508
8509 void generate_continuation_stubs() {
8510 // Continuation stubs:
8511 StubRoutines::_cont_thaw = generate_cont_thaw();
8512 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8513 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8514
8515 JFR_ONLY(generate_jfr_stubs();)
8516 }
8517
8518 #if INCLUDE_JFR
8519 void generate_jfr_stubs() {
8520 StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
8521 StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
8522 StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
8523 StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
8524 }
8525 #endif // INCLUDE_JFR
8526
|