308 __ mov(r19_sender_sp, sp);
309 __ blr(c_rarg4);
310
311 // we do this here because the notify will already have been done
312 // if we get to the next instruction via an exception
313 //
314 // n.b. adding this instruction here affects the calculation of
315 // whether or not a routine returns to the call stub (used when
316 // doing stack walks) since the normal test is to check the return
317 // pc against the address saved below. so we may need to allow for
318 // this extra instruction in the check.
319
320 // save current address for use by exception handling code
321
322 return_address = __ pc();
323
324 // store result depending on type (everything that is not
325 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
326 // n.b. this assumes Java returns an integral result in r0
327 // and a floating result in j_farg0
328 __ ldr(j_rarg2, result);
329 Label is_long, is_float, is_double, exit;
330 __ ldr(j_rarg1, result_type);
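// n.b. a T_OBJECT result is a 64-bit oop in r0, so it is stored via the
// same path as T_LONG below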
331 __ cmp(j_rarg1, (u1)T_OBJECT);
332 __ br(Assembler::EQ, is_long);
333 __ cmp(j_rarg1, (u1)T_LONG);
334 __ br(Assembler::EQ, is_long);
335 __ cmp(j_rarg1, (u1)T_FLOAT);
336 __ br(Assembler::EQ, is_float);
337 __ cmp(j_rarg1, (u1)T_DOUBLE);
338 __ br(Assembler::EQ, is_double);
339
340 // handle T_INT case
341 __ strw(r0, Address(j_rarg2));
342
343 __ BIND(exit);
344
345 // pop parameters
346 __ sub(esp, rfp, -sp_after_call_off * wordSize);
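// (this resets esp to the sp_after_call position laid out in this stub's
//  frame, dropping the parameters that were pushed before the call)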
347
348 #ifdef ASSERT
349 // verify that threads correspond
350 {
351 Label L, S;
352 __ ldr(rscratch1, thread);
353 __ cmp(rthread, rscratch1);
354 __ br(Assembler::NE, S);
355 __ get_thread(rscratch1);
356 __ cmp(rthread, rscratch1);
357 __ br(Assembler::EQ, L);
358 __ BIND(S);
359 __ stop("StubRoutines::call_stub: threads must correspond");
360 __ BIND(L);
361 }
373 __ ldp(r26, r25, r26_save);
374 __ ldp(r24, r23, r24_save);
375 __ ldp(r22, r21, r22_save);
376 __ ldp(r20, r19, r20_save);
377
378 // restore fpcr
379 __ ldr(rscratch1, fpcr_save);
380 __ set_fpcr(rscratch1);
381
382 __ ldp(c_rarg0, c_rarg1, call_wrapper);
383 __ ldrw(c_rarg2, result_type);
384 __ ldr(c_rarg3, method);
385 __ ldp(c_rarg4, c_rarg5, entry_point);
386 __ ldp(c_rarg6, c_rarg7, parameter_size);
387
388 // leave frame and return to caller
389 __ leave();
390 __ ret(lr);
391
392 // handle return types different from T_INT
393
394 __ BIND(is_long);
395 __ str(r0, Address(j_rarg2, 0));
396 __ br(Assembler::AL, exit);
397
398 __ BIND(is_float);
399 __ strs(j_farg0, Address(j_rarg2, 0));
400 __ br(Assembler::AL, exit);
401
402 __ BIND(is_double);
403 __ strd(j_farg0, Address(j_rarg2, 0));
404 __ br(Assembler::AL, exit);
405
406 return start;
407 }
408
409 // Return point for a Java call if there's an exception thrown in
410 // Java code. The exception is caught and transformed into a
411 // pending exception stored in JavaThread that can be tested from
412 // within the VM.
413 //
414 // Note: Usually the parameters are removed by the callee. In case
415 // of an exception crossing an activation frame boundary, that is
416 // not the case if the callee is compiled code => need to set up the
417 // sp.
418 //
419 // r0: exception oop
420
421 address generate_catch_exception() {
422 StubCodeMark mark(this, "StubRoutines", "catch_exception");
423 address start = __ pc();
2196 //   |array_tag|     | header_size | element_type |     |log2_element_size|
2197 //   32        30    24            16              8     2                 0
2198 //
2199 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2200 //
2201
2202 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2203
2204 // Handle objArrays completely differently...
2205 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2206 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2207 __ movw(rscratch1, objArray_lh);
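// lh == objArray_lh exactly when (lh ^ objArray_lh) == 0, so a zero
// eorw result below identifies src as an objArray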
2208 __ eorw(rscratch2, lh, rscratch1);
2209 __ cbzw(rscratch2, L_objArray);
2210
2211 // if (src->klass() != dst->klass()) return -1;
2212 __ load_klass(rscratch2, dst);
2213 __ eor(rscratch2, rscratch2, scratch_src_klass);
2214 __ cbnz(rscratch2, L_failed);
2215
2216 // if (!src->is_Array()) return -1;
2217 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2218
2219 // At this point, it is known to be a typeArray (array_tag 0x3).
2220 #ifdef ASSERT
2221 {
2222 BLOCK_COMMENT("assert primitive array {");
2223 Label L;
2224 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2225 __ cmpw(lh, rscratch2);
2226 __ br(Assembler::GE, L);
2227 __ stop("must be a primitive array");
2228 __ bind(L);
2229 BLOCK_COMMENT("} assert primitive array done");
2230 }
2231 #endif
2232
2233 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2234 rscratch2, L_failed);
2235
8152 // MACC(Ra, Ra, t0, t1, t2);
8153 // }
8154 // iters = (2*len-i)/2;
8155 // assert(iters == len-j, "must be");
8156 // for (; iters--; j++) {
8157 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
8158 // MACC(Rm, Rn, t0, t1, t2);
8159 // Rm = *++Pm;
8160 // Rn = *--Pn;
8161 // }
8162 // Pm_base[i-len] = t0;
8163 // t0 = t1; t1 = t2; t2 = 0;
8164 // }
8165
8166 // while (t0)
8167 // t0 = sub(Pm_base, Pn_base, t0, len);
8168 // }
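// (the three-word accumulator t2:t1:t0 holds the running column sum; after
//  each column the low word is emitted into Pm_base and the accumulator
//  shifts down one word. The trailing while loop is the final Montgomery
//  reduction: keep subtracting the modulus Pn while a carry word remains.)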
8169 };
8170
8171
8172 // Initialization
8173 void generate_initial_stubs() {
8174 // Generate initial stubs and initialize the entry points
8175
8176 // entry points that exist on all platforms. Note: This is code
8177 // that could be shared among different platforms - however, the
8178 // benefit seems to be smaller than the disadvantage of having a
8179 // much more complicated generator structure. See also comment in
8180 // stubRoutines.hpp.
8181
8182 StubRoutines::_forward_exception_entry = generate_forward_exception();
8183
8184 StubRoutines::_call_stub_entry =
8185 generate_call_stub(StubRoutines::_call_stub_return_address);
8186
8187 // is referenced by megamorphic call
8188 StubRoutines::_catch_exception_entry = generate_catch_exception();
8189
8190 // Initialize table for copy memory (arraycopy) check.
8191 if (UnsafeMemoryAccess::_table == nullptr) {
8198 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8199 }
8200
8201 if (UseCRC32CIntrinsics) {
8202 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8203 }
8204
8205 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8206 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8207 }
8208
8209 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8210 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8211 }
8212
8213 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
8214 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
8215 StubRoutines::_hf2f = generate_float16ToFloat();
8216 StubRoutines::_f2hf = generate_floatToFloat16();
8217 }
8218 }
8219
8220 void generate_continuation_stubs() {
8221 // Continuation stubs:
8222 StubRoutines::_cont_thaw = generate_cont_thaw();
8223 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8224 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8225 }
8226
8227 void generate_final_stubs() {
8228 // support for verify_oop (must happen after universe_init)
8229 if (VerifyOops) {
8230 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
8231 }
8232
8233 // arraycopy stubs used by compilers
8234 generate_arraycopy_stubs();
8235
8236 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
8237 if (bs_nm != nullptr) {
308 __ mov(r19_sender_sp, sp);
309 __ blr(c_rarg4);
310
311 // we do this here because the notify will already have been done
312 // if we get to the next instruction via an exception
313 //
314 // n.b. adding this instruction here affects the calculation of
315 // whether or not a routine returns to the call stub (used when
316 // doing stack walks) since the normal test is to check the return
317 // pc against the address saved below. so we may need to allow for
318 // this extra instruction in the check.
319
320 // save current address for use by exception handling code
321
322 return_address = __ pc();
323
324 // store result depending on type (everything that is not
325 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
326 // n.b. this assumes Java returns an integral result in r0
327 // and a floating result in j_farg0
328 // All of j_rargN may be used to return inline type fields, so be careful
329 // not to clobber them.
330 // SharedRuntime::generate_buffered_inline_type_adapter() relies on the
331 // register assignment of Rresult below.
332 Register Rresult = r14, Rresult_type = r15;
333 __ ldr(Rresult, result);
334 Label is_long, is_float, is_double, check_prim, exit;
335 __ ldr(Rresult_type, result_type);
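// n.b. a T_OBJECT result may be a scalarized inline type, so it is routed
// through check_prim first; plain oops fall through from there to is_long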
336 __ cmp(Rresult_type, (u1)T_OBJECT);
337 __ br(Assembler::EQ, check_prim);
338 __ cmp(Rresult_type, (u1)T_LONG);
339 __ br(Assembler::EQ, is_long);
340 __ cmp(Rresult_type, (u1)T_FLOAT);
341 __ br(Assembler::EQ, is_float);
342 __ cmp(Rresult_type, (u1)T_DOUBLE);
343 __ br(Assembler::EQ, is_double);
344
345 // handle T_INT case
346 __ strw(r0, Address(Rresult));
347
348 __ BIND(exit);
349
350 // pop parameters
351 __ sub(esp, rfp, -sp_after_call_off * wordSize);
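// (this resets esp to the sp_after_call position laid out in this stub's
//  frame, dropping the parameters that were pushed before the call)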
352
353 #ifdef ASSERT
354 // verify that threads correspond
355 {
356 Label L, S;
357 __ ldr(rscratch1, thread);
358 __ cmp(rthread, rscratch1);
359 __ br(Assembler::NE, S);
360 __ get_thread(rscratch1);
361 __ cmp(rthread, rscratch1);
362 __ br(Assembler::EQ, L);
363 __ BIND(S);
364 __ stop("StubRoutines::call_stub: threads must correspond");
365 __ BIND(L);
366 }
378 __ ldp(r26, r25, r26_save);
379 __ ldp(r24, r23, r24_save);
380 __ ldp(r22, r21, r22_save);
381 __ ldp(r20, r19, r20_save);
382
383 // restore fpcr
384 __ ldr(rscratch1, fpcr_save);
385 __ set_fpcr(rscratch1);
386
387 __ ldp(c_rarg0, c_rarg1, call_wrapper);
388 __ ldrw(c_rarg2, result_type);
389 __ ldr(c_rarg3, method);
390 __ ldp(c_rarg4, c_rarg5, entry_point);
391 __ ldp(c_rarg6, c_rarg7, parameter_size);
392
393 // leave frame and return to caller
394 __ leave();
395 __ ret(lr);
396
397 // handle return types different from T_INT
398 __ BIND(check_prim);
399 if (InlineTypeReturnedAsFields) {
400 // Check for scalarized return value
401 __ tbz(r0, 0, is_long);
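// r0 carries the returned inline type's klass with its low bit set as a
// tag; clear the tag to recover the klass pointer before loading the
// pack handler that buffers the returned field values into a heap instance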
402 // Load pack handler address
403 __ andr(rscratch1, r0, -2);
404 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
405 __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
406 __ blr(rscratch1);
407 __ b(exit);
408 }
409
410 __ BIND(is_long);
411 __ str(r0, Address(Rresult, 0));
412 __ br(Assembler::AL, exit);
413
414 __ BIND(is_float);
415 __ strs(j_farg0, Address(Rresult, 0));
416 __ br(Assembler::AL, exit);
417
418 __ BIND(is_double);
419 __ strd(j_farg0, Address(Rresult, 0));
420 __ br(Assembler::AL, exit);
421
422 return start;
423 }
424
425 // Return point for a Java call if there's an exception thrown in
426 // Java code. The exception is caught and transformed into a
427 // pending exception stored in JavaThread that can be tested from
428 // within the VM.
429 //
430 // Note: Usually the parameters are removed by the callee. In case
431 // of an exception crossing an activation frame boundary, that is
432 // not the case if the callee is compiled code => need to set up the
433 // sp.
434 //
435 // r0: exception oop
436
437 address generate_catch_exception() {
438 StubCodeMark mark(this, "StubRoutines", "catch_exception");
439 address start = __ pc();
2212 //   |array_tag|     | header_size | element_type |     |log2_element_size|
2213 //   32        30    24            16              8     2                 0
2214 //
2215 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2216 //
2217
2218 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2219
2220 // Handle objArrays completely differently...
2221 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2222 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2223 __ movw(rscratch1, objArray_lh);
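// lh == objArray_lh exactly when (lh ^ objArray_lh) == 0, so a zero
// eorw result below identifies src as an objArray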
2224 __ eorw(rscratch2, lh, rscratch1);
2225 __ cbzw(rscratch2, L_objArray);
2226
2227 // if (src->klass() != dst->klass()) return -1;
2228 __ load_klass(rscratch2, dst);
2229 __ eor(rscratch2, rscratch2, scratch_src_klass);
2230 __ cbnz(rscratch2, L_failed);
2231
2232 // Check for flat inline type array -> return -1
2233 __ test_flat_array_oop(src, rscratch2, L_failed);
2234
2235 // Check for null-free (non-flat) inline type array -> handle as object array
2236 __ test_null_free_array_oop(src, rscratch2, L_objArray);
2237
2238 // if (!src->is_Array()) return -1;
2239 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
2240
2241 // At this point, it is known to be a typeArray (array_tag 0x3).
2242 #ifdef ASSERT
2243 {
2244 BLOCK_COMMENT("assert primitive array {");
2245 Label L;
2246 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2247 __ cmpw(lh, rscratch2);
2248 __ br(Assembler::GE, L);
2249 __ stop("must be a primitive array");
2250 __ bind(L);
2251 BLOCK_COMMENT("} assert primitive array done");
2252 }
2253 #endif
2254
2255 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2256 rscratch2, L_failed);
2257
8174 // MACC(Ra, Ra, t0, t1, t2);
8175 // }
8176 // iters = (2*len-i)/2;
8177 // assert(iters == len-j, "must be");
8178 // for (; iters--; j++) {
8179 // assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
8180 // MACC(Rm, Rn, t0, t1, t2);
8181 // Rm = *++Pm;
8182 // Rn = *--Pn;
8183 // }
8184 // Pm_base[i-len] = t0;
8185 // t0 = t1; t1 = t2; t2 = 0;
8186 // }
8187
8188 // while (t0)
8189 // t0 = sub(Pm_base, Pn_base, t0, len);
8190 // }
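// (the three-word accumulator t2:t1:t0 holds the running column sum; after
//  each column the low word is emitted into Pm_base and the accumulator
//  shifts down one word. The trailing while loop is the final Montgomery
//  reduction: keep subtracting the modulus Pn while a carry word remains.)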
8191 };
8192
8193
8194 // Call here from the interpreter or compiled code either to load the
8195 // multiple returned values from the inline type instance being returned
8196 // into registers, or to store the returned values into a newly allocated
8197 // inline type instance.
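// has_res distinguishes the two uses wired up in generate_initial_stubs():
// false for load_inline_type_fields_in_regs (nothing comes back from the
// runtime) and true for store_inline_type_fields_to_buf (the buffered oop
// is fetched from the thread's vm_result below).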
8198 address generate_return_value_stub(address destination, const char* name, bool has_res) {
8199 // We need to save all registers the calling convention may use so that
8200 // the runtime call can read or update them. This needs to stay in sync
8201 // with SharedRuntime::java_return_convention().
8202 // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
8203 enum layout {
8204 j_rarg7_off = 0, j_rarg7_2, // j_rarg7 is r0
8205 j_rarg6_off, j_rarg6_2,
8206 j_rarg5_off, j_rarg5_2,
8207 j_rarg4_off, j_rarg4_2,
8208 j_rarg3_off, j_rarg3_2,
8209 j_rarg2_off, j_rarg2_2,
8210 j_rarg1_off, j_rarg1_2,
8211 j_rarg0_off, j_rarg0_2,
8212
8213 j_farg7_off, j_farg7_2,
8214 j_farg6_off, j_farg6_2,
8215 j_farg5_off, j_farg5_2,
8216 j_farg4_off, j_farg4_2,
8217 j_farg3_off, j_farg3_2,
8218 j_farg2_off, j_farg2_2,
8219 j_farg1_off, j_farg1_2,
8220 j_farg0_off, j_farg0_2,
8221
8222 rfp_off, rfp_off2,
8223 return_off, return_off2,
8224
8225 framesize // inclusive of return address
8226 };
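// each *_off/*_2 pair covers two 32-bit VMReg slots, i.e. one 64-bit register;
// from low to high addresses the frame is
// [j_rarg7 .. j_rarg0][j_farg7 .. j_farg0][saved rfp][return pc]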
8227
8228 CodeBuffer code(name, 512, 64);
8229 MacroAssembler* masm = new MacroAssembler(&code);
8230
8231 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
8232 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
8233 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
8234 int frame_size_in_words = frame_size_in_bytes / wordSize;
8235
8236 OopMapSet* oop_maps = new OopMapSet();
8237 OopMap* map = new OopMap(frame_size_in_slots, 0);
8238
8239 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
8240 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
8241 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
8242 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
8243 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
8244 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
8245 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
8246 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
8247
8248 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
8249 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
8250 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
8251 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
8252 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
8253 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
8254 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
8255 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
8256
8257 address start = __ pc();
8258
8259 __ enter(); // Save FP and LR before call
8260
8261 __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
8262 __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
8263 __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
8264 __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
8265
8266 __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
8267 __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
8268 __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
8269 __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
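// the pushes above mirror the layout enum, so the OopMap slot indices
// registered in map line up with where each register is actually saved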
8270
8271 int frame_complete = __ offset();
8272
8273 // Set up last_Java_sp and last_Java_fp
8274 address the_pc = __ pc();
8275 __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
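// record a walkable anchor frame so the runtime/GC can parse this stub frame
// during the call below; the_pc is the same pc registered in the oop map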
8276
8277 // Call runtime
8278 __ mov(c_rarg1, r0);
8279 __ mov(c_rarg0, rthread);
8280
8281 __ mov(rscratch1, destination);
8282 __ blr(rscratch1);
8283
8284 oop_maps->add_gc_map(the_pc - start, map);
8285
8286 __ reset_last_Java_frame(false);
8287
8288 __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
8289 __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
8290 __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
8291 __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
8292
8293 __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
8294 __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
8295 __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
8296 __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
8297
8298 __ leave();
8299
8300 // check for pending exceptions
8301 Label pending;
8302 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
8303 __ cbnz(rscratch1, pending);
8304
8305 if (has_res) {
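// the runtime call left the buffered inline type oop in the thread's
// vm_result; fetch it into r0 as this stub's result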
8306 __ get_vm_result(r0, rthread);
8307 }
8308
8309 __ ret(lr);
8310
8311 __ bind(pending);
8312 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
8313
8314 // -------------
8315 // make sure all code is generated
8316 masm->flush();
8317
8318 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
8319 return stub->entry_point();
8320 }
8321
8322 // Initialization
8323 void generate_initial_stubs() {
8324 // Generate initial stubs and initialize the entry points
8325
8326 // entry points that exist on all platforms. Note: This is code
8327 // that could be shared among different platforms - however, the
8328 // benefit seems to be smaller than the disadvantage of having a
8329 // much more complicated generator structure. See also comment in
8330 // stubRoutines.hpp.
8331
8332 StubRoutines::_forward_exception_entry = generate_forward_exception();
8333
8334 StubRoutines::_call_stub_entry =
8335 generate_call_stub(StubRoutines::_call_stub_return_address);
8336
8337 // is referenced by megamorphic call
8338 StubRoutines::_catch_exception_entry = generate_catch_exception();
8339
8340 // Initialize table for copy memory (arraycopy) check.
8341 if (UnsafeMemoryAccess::_table == nullptr) {
8348 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8349 }
8350
8351 if (UseCRC32CIntrinsics) {
8352 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8353 }
8354
8355 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8356 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8357 }
8358
8359 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8360 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8361 }
8362
8363 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
8364 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
8365 StubRoutines::_hf2f = generate_float16ToFloat();
8366 StubRoutines::_f2hf = generate_floatToFloat16();
8367 }
8368
8369 if (InlineTypeReturnedAsFields) {
8370 StubRoutines::_load_inline_type_fields_in_regs =
8371 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
8372 StubRoutines::_store_inline_type_fields_to_buf =
8373 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
8374 }
8375
8376 }
8377
8378 void generate_continuation_stubs() {
8379 // Continuation stubs:
8380 StubRoutines::_cont_thaw = generate_cont_thaw();
8381 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8382 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8383 }
8384
8385 void generate_final_stubs() {
8386 // support for verify_oop (must happen after universe_init)
8387 if (VerifyOops) {
8388 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
8389 }
8390
8391 // arraycopy stubs used by compilers
8392 generate_arraycopy_stubs();
8393
8394 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
8395 if (bs_nm != nullptr) {