311 __ mov(r19_sender_sp, sp);
312 __ blr(c_rarg4);
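// call the Java entry point (the entry_point argument, still live in
// c_rarg4); on return the result is in r0 or j_farg0 as described below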
313
314 // we do this here because the notify will already have been done
315 // if we get to the next instruction via an exception
316 //
317 // n.b. adding this instruction here affects the calculation of
318 // whether or not a routine returns to the call stub (used when
319 // doing stack walks) since the normal test is to check the return
320 // pc against the address saved below. so we may need to allow for
321 // this extra instruction in the check.
322
323 // save current address for use by exception handling code
324
325 return_address = __ pc();
326
327 // store result depending on type (everything that is not
328 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
329 // n.b. this assumes Java returns an integral result in r0
330 // and a floating result in j_farg0
331 // All of the j_rargN registers may be used to return inline type fields,
332 // so be careful not to clobber them.
333 // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
334 // assignment of Rresult below.
335 Register Rresult = r14, Rresult_type = r15;
336 __ ldr(Rresult, result);
337 Label is_long, is_float, is_double, check_prim, exit;
338 __ ldr(Rresult_type, result_type);
339 __ cmp(Rresult_type, (u1)T_OBJECT);
340 __ br(Assembler::EQ, check_prim);
341 __ cmp(Rresult_type, (u1)T_LONG);
342 __ br(Assembler::EQ, is_long);
343 __ cmp(Rresult_type, (u1)T_FLOAT);
344 __ br(Assembler::EQ, is_float);
345 __ cmp(Rresult_type, (u1)T_DOUBLE);
346 __ br(Assembler::EQ, is_double);
347
348 // handle T_INT case
349 __ strw(r0, Address(Rresult));
350
351 __ BIND(exit);
352
353 // pop parameters
354 __ sub(esp, rfp, -sp_after_call_off * wordSize);
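// n.b. sp_after_call_off is a negative offset, so this computes
// rfp + sp_after_call_off * wordSize, a fixed slot below the frame pointer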
355
356 #ifdef ASSERT
357 // verify that threads correspond
358 {
359 Label L, S;
360 __ ldr(rscratch1, thread);
361 __ cmp(rthread, rscratch1);
362 __ br(Assembler::NE, S);
363 __ get_thread(rscratch1);
364 __ cmp(rthread, rscratch1);
365 __ br(Assembler::EQ, L);
366 __ BIND(S);
367 __ stop("StubRoutines::call_stub: threads must correspond");
368 __ BIND(L);
369 }
381 __ ldp(r26, r25, r26_save);
382 __ ldp(r24, r23, r24_save);
383 __ ldp(r22, r21, r22_save);
384 __ ldp(r20, r19, r20_save);
385
386 // restore fpcr
387 __ ldr(rscratch1, fpcr_save);
388 __ set_fpcr(rscratch1);
389
390 __ ldp(c_rarg0, c_rarg1, call_wrapper);
391 __ ldrw(c_rarg2, result_type);
392 __ ldr(c_rarg3, method);
393 __ ldp(c_rarg4, c_rarg5, entry_point);
394 __ ldp(c_rarg6, c_rarg7, parameter_size);
395
396 // leave frame and return to caller
397 __ leave();
398 __ ret(lr);
399
400 // handle return types different from T_INT
401 __ BIND(check_prim);
402 if (InlineTypeReturnedAsFields) {
403 // Check for scalarized return value
404 __ tbz(r0, 0, is_long);
405 // Load pack handler address
406 __ andr(rscratch1, r0, -2);
407 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
408 __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
409 __ blr(rscratch1);
410 __ b(exit);
411 }
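// n.b. a scalarized inline-type return is tagged by setting bit 0 of r0,
// with the InlineKlass in the remaining bits; its pack handler buffers the
// field values (still in the j_rargN/j_fargN registers) into a heap instance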
412
413 __ BIND(is_long);
414 __ str(r0, Address(Rresult, 0));
415 __ br(Assembler::AL, exit);
416
417 __ BIND(is_float);
418 __ strs(j_farg0, Address(Rresult, 0));
419 __ br(Assembler::AL, exit);
420
421 __ BIND(is_double);
422 __ strd(j_farg0, Address(Rresult, 0));
423 __ br(Assembler::AL, exit);
424
425 return start;
426 }
427
428 // Return point for a Java call if there's an exception thrown in
429 // Java code. The exception is caught and transformed into a
430 // pending exception stored in JavaThread that can be tested from
431 // within the VM.
432 //
433 // Note: Usually the parameters are removed by the callee. In case
434 // of an exception crossing an activation frame boundary, that is
435 // not the case if the callee is compiled code => we need to set up
436 // the sp.
437 //
438 // r0: exception oop
439
440 address generate_catch_exception() {
441 StubGenStubId stub_id = StubGenStubId::catch_exception_id;
442 StubCodeMark mark(this, stub_id);
2223 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2224 // 32        30    24            16              8     2                 0
2225 //
2226 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2227 //
2228
2229 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2230
2231 // Handle objArrays completely differently...
2232 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2233 __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2234 __ movw(rscratch1, objArray_lh);
2235 __ eorw(rscratch2, lh, rscratch1);
2236 __ cbzw(rscratch2, L_objArray);
2237
2238 // if (src->klass() != dst->klass()) return -1;
2239 __ load_klass(rscratch2, dst);
2240 __ eor(rscratch2, rscratch2, scratch_src_klass);
2241 __ cbnz(rscratch2, L_failed);
2242
2243 // Check for flat inline type array -> return -1
2244 __ test_flat_array_oop(src, rscratch2, L_failed);
2245
2246 // Check for null-free (non-flat) inline type array -> handle as object array
2247 __ test_null_free_array_oop(src, rscratch2, L_objArray);
2248
2249 // if (!src->is_Array()) return -1;
2250 __ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
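// (the layout helper is negative for arrays, since the array_tag occupies
// the top bits, so a clear bit 31 means this is not an array at all)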
2251
2252 // At this point, it is known to be a typeArray (array_tag 0x3).
2253 #ifdef ASSERT
2254 {
2255 BLOCK_COMMENT("assert primitive array {");
2256 Label L;
2257 __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2258 __ cmpw(lh, rscratch2);
2259 __ br(Assembler::GE, L);
2260 __ stop("must be a primitive array");
2261 __ bind(L);
2262 BLOCK_COMMENT("} assert primitive array done");
2263 }
2264 #endif
2265
2266 arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2267 rscratch2, L_failed);
2268
9840 int vop = VectorSupport::VECTOR_OP_MATH_START + op;
9841 // Skip "tanh" because there is a performance regression
9842 if (vop == VectorSupport::VECTOR_OP_TANH) {
9843 continue;
9844 }
9845
9846 // The native library does not provide a u10-accuracy version of "hypot".
9847 const char* ulf = (vop == VectorSupport::VECTOR_OP_HYPOT) ? "u05" : "u10";
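// (the lookups below build names like "<op>f4_u10advsimd"; SLEEF's u10 and
// u05 suffixes denote the 1.0-ulp and 0.5-ulp maximum-error variants)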
9848
9849 snprintf(ebuf, sizeof(ebuf), "%sf4_%sadvsimd", VectorSupport::mathname[op], ulf);
9850 StubRoutines::_vector_f_math[VectorSupport::VEC_SIZE_64][op] = (address)os::dll_lookup(libsleef, ebuf);
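// n.b. the 64-bit float entry looks up the same 4-lane (f4) routine as the
// 128-bit case; NEON kernels operate on full 128-bit registers, so the
// upper lanes are presumably just ignored for 64-bit vectors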
9851
9852 snprintf(ebuf, sizeof(ebuf), "%sf4_%sadvsimd", VectorSupport::mathname[op], ulf);
9853 StubRoutines::_vector_f_math[VectorSupport::VEC_SIZE_128][op] = (address)os::dll_lookup(libsleef, ebuf);
9854
9855 snprintf(ebuf, sizeof(ebuf), "%sd2_%sadvsimd", VectorSupport::mathname[op], ulf);
9856 StubRoutines::_vector_d_math[VectorSupport::VEC_SIZE_128][op] = (address)os::dll_lookup(libsleef, ebuf);
9857 }
9858 }
9859
9860 // Called from the interpreter or from compiled code either to load the
9861 // multiple returned values of the inline type instance being returned
9862 // into registers, or to store the returned register values into a newly
9863 // allocated inline type instance.
9864 address generate_return_value_stub(address destination, const char* name, bool has_res) {
9865 // We need to save all registers the calling convention may use so
9866 // that the runtime call can read or update those registers. This
9867 // needs to be in sync with SharedRuntime::java_return_convention().
9868 // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
9869 enum layout {
9870 j_rarg7_off = 0, j_rarg7_2, // j_rarg7 is r0
9871 j_rarg6_off, j_rarg6_2,
9872 j_rarg5_off, j_rarg5_2,
9873 j_rarg4_off, j_rarg4_2,
9874 j_rarg3_off, j_rarg3_2,
9875 j_rarg2_off, j_rarg2_2,
9876 j_rarg1_off, j_rarg1_2,
9877 j_rarg0_off, j_rarg0_2,
9878
9879 j_farg7_off, j_farg7_2,
9880 j_farg6_off, j_farg6_2,
9881 j_farg5_off, j_farg5_2,
9882 j_farg4_off, j_farg4_2,
9883 j_farg3_off, j_farg3_2,
9884 j_farg2_off, j_farg2_2,
9885 j_farg1_off, j_farg1_2,
9886 j_farg0_off, j_farg0_2,
9887
9888 rfp_off, rfp_off2,
9889 return_off, return_off2,
9890
9891 framesize // inclusive of return address
9892 };
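// each *_off/*_2 pair is two 32-bit VMReg slots, i.e. one 64-bit register;
// the enum grows from the lowest stack address (j_rarg7, which is r0) up
// to the saved rfp/lr pair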
9893
9894 CodeBuffer code(name, 512, 64);
9895 MacroAssembler* masm = new MacroAssembler(&code);
9896
9897 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
9898 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
9899 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
9900 int frame_size_in_words = frame_size_in_bytes / wordSize;
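// framesize is in 32-bit slots; the assert simply checks that the layout
// enum already satisfies the 16-byte sp alignment AArch64 requires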
9901
9902 OopMapSet* oop_maps = new OopMapSet();
9903 OopMap* map = new OopMap(frame_size_in_slots, 0);
9904
9905 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
9906 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
9907 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
9908 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
9909 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
9910 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
9911 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
9912 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
9913
9914 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
9915 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
9916 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
9917 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
9918 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
9919 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
9920 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
9921 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
9922
9923 address start = __ pc();
9924
9925 __ enter(); // Save FP and LR before call
9926
9927 __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
9928 __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
9929 __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
9930 __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
9931
9932 __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
9933 __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
9934 __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
9935 __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
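// the stores above push the registers in reverse enum order, so j_rarg7
// ends up in slot 0 at the lowest address and the OopMap offsets match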
9936
9937 int frame_complete = __ offset();
9938
9939 // Set up last_Java_sp and last_Java_fp
9940 address the_pc = __ pc();
9941 __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
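// the runtime unwinds from this anchor; the oop map added via add_gc_map
// below is keyed on the same pc offset (the_pc - start)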
9942
9943 // Call runtime
9944 __ mov(c_rarg1, r0);
9945 __ mov(c_rarg0, rthread);
9946
9947 __ mov(rscratch1, destination);
9948 __ blr(rscratch1);
9949
9950 oop_maps->add_gc_map(the_pc - start, map);
9951
9952 __ reset_last_Java_frame(false);
9953
9954 __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
9955 __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
9956 __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
9957 __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
9958
9959 __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
9960 __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
9961 __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
9962 __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
9963
9964 __ leave();
9965
9966 // check for pending exceptions
9967 Label pending;
9968 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
9969 __ cbnz(rscratch1, pending);
9970
9971 if (has_res) {
9972 __ get_vm_result(r0, rthread);
9973 }
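// (the buffered result is handed back as the thread's vm_result;
// get_vm_result moves it into r0 and clears the field)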
9974
9975 __ ret(lr);
9976
9977 __ bind(pending);
9978 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
9979
9980 // -------------
9981 // make sure all code is generated
9982 masm->flush();
9983
9984 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
9985 return stub->entry_point();
9986 }
9987
9988 // Initialization
9989 void generate_initial_stubs() {
9990 // Generate the initial stubs and initialize their entry points
9991
9992 // Entry points that exist on all platforms. Note: This is code
9993 // that could be shared among different platforms - however the
9994 // benefit seems to be smaller than the disadvantage of having a
9995 // much more complicated generator structure. See also comment in
9996 // stubRoutines.hpp.
9997
9998 StubRoutines::_forward_exception_entry = generate_forward_exception();
9999
10000 StubRoutines::_call_stub_entry =
10001 generate_call_stub(StubRoutines::_call_stub_return_address);
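// n.b. generate_call_stub also records the stub's resume pc in
// StubRoutines::_call_stub_return_address, which stack walking compares
// against to recognize a return into the call stub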
10002
10003 // referenced by megamorphic call sites
10004 StubRoutines::_catch_exception_entry = generate_catch_exception();
10005
10006 // Initialize table for copy memory (arraycopy) check.
10007 if (UnsafeMemoryAccess::_table == nullptr) {
10014 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
10015 }
10016
10017 if (UseCRC32CIntrinsics) {
10018 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
10019 }
10020
10021 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
10022 StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
10023 }
10024
10025 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
10026 StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
10027 }
10028
10029 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
10030 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
10031 StubRoutines::_hf2f = generate_float16ToFloat();
10032 StubRoutines::_f2hf = generate_floatToFloat16();
10033 }
10034
10035 if (InlineTypeReturnedAsFields) {
10036 StubRoutines::_load_inline_type_fields_in_regs =
10037 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
10038 StubRoutines::_store_inline_type_fields_to_buf =
10039 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
10040 }
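// the two stubs are duals: the load stub unpacks an already-buffered
// instance into the return registers (no VM result), while the store stub
// buffers the register values into a newly allocated instance (has_res)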
10041
10042 }
10043
10044 void generate_continuation_stubs() {
10045 // Continuation stubs:
10046 StubRoutines::_cont_thaw = generate_cont_thaw();
10047 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
10048 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
10049 StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
10050 }
10051
10052 void generate_final_stubs() {
10053 // support for verify_oop (must happen after universe_init)
10054 if (VerifyOops) {
10055 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
10056 }
10057
10058 // arraycopy stubs used by compilers
10059 generate_arraycopy_stubs();
10060
10061 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();