src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

 311     __ mov(r19_sender_sp, sp);
 312     __ blr(c_rarg4);
 313 
 314     // we do this here because the notify will already have been done
 315     // if we get to the next instruction via an exception
 316     //
 317     // n.b. adding this instruction here affects the calculation of
 318     // whether or not a routine returns to the call stub (used when
 319     // doing stack walks) since the normal test is to check the return
 320     // pc against the address saved below. so we may need to allow for
 321     // this extra instruction in the check.
 322 
 323     // save current address for use by exception handling code
 324 
 325     return_address = __ pc();
 326 
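    // For context: the "normal test" referred to above is the check in
    // stubRoutines.hpp, which compares a frame's return pc against the
    // address saved here:
    //
    //   static bool returns_to_call_stub(address return_pc) {
    //     return return_pc == _call_stub_return_address;
    //   }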
 327     // store result depending on type (everything that is not
 328     // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 329     // n.b. this assumes Java returns an integral result in r0
 330     // and a floating result in j_farg0
 331     __ ldr(j_rarg2, result);
 332     Label is_long, is_float, is_double, exit;
 333     __ ldr(j_rarg1, result_type);
 334     __ cmp(j_rarg1, (u1)T_OBJECT);
 335     __ br(Assembler::EQ, is_long);
 336     __ cmp(j_rarg1, (u1)T_LONG);
 337     __ br(Assembler::EQ, is_long);
 338     __ cmp(j_rarg1, (u1)T_FLOAT);
 339     __ br(Assembler::EQ, is_float);
 340     __ cmp(j_rarg1, (u1)T_DOUBLE);
 341     __ br(Assembler::EQ, is_double);
 342 
 343     // handle T_INT case
 344     __ strw(r0, Address(j_rarg2));
 345 
 346     __ BIND(exit);
 347 
 348     // pop parameters
 349     __ sub(esp, rfp, -sp_after_call_off * wordSize);
 350 
 351 #ifdef ASSERT
 352     // verify that threads correspond
 353     {
 354       Label L, S;
 355       __ ldr(rscratch1, thread);
 356       __ cmp(rthread, rscratch1);
 357       __ br(Assembler::NE, S);
 358       __ get_thread(rscratch1);
 359       __ cmp(rthread, rscratch1);
 360       __ br(Assembler::EQ, L);
 361       __ BIND(S);
 362       __ stop("StubRoutines::call_stub: threads must correspond");
 363       __ BIND(L);
 364     }

 376     __ ldp(r26, r25,   r26_save);
 377     __ ldp(r24, r23,   r24_save);
 378     __ ldp(r22, r21,   r22_save);
 379     __ ldp(r20, r19,   r20_save);
 380 
 381     // restore fpcr
 382     __ ldr(rscratch1,  fpcr_save);
 383     __ set_fpcr(rscratch1);
 384 
 385     __ ldp(c_rarg0, c_rarg1,  call_wrapper);
 386     __ ldrw(c_rarg2, result_type);
 387     __ ldr(c_rarg3,  method);
 388     __ ldp(c_rarg4, c_rarg5,  entry_point);
 389     __ ldp(c_rarg6, c_rarg7,  parameter_size);
 390 
 391     // leave frame and return to caller
 392     __ leave();
 393     __ ret(lr);
 394 
 395     // handle return types different from T_INT
 396 
 397     __ BIND(is_long);
 398     __ str(r0, Address(j_rarg2, 0));
 399     __ br(Assembler::AL, exit);
 400 
 401     __ BIND(is_float);
 402     __ strs(j_farg0, Address(j_rarg2, 0));
 403     __ br(Assembler::AL, exit);
 404 
 405     __ BIND(is_double);
 406     __ strd(j_farg0, Address(j_rarg2, 0));
 407     __ br(Assembler::AL, exit);
 408 
 409     return start;
 410   }
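  // The tail of the call stub above is a plain type dispatch on the callee's
  // result. A stand-alone sketch of the same logic (illustrative only; the
  // return registers appear as ordinary parameters here):
  //
  //   static void store_result_sketch(void* result, BasicType type,
  //                                   uint64_t r0, double j_farg0) {
  //     switch (type) {
  //       case T_OBJECT:                                            // same 64-bit
  //       case T_LONG:   *(uint64_t*)result = r0;             break; // store
  //       case T_FLOAT:  *(float*)   result = (float)j_farg0; break;
  //       case T_DOUBLE: *(double*)  result = j_farg0;        break;
  //       default:       *(uint32_t*)result = (uint32_t)r0;   break; // as T_INT
  //     }
  //   }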
 411 
 412   // Return point for a Java call if there's an exception thrown in
 413   // Java code.  The exception is caught and transformed into a
 414   // pending exception stored in JavaThread that can be tested from
 415   // within the VM.
 416   //
 417   // Note: Usually the parameters are removed by the callee. In case
 418   // of an exception crossing an activation frame boundary, that is
 419   // not the case if the callee is compiled code => need to set up the
 420   // sp.
 421   //
 422   // r0: exception oop
 423 
 424   address generate_catch_exception() {
 425     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 426     address start = __ pc();

2220     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2221     // 32        30    24            16              8     2                 0
2222     //
2223     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2224     //
2225 
2226     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2227 
2228     // Handle objArrays completely differently...
2229     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2230     __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2231     __ movw(rscratch1, objArray_lh);
2232     __ eorw(rscratch2, lh, rscratch1);
2233     __ cbzw(rscratch2, L_objArray);
2234 
2235     //  if (src->klass() != dst->klass()) return -1;
2236     __ load_klass(rscratch2, dst);
2237     __ eor(rscratch2, rscratch2, scratch_src_klass);
2238     __ cbnz(rscratch2, L_failed);
2239 
2240     //  if (!src->is_Array()) return -1;
2241     __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)
2242 
2243     // At this point, it is known to be a typeArray (array_tag 0x3).
2244 #ifdef ASSERT
2245     {
2246       BLOCK_COMMENT("assert primitive array {");
2247       Label L;
2248       __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2249       __ cmpw(lh, rscratch2);
2250       __ br(Assembler::GE, L);
2251       __ stop("must be a primitive array");
2252       __ bind(L);
2253       BLOCK_COMMENT("} assert primitive array done");
2254     }
2255 #endif
2256 
2257     arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2258                            rscratch2, L_failed);
2259 

8550       int vop = VectorSupport::VECTOR_OP_MATH_START + op;
8551       // Skip "tanh" because there is a performance regression
8552       if (vop == VectorSupport::VECTOR_OP_TANH) {
8553         continue;
8554       }
8555 
8556       // The native library does not support the u10 accuracy level for "hypot".
8557       const char* ulf = (vop == VectorSupport::VECTOR_OP_HYPOT) ? "u05" : "u10";
8558 
8559       snprintf(ebuf, sizeof(ebuf), "%sf4_%sadvsimd", VectorSupport::mathname[op], ulf);
8560       StubRoutines::_vector_f_math[VectorSupport::VEC_SIZE_64][op] = (address)os::dll_lookup(libsleef, ebuf);
8561 
8562       snprintf(ebuf, sizeof(ebuf), "%sf4_%sadvsimd", VectorSupport::mathname[op], ulf);
8563       StubRoutines::_vector_f_math[VectorSupport::VEC_SIZE_128][op] = (address)os::dll_lookup(libsleef, ebuf);
8564 
8565       snprintf(ebuf, sizeof(ebuf), "%sd2_%sadvsimd", VectorSupport::mathname[op], ulf);
8566       StubRoutines::_vector_d_math[VectorSupport::VEC_SIZE_128][op] = (address)os::dll_lookup(libsleef, ebuf);
8567     }
8568   }
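
  // The names composed above follow the SLEEF convention
  // <math name><lanes>_<accuracy><ISA>: "sinf4_u10advsimd" is 4-lane float
  // sin at the 1.0-ulp level, "hypotd2_u05advsimd" is 2-lane double hypot
  // at 0.5 ulp. A minimal stand-alone lookup in the same spirit (library
  // path hypothetical; HotSpot resolves its bundled libsleef instead):
  //
  //   #include <dlfcn.h>
  //   #include <cstdio>
  //   int main() {
  //     void* lib = dlopen("libsleef.so", RTLD_NOW);
  //     if (lib == nullptr) return 1;
  //     void* fn = dlsym(lib, "hypotd2_u05advsimd");
  //     std::printf("hypotd2_u05advsimd -> %p\n", fn);
  //     dlclose(lib);
  //     return 0;
  //   }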
8569 
8570   // Initialization
8571   void generate_initial_stubs() {
8572     // Generate initial stubs and initialize the entry points
8573 
8574     // entry points that exist on all platforms. Note: This is code
8575     // that could be shared among different platforms - however the
8576     // benefit seems to be smaller than the disadvantage of having a
8577     // much more complicated generator structure. See also comment in
8578     // stubRoutines.hpp.
8579 
8580     StubRoutines::_forward_exception_entry = generate_forward_exception();
8581 
8582     StubRoutines::_call_stub_entry =
8583       generate_call_stub(StubRoutines::_call_stub_return_address);
8584 
8585     // is referenced by megamorphic call
8586     StubRoutines::_catch_exception_entry = generate_catch_exception();
8587 
8588     // Initialize table for copy memory (arraycopy) check.
8589     if (UnsafeMemoryAccess::_table == nullptr) {

8596       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8597     }
8598 
8599     if (UseCRC32CIntrinsics) {
8600       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8601     }
8602 
8603     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8604       StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8605     }
8606 
8607     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8608       StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8609     }
8610 
8611     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
8612         vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
8613       StubRoutines::_hf2f = generate_float16ToFloat();
8614       StubRoutines::_f2hf = generate_floatToFloat16();
8615     }
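
    // These two stubs back the Float.float16ToFloat / floatToFloat16
    // intrinsics. On aarch64 each direction is a single fcvt; a minimal
    // native illustration (assumes the compiler's __fp16 extension):
    //
    //   __fp16 h = (__fp16)1.5f;   // float -> float16, as in f2hf
    //   float  f = (float)h;       // float16 -> float, as in hf2f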
8616   }
8617 
8618   void generate_continuation_stubs() {
8619     // Continuation stubs:
8620     StubRoutines::_cont_thaw          = generate_cont_thaw();
8621     StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8622     StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8623   }
8624 
8625   void generate_final_stubs() {
8626     // support for verify_oop (must happen after universe_init)
8627     if (VerifyOops) {
8628       StubRoutines::_verify_oop_subroutine_entry   = generate_verify_oop();
8629     }
8630 
8631     // arraycopy stubs used by compilers
8632     generate_arraycopy_stubs();
8633 
8634     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
8635     if (bs_nm != nullptr) {

 311     __ mov(r19_sender_sp, sp);
 312     __ blr(c_rarg4);
 313 
 314     // we do this here because the notify will already have been done
 315     // if we get to the next instruction via an exception
 316     //
 317     // n.b. adding this instruction here affects the calculation of
 318     // whether or not a routine returns to the call stub (used when
 319     // doing stack walks) since the normal test is to check the return
 320     // pc against the address saved below. so we may need to allow for
 321     // this extra instruction in the check.
 322 
 323     // save current address for use by exception handling code
 324 
 325     return_address = __ pc();
 326 
 327     // store result depending on type (everything that is not
 328     // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 329     // n.b. this assumes Java returns an integral result in r0
 330     // and a floating result in j_farg0
 331     // All of j_rargN may be used to return inline type fields, so be careful
 332     // not to clobber those.
 333     // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
 334     // assignment of Rresult below.
 335     Register Rresult = r14, Rresult_type = r15;
 336     __ ldr(Rresult, result);
 337     Label is_long, is_float, is_double, check_prim, exit;
 338     __ ldr(Rresult_type, result_type);
 339     __ cmp(Rresult_type, (u1)T_OBJECT);
 340     __ br(Assembler::EQ, check_prim);
 341     __ cmp(Rresult_type, (u1)T_LONG);
 342     __ br(Assembler::EQ, is_long);
 343     __ cmp(Rresult_type, (u1)T_FLOAT);
 344     __ br(Assembler::EQ, is_float);
 345     __ cmp(Rresult_type, (u1)T_DOUBLE);
 346     __ br(Assembler::EQ, is_double);
 347 
 348     // handle T_INT case
 349     __ strw(r0, Address(Rresult));
 350 
 351     __ BIND(exit);
 352 
 353     // pop parameters
 354     __ sub(esp, rfp, -sp_after_call_off * wordSize);
 355 
 356 #ifdef ASSERT
 357     // verify that threads correspond
 358     {
 359       Label L, S;
 360       __ ldr(rscratch1, thread);
 361       __ cmp(rthread, rscratch1);
 362       __ br(Assembler::NE, S);
 363       __ get_thread(rscratch1);
 364       __ cmp(rthread, rscratch1);
 365       __ br(Assembler::EQ, L);
 366       __ BIND(S);
 367       __ stop("StubRoutines::call_stub: threads must correspond");
 368       __ BIND(L);
 369     }

 381     __ ldp(r26, r25,   r26_save);
 382     __ ldp(r24, r23,   r24_save);
 383     __ ldp(r22, r21,   r22_save);
 384     __ ldp(r20, r19,   r20_save);
 385 
 386     // restore fpcr
 387     __ ldr(rscratch1,  fpcr_save);
 388     __ set_fpcr(rscratch1);
 389 
 390     __ ldp(c_rarg0, c_rarg1,  call_wrapper);
 391     __ ldrw(c_rarg2, result_type);
 392     __ ldr(c_rarg3,  method);
 393     __ ldp(c_rarg4, c_rarg5,  entry_point);
 394     __ ldp(c_rarg6, c_rarg7,  parameter_size);
 395 
 396     // leave frame and return to caller
 397     __ leave();
 398     __ ret(lr);
 399 
 400     // handle return types different from T_INT
 401     __ BIND(check_prim);
 402     if (InlineTypeReturnedAsFields) {
 403       // Check for scalarized return value
 404       __ tbz(r0, 0, is_long);
 405       // Load pack handler address
 406       __ andr(rscratch1, r0, -2);
 407       __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 408       __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
 409       __ blr(rscratch1);
 410       __ b(exit);
 411     }
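    // Convention modeled by the check_prim path above: bit 0 of r0 tags the
    // return value as an InlineKlass pointer with the fields still in
    // registers, rather than an ordinary oop. A stand-alone model (the
    // accessor names are hypothetical stand-ins, not VM code):
    //
    //   if (r0 & 1) {                                 // tagged: scalarized return
    //     InlineKlass* vk = (InlineKlass*)(r0 & ~1);  // clear tag to get the klass
    //     r0 = call(vk->pack_handler());              // buffer fields into an instance
    //   }                                             // else r0 is the oop as usual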
 412 
 413     __ BIND(is_long);
 414     __ str(r0, Address(Rresult, 0));
 415     __ br(Assembler::AL, exit);
 416 
 417     __ BIND(is_float);
 418     __ strs(j_farg0, Address(Rresult, 0));
 419     __ br(Assembler::AL, exit);
 420 
 421     __ BIND(is_double);
 422     __ strd(j_farg0, Address(Rresult, 0));
 423     __ br(Assembler::AL, exit);
 424 
 425     return start;
 426   }
 427 
 428   // Return point for a Java call if there's an exception thrown in
 429   // Java code.  The exception is caught and transformed into a
 430   // pending exception stored in JavaThread that can be tested from
 431   // within the VM.
 432   //
 433   // Note: Usually the parameters are removed by the callee. In case
 434   // of an exception crossing an activation frame boundary, that is
 435   // not the case if the callee is compiled code => need to set up the
 436   // sp.
 437   //
 438   // r0: exception oop
 439 
 440   address generate_catch_exception() {
 441     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 442     address start = __ pc();

2236     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2237     // 32        30    24            16              8     2                 0
2238     //
2239     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2240     //
2241 
2242     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2243 
2244     // Handle objArrays completely differently...
2245     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2246     __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2247     __ movw(rscratch1, objArray_lh);
2248     __ eorw(rscratch2, lh, rscratch1);
2249     __ cbzw(rscratch2, L_objArray);
2250 
2251     //  if (src->klass() != dst->klass()) return -1;
2252     __ load_klass(rscratch2, dst);
2253     __ eor(rscratch2, rscratch2, scratch_src_klass);
2254     __ cbnz(rscratch2, L_failed);
2255 
2256     // Check for flat inline type array -> return -1
2257     __ test_flat_array_oop(src, rscratch2, L_failed);
2258 
2259     // Check for null-free (non-flat) inline type array -> handle as object array
2260     __ test_null_free_array_oop(src, rscratch2, L_objArray);
2261 
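    // Routing for Valhalla arrays, as a decision tree (the two predicates
    // are stand-ins for the MacroAssembler tests above):
    //
    //   if (is_flat_array(src))      goto L_failed;   // inlined element storage: bail out (-1)
    //   if (is_null_free_array(src)) goto L_objArray; // ordinary references, just never null
    //   // otherwise fall through to the typeArray checks below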
2262     //  if (!src->is_Array()) return -1;
2263     __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)
2264 
2265     // At this point, it is known to be a typeArray (array_tag 0x3).
2266 #ifdef ASSERT
2267     {
2268       BLOCK_COMMENT("assert primitive array {");
2269       Label L;
2270       __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2271       __ cmpw(lh, rscratch2);
2272       __ br(Assembler::GE, L);
2273       __ stop("must be a primitive array");
2274       __ bind(L);
2275       BLOCK_COMMENT("} assert primitive array done");
2276     }
2277 #endif
2278 
2279     arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2280                            rscratch2, L_failed);
2281 

8572       int vop = VectorSupport::VECTOR_OP_MATH_START + op;
8573       // Skip "tanh" because there is a performance regression
8574       if (vop == VectorSupport::VECTOR_OP_TANH) {
8575         continue;
8576       }
8577 
8578       // The native library does not support the u10 accuracy level for "hypot".
8579       const char* ulf = (vop == VectorSupport::VECTOR_OP_HYPOT) ? "u05" : "u10";
8580 
8581       snprintf(ebuf, sizeof(ebuf), "%sf4_%sadvsimd", VectorSupport::mathname[op], ulf);
8582       StubRoutines::_vector_f_math[VectorSupport::VEC_SIZE_64][op] = (address)os::dll_lookup(libsleef, ebuf);
8583 
8584       snprintf(ebuf, sizeof(ebuf), "%sf4_%sadvsimd", VectorSupport::mathname[op], ulf);
8585       StubRoutines::_vector_f_math[VectorSupport::VEC_SIZE_128][op] = (address)os::dll_lookup(libsleef, ebuf);
8586 
8587       snprintf(ebuf, sizeof(ebuf), "%sd2_%sadvsimd", VectorSupport::mathname[op], ulf);
8588       StubRoutines::_vector_d_math[VectorSupport::VEC_SIZE_128][op] = (address)os::dll_lookup(libsleef, ebuf);
8589     }
8590   }
8591 
8592   // Call here from the interpreter or compiled code either to load
8593   // multiple returned values from the inline type instance being
8594   // returned into registers, or to store returned values into a newly
8595   // allocated inline type instance.
8596   address generate_return_value_stub(address destination, const char* name, bool has_res) {
8597     // We need to save all registers the calling convention may use so
8598     // that the runtime call can read or update those registers. This
8599     // needs to be in sync with SharedRuntime::java_return_convention().
8600     // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
8601     enum layout {
8602       j_rarg7_off = 0, j_rarg7_2,    // j_rarg7 is r0
8603       j_rarg6_off, j_rarg6_2,
8604       j_rarg5_off, j_rarg5_2,
8605       j_rarg4_off, j_rarg4_2,
8606       j_rarg3_off, j_rarg3_2,
8607       j_rarg2_off, j_rarg2_2,
8608       j_rarg1_off, j_rarg1_2,
8609       j_rarg0_off, j_rarg0_2,
8610 
8611       j_farg7_off, j_farg7_2,
8612       j_farg6_off, j_farg6_2,
8613       j_farg5_off, j_farg5_2,
8614       j_farg4_off, j_farg4_2,
8615       j_farg3_off, j_farg3_2,
8616       j_farg2_off, j_farg2_2,
8617       j_farg1_off, j_farg1_2,
8618       j_farg0_off, j_farg0_2,
8619 
8620       rfp_off, rfp_off2,
8621       return_off, return_off2,
8622 
8623       framesize // inclusive of return address
8624     };
8625 
8626     CodeBuffer code(name, 512, 64);
8627     MacroAssembler* masm = new MacroAssembler(&code);
8628 
8629     int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
8630     assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
8631     int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
8632     int frame_size_in_words = frame_size_in_bytes / wordSize;
8633 
8634     OopMapSet* oop_maps = new OopMapSet();
8635     OopMap* map = new OopMap(frame_size_in_slots, 0);
8636 
8637     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
8638     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
8639     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
8640     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
8641     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
8642     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
8643     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
8644     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
8645 
8646     map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
8647     map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
8648     map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
8649     map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
8650     map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
8651     map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
8652     map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
8653     map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
8654 
8655     address start = __ pc();
8656 
8657     __ enter(); // Save FP and LR before call
8658 
8659     __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
8660     __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
8661     __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
8662     __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
8663 
8664     __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
8665     __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
8666     __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
8667     __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
8668 
8669     int frame_complete = __ offset();
8670 
8671     // Set up last_Java_sp and last_Java_fp
8672     address the_pc = __ pc();
8673     __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
8674 
8675     // Call runtime
8676     __ mov(c_rarg1, r0);
8677     __ mov(c_rarg0, rthread);
8678 
8679     __ mov(rscratch1, destination);
8680     __ blr(rscratch1);
8681 
8682     oop_maps->add_gc_map(the_pc - start, map);
8683 
8684     __ reset_last_Java_frame(false);
8685 
8686     __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
8687     __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
8688     __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
8689     __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
8690 
8691     __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
8692     __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
8693     __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
8694     __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
8695 
8696     __ leave();
8697 
8698     // check for pending exceptions
8699     Label pending;
8700     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
8701     __ cbnz(rscratch1, pending);
8702 
8703     if (has_res) {
8704       __ get_vm_result(r0, rthread);
8705     }
8706 
8707     __ ret(lr);
8708 
8709     __ bind(pending);
8710     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
8711 
8712     // -------------
8713     // make sure all code is generated
8714     masm->flush();
8715 
8716     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
8717     return stub->entry_point();
8718   }
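  // Frame-size arithmetic for the layout above, as a sanity check: 8 integer
  // j_rargs + 8 FP j_fargs + rfp + the return address = 18 two-slot entries,
  // so framesize is 36 VMReg slots:
  //
  //   static_assert(36 * 4 == 144, "36 slots * BytesPerInt == 144 bytes");
  //   static_assert(144 % 16 == 0, "already 16-byte aligned; align_up is a no-op");
  //   static_assert(144 / 8 == 18, "frame_size_in_words == 18 on aarch64");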
8719 
8720   // Initialization
8721   void generate_initial_stubs() {
8722     // Generate initial stubs and initialize the entry points
8723 
8724     // entry points that exist on all platforms. Note: This is code
8725     // that could be shared among different platforms - however the
8726     // benefit seems to be smaller than the disadvantage of having a
8727     // much more complicated generator structure. See also comment in
8728     // stubRoutines.hpp.
8729 
8730     StubRoutines::_forward_exception_entry = generate_forward_exception();
8731 
8732     StubRoutines::_call_stub_entry =
8733       generate_call_stub(StubRoutines::_call_stub_return_address);
8734 
8735     // is referenced by megamorphic call
8736     StubRoutines::_catch_exception_entry = generate_catch_exception();
8737 
8738     // Initialize table for copy memory (arraycopy) check.
8739     if (UnsafeMemoryAccess::_table == nullptr) {

8746       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
8747     }
8748 
8749     if (UseCRC32CIntrinsics) {
8750       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
8751     }
8752 
8753     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
8754       StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
8755     }
8756 
8757     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
8758       StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
8759     }
8760 
8761     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_float16ToFloat) &&
8762         vmIntrinsics::is_intrinsic_available(vmIntrinsics::_floatToFloat16)) {
8763       StubRoutines::_hf2f = generate_float16ToFloat();
8764       StubRoutines::_f2hf = generate_floatToFloat16();
8765     }
8766 
8767     if (InlineTypeReturnedAsFields) {
8768       StubRoutines::_load_inline_type_fields_in_regs =
8769          generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
8770       StubRoutines::_store_inline_type_fields_to_buf =
8771          generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
8772     }
8773 
8774   }
8775 
8776   void generate_continuation_stubs() {
8777     // Continuation stubs:
8778     StubRoutines::_cont_thaw          = generate_cont_thaw();
8779     StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
8780     StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
8781   }
8782 
8783   void generate_final_stubs() {
8784     // support for verify_oop (must happen after universe_init)
8785     if (VerifyOops) {
8786       StubRoutines::_verify_oop_subroutine_entry   = generate_verify_oop();
8787     }
8788 
8789     // arraycopy stubs used by compilers
8790     generate_arraycopy_stubs();
8791 
8792     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
8793     if (bs_nm != nullptr) {