src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp (old version)


 293     //      rmethod: Method*
 294     //      r13: sender sp
 295     BLOCK_COMMENT("call Java function");
 296     __ mov(r13, sp);
 297     __ blr(c_rarg4);
 298 
 299     // we do this here because the notify will already have been done
 300     // if we get to the next instruction via an exception
 301     //
 302     // n.b. adding this instruction here affects the calculation of
 303     // whether or not a routine returns to the call stub (used when
 304     // doing stack walks) since the normal test is to check the return
 305     // pc against the address saved below. so we may need to allow for
 306     // this extra instruction in the check.
 307 
 308     // save current address for use by exception handling code
 309 
 310     return_address = __ pc();
 311 
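         // For reference, the "normal test" mentioned above is essentially
         // StubRoutines::returns_to_call_stub (stubRoutines.hpp):
         //
         //   static bool returns_to_call_stub(address return_pc) {
         //     return return_pc == _call_stub_return_address;
         //   }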
 312     // store result depending on type (everything that is not
 313     // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 314     // n.b. this assumes Java returns an integral result in r0
 315     // and a floating result in j_farg0
 316     __ ldr(j_rarg2, result);
 317     Label is_long, is_float, is_double, exit;
 318     __ ldr(j_rarg1, result_type);
 319     __ cmp(j_rarg1, (u1)T_OBJECT);
 320     __ br(Assembler::EQ, is_long);
 321     __ cmp(j_rarg1, (u1)T_LONG);
 322     __ br(Assembler::EQ, is_long);
 323     __ cmp(j_rarg1, (u1)T_FLOAT);
 324     __ br(Assembler::EQ, is_float);
 325     __ cmp(j_rarg1, (u1)T_DOUBLE);
 326     __ br(Assembler::EQ, is_double);
 327 
 328     // handle T_INT case
 329     __ strw(r0, Address(j_rarg2));
 330 
 331     __ BIND(exit);
 332 
 333     // pop parameters
 334     __ sub(esp, rfp, -sp_after_call_off * wordSize);
 335 
 336 #ifdef ASSERT
 337     // verify that threads correspond
 338     {
 339       Label L, S;
 340       __ ldr(rscratch1, thread);
 341       __ cmp(rthread, rscratch1);
 342       __ br(Assembler::NE, S);
 343       __ get_thread(rscratch1);
 344       __ cmp(rthread, rscratch1);
 345       __ br(Assembler::EQ, L);
 346       __ BIND(S);
 347       __ stop("StubRoutines::call_stub: threads must correspond");
 348       __ BIND(L);
 349     }

 355     __ ldpd(v11, v10,  d11_save);
 356     __ ldpd(v9,  v8,   d9_save);
 357 
 358     __ ldp(r28, r27,   r28_save);
 359     __ ldp(r26, r25,   r26_save);
 360     __ ldp(r24, r23,   r24_save);
 361     __ ldp(r22, r21,   r22_save);
 362     __ ldp(r20, r19,   r20_save);
 363 
 364     __ ldp(c_rarg0, c_rarg1,  call_wrapper);
 365     __ ldrw(c_rarg2, result_type);
 366     __ ldr(c_rarg3,  method);
 367     __ ldp(c_rarg4, c_rarg5,  entry_point);
 368     __ ldp(c_rarg6, c_rarg7,  parameter_size);
 369 
 370     // leave frame and return to caller
 371     __ leave();
 372     __ ret(lr);
 373 
 374     // handle return types different from T_INT
 375 
 376     __ BIND(is_long);
 377     __ str(r0, Address(j_rarg2, 0));
 378     __ br(Assembler::AL, exit);
 379 
 380     __ BIND(is_float);
 381     __ strs(j_farg0, Address(j_rarg2, 0));
 382     __ br(Assembler::AL, exit);
 383 
 384     __ BIND(is_double);
 385     __ strd(j_farg0, Address(j_rarg2, 0));
 386     __ br(Assembler::AL, exit);
 387 
 388     return start;
 389   }
 390 
 391   // Return point for a Java call if there's an exception thrown in
 392   // Java code.  The exception is caught and transformed into a
 393   // pending exception stored in JavaThread that can be tested from
 394   // within the VM.
 395   //
 396   // Note: Usually the parameters are removed by the callee. In case
 397   // of an exception crossing an activation frame boundary, that is
 398   // not the case if the callee is compiled code => need to set up
 399   // the sp.
 400   //
 401   // r0: exception oop
 402 
 403   address generate_catch_exception() {
 404     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 405     address start = __ pc();

1827     bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
1828 
1829     // save the original count
1830     __ mov(count_save, count);
1831 
1832     // Copy from low to high addresses
1833     __ mov(start_to, to);              // Save destination array start address
1834     __ b(L_load_element);
1835 
1836     // ======== begin loop ========
1837     // (Loop is rotated; its entry is L_load_element.)
1838     // Loop control:
1839     //   for (; count != 0; count--) {
1840     //     copied_oop = load_heap_oop(from++);
1841     //     ... generate_type_check ...;
1842     //     store_heap_oop(to++, copied_oop);
1843     //   }
1844     __ align(OptoLoopAlignment);
1845 
1846     __ BIND(L_store_element);
1847     __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, AS_RAW);  // store the oop
1848     __ sub(count, count, 1);
1849     __ cbz(count, L_do_card_marks);
1850 
1851     // ======== loop entry is here ========
1852     __ BIND(L_load_element);
1853     __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop
1854     __ cbz(copied_oop, L_store_element);
1855 
1856     __ load_klass(r19_klass, copied_oop); // query the object klass
1857     generate_type_check(r19_klass, ckoff, ckval, L_store_element);
1858     // ======== end loop ========
1859 
1860     // It was a real error; we must depend on the caller to finish the job.
1861     // Register count = remaining oops, count_save = total oops.
1862     // Emit GC store barriers for the oops we have copied and report
1863     // their number to the caller.
1864 
1865     __ subs(count, count_save, count);     // K = partially copied oop count
1866     __ eon(count, count, zr);                   // report (-1^K) to caller
1867     __ br(Assembler::EQ, L_done_pop);

2074     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2075     // 32        30    24            16              8     2                 0
2076     //
2077     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2078     //
2079 
2080     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2081 
2082     // Handle objArrays completely differently...
2083     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2084     __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2085     __ movw(rscratch1, objArray_lh);
2086     __ eorw(rscratch2, lh, rscratch1);
2087     __ cbzw(rscratch2, L_objArray);
2088 
2089     //  if (src->klass() != dst->klass()) return -1;
2090     __ load_klass(rscratch2, dst);
2091     __ eor(rscratch2, rscratch2, scratch_src_klass);
2092     __ cbnz(rscratch2, L_failed);
2093 
2094     //  if (!src->is_Array()) return -1;
2095     __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)
2096 
2097     // At this point, it is known to be a typeArray (array_tag 0x3).
2098 #ifdef ASSERT
2099     {
2100       BLOCK_COMMENT("assert primitive array {");
2101       Label L;
2102       __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2103       __ cmpw(lh, rscratch2);
2104       __ br(Assembler::GE, L);
2105       __ stop("must be a primitive array");
2106       __ bind(L);
2107       BLOCK_COMMENT("} assert primitive array done");
2108     }
2109 #endif
2110 
2111     arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2112                            rscratch2, L_failed);
2113 

7422     //       MACC(Ra, Ra, t0, t1, t2);
7423     //     }
7424     //     iters =  (2*len-i)/2;
7425     //     assert(iters == len-j, "must be");
7426     //     for (; iters--; j++) {
7427     //       assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
7428     //       MACC(Rm, Rn, t0, t1, t2);
7429     //       Rm = *++Pm;
7430     //       Rn = *--Pn;
7431     //     }
7432     //     Pm_base[i-len] = t0;
7433     //     t0 = t1; t1 = t2; t2 = 0;
7434     //   }
7435 
7436     //   while (t0)
7437     //     t0 = sub(Pm_base, Pn_base, t0, len);
7438     // }
7439   };
7440 
7441 
7442   // Initialization
7443   void generate_initial() {
7444     // Generate initial stubs and initialize the entry points
7445 
7446     // Entry points that exist on all platforms. Note: This is code
7447     // that could be shared among different platforms - however the
7448     // benefit seems to be smaller than the disadvantage of having a
7449     // much more complicated generator structure. See also comment in
7450     // stubRoutines.hpp.
7451 
7452     StubRoutines::_forward_exception_entry = generate_forward_exception();
7453 
7454     StubRoutines::_call_stub_entry =
7455       generate_call_stub(StubRoutines::_call_stub_return_address);
7456 
7457     // is referenced by megamorphic call
7458     StubRoutines::_catch_exception_entry = generate_catch_exception();
7459 
7460     // Build this early so it's available for the interpreter.
7461     StubRoutines::_throw_StackOverflowError_entry =

7472       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
7473     }
7474 
7475     if (UseCRC32CIntrinsics) {
7476       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
7477     }
7478 
7479     // Disabled until JDK-8210858 is fixed
7480     // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
7481     //   StubRoutines::_dlog = generate_dlog();
7482     // }
7483 
7484     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
7485       StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
7486     }
7487 
7488     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
7489       StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
7490     }
7491 
7492     // Safefetch stubs.
7493     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
7494                                                        &StubRoutines::_safefetch32_fault_pc,
7495                                                        &StubRoutines::_safefetch32_continuation_pc);
7496     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
7497                                                        &StubRoutines::_safefetchN_fault_pc,
7498                                                        &StubRoutines::_safefetchN_continuation_pc);
7499   }
7500 
7501   void generate_all() {
7502     // support for verify_oop (must happen after universe_init)
7503     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();
7504     StubRoutines::_throw_AbstractMethodError_entry =
7505       generate_throw_exception("AbstractMethodError throw_exception",
7506                                CAST_FROM_FN_PTR(address,
7507                                                 SharedRuntime::
7508                                                 throw_AbstractMethodError));
7509 
7510     StubRoutines::_throw_IncompatibleClassChangeError_entry =
7511       generate_throw_exception("IncompatibleClassChangeError throw_exception",

src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp (new version)

 293     //      rmethod: Method*
 294     //      r13: sender sp
 295     BLOCK_COMMENT("call Java function");
 296     __ mov(r13, sp);
 297     __ blr(c_rarg4);
 298 
 299     // we do this here because the notify will already have been done
 300     // if we get to the next instruction via an exception
 301     //
 302     // n.b. adding this instruction here affects the calculation of
 303     // whether or not a routine returns to the call stub (used when
 304     // doing stack walks) since the normal test is to check the return
 305     // pc against the address saved below. so we may need to allow for
 306     // this extra instruction in the check.
 307 
 308     // save current address for use by exception handling code
 309 
 310     return_address = __ pc();
 311 
 312     // store result depending on type (everything that is not
 313     // T_OBJECT, T_PRIMITIVE_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 314     // n.b. this assumes Java returns an integral result in r0
 315     // and a floating result in j_farg0
 316     // All of j_rargN may be used to return inline type fields so be careful
 317     // not to clobber those.
 318     // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
 319     // assignment of Rresult below.
 320     Register Rresult = r14, Rresult_type = r15;
 321     __ ldr(Rresult, result);
 322     Label is_long, is_float, is_double, check_prim, exit;
 323     __ ldr(Rresult_type, result_type);
 324     __ cmp(Rresult_type, (u1)T_OBJECT);
 325     __ br(Assembler::EQ, check_prim);
 326     __ cmp(Rresult_type, (u1)T_PRIMITIVE_OBJECT);
 327     __ br(Assembler::EQ, check_prim);
 328     __ cmp(Rresult_type, (u1)T_LONG);
 329     __ br(Assembler::EQ, is_long);
 330     __ cmp(Rresult_type, (u1)T_FLOAT);
 331     __ br(Assembler::EQ, is_float);
 332     __ cmp(Rresult_type, (u1)T_DOUBLE);
 333     __ br(Assembler::EQ, is_double);
 334 
 335     // handle T_INT case
 336     __ strw(r0, Address(Rresult));
 337 
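         // In C terms, the comparison chain above, together with the label
         // bodies below, stores the result roughly as follows (sketch only;
         // the possibly-scalarized T_OBJECT/T_PRIMITIVE_OBJECT case detours
         // through check_prim first):
         //
         //   switch (result_type) {
         //     case T_OBJECT: case T_PRIMITIVE_OBJECT:     // via check_prim
         //     case T_LONG:   *(jlong*)result   = r0;           break;
         //     case T_FLOAT:  *(jfloat*)result  = j_farg0;      break;
         //     case T_DOUBLE: *(jdouble*)result = j_farg0;      break;
         //     default:       *(jint*)result    = (jint)r0;     break;
         //   }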
 338     __ BIND(exit);
 339 
 340     // pop parameters
 341     __ sub(esp, rfp, -sp_after_call_off * wordSize);
 342 
 343 #ifdef ASSERT
 344     // verify that threads correspond
 345     {
 346       Label L, S;
 347       __ ldr(rscratch1, thread);
 348       __ cmp(rthread, rscratch1);
 349       __ br(Assembler::NE, S);
 350       __ get_thread(rscratch1);
 351       __ cmp(rthread, rscratch1);
 352       __ br(Assembler::EQ, L);
 353       __ BIND(S);
 354       __ stop("StubRoutines::call_stub: threads must correspond");
 355       __ BIND(L);
 356     }

 362     __ ldpd(v11, v10,  d11_save);
 363     __ ldpd(v9,  v8,   d9_save);
 364 
 365     __ ldp(r28, r27,   r28_save);
 366     __ ldp(r26, r25,   r26_save);
 367     __ ldp(r24, r23,   r24_save);
 368     __ ldp(r22, r21,   r22_save);
 369     __ ldp(r20, r19,   r20_save);
 370 
 371     __ ldp(c_rarg0, c_rarg1,  call_wrapper);
 372     __ ldrw(c_rarg2, result_type);
 373     __ ldr(c_rarg3,  method);
 374     __ ldp(c_rarg4, c_rarg5,  entry_point);
 375     __ ldp(c_rarg6, c_rarg7,  parameter_size);
 376 
 377     // leave frame and return to caller
 378     __ leave();
 379     __ ret(lr);
 380 
 381     // handle return types different from T_INT
 382     __ BIND(check_prim);
 383     if (InlineTypeReturnedAsFields) {
 384       // Check for scalarized return value
 385       __ tbz(r0, 0, is_long);
 386       // Load pack handler address
 387       __ andr(rscratch1, r0, -2);
 388       __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 389       __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
 390       __ blr(rscratch1);
 391       __ b(exit);
 392     }
 393 
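         // The check_prim path above assumes a tagged-r0 convention (implied
         // by the tbz/andr pair): when an inline type is returned scalarized,
         // r0 carries the InlineKlass* with bit 0 set. In C terms:
         //
         //   bool scalarized = (r0 & 1) != 0;            // tbz(r0, 0, ...)
         //   InlineKlass* vk = (InlineKlass*)(r0 & -2);  // andr(rscratch1, r0, -2)
         //   // vk's pack handler buffers the field values into a heap instance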
 394     __ BIND(is_long);
 395     __ str(r0, Address(Rresult, 0));
 396     __ br(Assembler::AL, exit);
 397 
 398     __ BIND(is_float);
 399     __ strs(j_farg0, Address(Rresult, 0));
 400     __ br(Assembler::AL, exit);
 401 
 402     __ BIND(is_double);
 403     __ strd(j_farg0, Address(Rresult, 0));
 404     __ br(Assembler::AL, exit);
 405 
 406     return start;
 407   }
 408 
 409   // Return point for a Java call if there's an exception thrown in
 410   // Java code.  The exception is caught and transformed into a
 411   // pending exception stored in JavaThread that can be tested from
 412   // within the VM.
 413   //
 414   // Note: Usually the parameters are removed by the callee. In case
 415   // of an exception crossing an activation frame boundary, that is
 416   // not the case if the callee is compiled code => need to set up
 417   // the sp.
 418   //
 419   // r0: exception oop
 420 
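       // Usage sketch (hypothetical caller-side snippet): after a Java call
       // unwinds through catch_exception, the VM tests the pending exception
       // with the usual macros:
       //
       //   JavaCalls::call(&result, method, &args, THREAD);
       //   if (HAS_PENDING_EXCEPTION) {
       //     Handle ex(THREAD, PENDING_EXCEPTION);
       //     CLEAR_PENDING_EXCEPTION;
       //     // ... handle or re-throw ex ...
       //   }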
 421   address generate_catch_exception() {
 422     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 423     address start = __ pc();

1845     bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
1846 
1847     // save the original count
1848     __ mov(count_save, count);
1849 
1850     // Copy from low to high addresses
1851     __ mov(start_to, to);              // Save destination array start address
1852     __ b(L_load_element);
1853 
1854     // ======== begin loop ========
1855     // (Loop is rotated; its entry is L_load_element.)
1856     // Loop control:
1857     //   for (; count != 0; count--) {
1858     //     copied_oop = load_heap_oop(from++);
1859     //     ... generate_type_check ...;
1860     //     store_heap_oop(to++, copied_oop);
1861     //   }
1862     __ align(OptoLoopAlignment);
1863 
1864     __ BIND(L_store_element);
1865     __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, noreg, AS_RAW);  // store the oop
1866     __ sub(count, count, 1);
1867     __ cbz(count, L_do_card_marks);
1868 
1869     // ======== loop entry is here ========
1870     __ BIND(L_load_element);
1871     __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop
1872     __ cbz(copied_oop, L_store_element);
1873 
1874     __ load_klass(r19_klass, copied_oop); // query the object klass
1875     generate_type_check(r19_klass, ckoff, ckval, L_store_element);
1876     // ======== end loop ========
1877 
1878     // It was a real error; we must depend on the caller to finish the job.
1879     // Register count = remaining oops, count_save = total oops.
1880     // Emit GC store barriers for the oops we have copied and report
1881     // their number to the caller.
1882 
1883     __ subs(count, count_save, count);     // K = partially copied oop count
1884     __ eon(count, count, zr);                   // report (-1^K) to caller
1885     __ br(Assembler::EQ, L_done_pop);
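         // In C terms, the failure encoding above: with K oops copied before
         // the type check failed, eon(count, count, zr) computes ~K, so the
         // stub returns ~K == -1 - K (plain -1 means none were copied):
         //
         //   int encoded = ~K;          // what the caller receives
         //   int K = ~encoded;          // caller-side decode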

2092     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2093     // 32        30    24            16              8     2                 0
2094     //
2095     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2096     //
2097 
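         // The same word is decoded on the C++ side by the layout-helper
         // accessors in klass.hpp, e.g.:
         //
         //   jint lh = k->layout_helper();
         //   int tag       = ((juint)lh) >> Klass::_lh_array_tag_shift;  // 0x3/0x2/0x0
         //   int hsize     = Klass::layout_helper_header_size(lh);       // in bytes
         //   BasicType et  = Klass::layout_helper_element_type(lh);
         //   int log2_esz  = Klass::layout_helper_log2_element_size(lh);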
2098     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2099 
2100     // Handle objArrays completely differently...
2101     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2102     __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2103     __ movw(rscratch1, objArray_lh);
2104     __ eorw(rscratch2, lh, rscratch1);
2105     __ cbzw(rscratch2, L_objArray);
2106 
2107     //  if (src->klass() != dst->klass()) return -1;
2108     __ load_klass(rscratch2, dst);
2109     __ eor(rscratch2, rscratch2, scratch_src_klass);
2110     __ cbnz(rscratch2, L_failed);
2111 
2112     // Check for flat inline type array -> return -1
2113     __ tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2114     __ br(Assembler::NE, L_failed);
2115 
2116     // Check for null-free (non-flat) inline type array -> handle as object array
2117     __ tst(lh, Klass::_lh_null_free_array_bit_inplace);
2118     __ br(Assembler::NE, L_objArray);
2119 
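         // In C terms, the two tests above check layout-helper bits (names
         // from klass.hpp):
         //
         //   if (lh & Klass::_lh_array_tag_flat_value_bit_inplace) return -1;  // flat
         //   if (lh & Klass::_lh_null_free_array_bit_inplace) goto objArray;   // null-free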
2120     //  if (!src->is_Array()) return -1;
2121     __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)
2122 
2123     // At this point, it is known to be a typeArray (array_tag 0x3).
2124 #ifdef ASSERT
2125     {
2126       BLOCK_COMMENT("assert primitive array {");
2127       Label L;
2128       __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2129       __ cmpw(lh, rscratch2);
2130       __ br(Assembler::GE, L);
2131       __ stop("must be a primitive array");
2132       __ bind(L);
2133       BLOCK_COMMENT("} assert primitive array done");
2134     }
2135 #endif
2136 
2137     arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2138                            rscratch2, L_failed);
2139 

7448     //       MACC(Ra, Ra, t0, t1, t2);
7449     //     }
7450     //     iters =  (2*len-i)/2;
7451     //     assert(iters == len-j, "must be");
7452     //     for (; iters--; j++) {
7453     //       assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
7454     //       MACC(Rm, Rn, t0, t1, t2);
7455     //       Rm = *++Pm;
7456     //       Rn = *--Pn;
7457     //     }
7458     //     Pm_base[i-len] = t0;
7459     //     t0 = t1; t1 = t2; t2 = 0;
7460     //   }
7461 
7462     //   while (t0)
7463     //     t0 = sub(Pm_base, Pn_base, t0, len);
7464     // }
7465   };
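       // For reference, the MACC used throughout the pseudocode above is a
       // 64x64->128-bit multiply accumulated into a three-word column sum
       // (t0 low, t1 middle, t2 carry); a C sketch of what the mul/umulh/
       // adds/adcs/adc sequence computes:
       //
       //   static void MACC(julong a, julong b,
       //                    julong& t0, julong& t1, julong& t2) {
       //     unsigned __int128 p = (unsigned __int128)a * b + t0;
       //     t0 = (julong)p;
       //     unsigned __int128 c = (p >> 64) + t1;
       //     t1 = (julong)c;
       //     t2 += (julong)(c >> 64);
       //   }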
7466 
7467 
7468   // Called from the interpreter or compiled code to either load the
7469   // multiple returned values of an inline type instance into registers,
7470   // or to store returned values into a newly allocated inline type
7471   // instance.
7472   address generate_return_value_stub(address destination, const char* name, bool has_res) {
7473     // We need to save all registers the calling convention may use so
7474     // that the runtime call can read or update them. This needs to be
7475     // in sync with SharedRuntime::java_return_convention().
7476     // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
7477     enum layout {
7478       j_rarg7_off = 0, j_rarg7_2,    // j_rarg7 is r0
7479       j_rarg6_off, j_rarg6_2,
7480       j_rarg5_off, j_rarg5_2,
7481       j_rarg4_off, j_rarg4_2,
7482       j_rarg3_off, j_rarg3_2,
7483       j_rarg2_off, j_rarg2_2,
7484       j_rarg1_off, j_rarg1_2,
7485       j_rarg0_off, j_rarg0_2,
7486 
7487       j_farg7_off, j_farg7_2,
7488       j_farg6_off, j_farg6_2,
7489       j_farg5_off, j_farg5_2,
7490       j_farg4_off, j_farg4_2,
7491       j_farg3_off, j_farg3_2,
7492       j_farg2_off, j_farg2_2,
7493       j_farg1_off, j_farg1_2,
7494       j_farg0_off, j_farg0_2,
7495 
7496       rfp_off, rfp_off2,
7497       return_off, return_off2,
7498 
7499       framesize // inclusive of return address
7500     };
7501 
7502     CodeBuffer code(name, 512, 64);
7503     MacroAssembler* masm = new MacroAssembler(&code);
7504 
7505     int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
7506     assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
7507     int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
7508     int frame_size_in_words = frame_size_in_bytes / wordSize;
7509 
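         // Worked out: 16 saved registers at 2 slots each, plus rfp and lr:
         //   framesize           = 36 32-bit slots
         //   frame_size_in_bytes = 36 * 4 = 144 (already 16-byte aligned)
         //   frame_size_in_slots = 144 / 4 = 36
         //   frame_size_in_words = 144 / 8 = 18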
7510     OopMapSet* oop_maps = new OopMapSet();
7511     OopMap* map = new OopMap(frame_size_in_slots, 0);
7512 
7513     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
7514     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
7515     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
7516     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
7517     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
7518     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
7519     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
7520     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
7521 
7522     map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
7523     map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
7524     map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
7525     map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
7526     map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
7527     map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
7528     map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
7529     map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
7530 
7531     address start = __ pc();
7532 
7533     __ enter(); // Save FP and LR before call
7534 
7535     __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
7536     __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
7537     __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
7538     __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
7539 
7540     __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
7541     __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
7542     __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
7543     __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
7544 
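         // Note: the push order above mirrors the layout enum: the last pair
         // pushed (j_rarg7, j_rarg6) lands at sp + 0, matching j_rarg7_off == 0,
         // and j_farg0 ends up just below the rfp/lr pair saved by enter().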
7545     int frame_complete = __ offset();
7546 
7547     // Set up last_Java_sp and last_Java_fp
7548     address the_pc = __ pc();
7549     __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
7550 
7551     // Call runtime
7552     __ mov(c_rarg1, r0);
7553     __ mov(c_rarg0, rthread);
7554 
7555     __ mov(rscratch1, destination);
7556     __ blr(rscratch1);
7557 
7558     oop_maps->add_gc_map(the_pc - start, map);
7559 
7560     __ reset_last_Java_frame(false);
7561 
7562     __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
7563     __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
7564     __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
7565     __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
7566 
7567     __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
7568     __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
7569     __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
7570     __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
7571 
7572     __ leave();
7573 
7574     // check for pending exceptions
7575     Label pending;
7576     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
7577     __ cbnz(rscratch1, pending);
7578 
7579     if (has_res) {
7580       __ get_vm_result(r0, rthread);
7581     }
7582 
7583     __ ret(lr);
7584 
7585     __ bind(pending);
7586     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
7587 
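         // The epilogue above follows the usual VM-call pattern; roughly, in
         // C++ pseudocode (the field accessors are real, the control flow is
         // a sketch):
         //
         //   if (thread->pending_exception() != NULL)
         //     jump_to(StubRoutines::forward_exception_entry());  // no return
         //   if (has_res)
         //     r0 = thread->vm_result();  // get_vm_result also clears the field
         //   return_to_caller();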
7588     // -------------
7589     // make sure all code is generated
7590     masm->flush();
7591 
7592     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
7593     return stub->entry_point();
7594   }
7595 
7596   // Initialization
7597   void generate_initial() {
7598     // Generate initial stubs and initialize the entry points
7599 
7600     // Entry points that exist on all platforms. Note: This is code
7601     // that could be shared among different platforms - however the
7602     // benefit seems to be smaller than the disadvantage of having a
7603     // much more complicated generator structure. See also comment in
7604     // stubRoutines.hpp.
7605 
7606     StubRoutines::_forward_exception_entry = generate_forward_exception();
7607 
7608     StubRoutines::_call_stub_entry =
7609       generate_call_stub(StubRoutines::_call_stub_return_address);
7610 
7611     // is referenced by megamorphic call
7612     StubRoutines::_catch_exception_entry = generate_catch_exception();
7613 
7614     // Build this early so it's available for the interpreter.
7615     StubRoutines::_throw_StackOverflowError_entry =

7626       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
7627     }
7628 
7629     if (UseCRC32CIntrinsics) {
7630       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
7631     }
7632 
7633     // Disabled until JDK-8210858 is fixed
7634     // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
7635     //   StubRoutines::_dlog = generate_dlog();
7636     // }
7637 
7638     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
7639       StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
7640     }
7641 
7642     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
7643       StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
7644     }
7645 
7646     if (InlineTypeReturnedAsFields) {
7647       StubRoutines::_load_inline_type_fields_in_regs =
7648          generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
7649       StubRoutines::_store_inline_type_fields_to_buf =
7650          generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
7651     }
7652 
7653     // Safefetch stubs.
7654     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
7655                                                        &StubRoutines::_safefetch32_fault_pc,
7656                                                        &StubRoutines::_safefetch32_continuation_pc);
7657     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
7658                                                        &StubRoutines::_safefetchN_fault_pc,
7659                                                        &StubRoutines::_safefetchN_continuation_pc);
7660   }
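       // Usage sketch for the safefetch stubs generated above: SafeFetch32
       // (stubRoutines.hpp) returns *adr, or errValue if the load faults;
       // the signal handler maps a fault at _safefetch32_fault_pc to
       // _safefetch32_continuation_pc:
       //
       //   int v = SafeFetch32((int*)maybe_unmapped, -1);
       //   // v == -1  =>  address unreadable (or it genuinely held -1)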
7661 
7662   void generate_all() {
7663     // support for verify_oop (must happen after universe_init)
7664     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();
7665     StubRoutines::_throw_AbstractMethodError_entry =
7666       generate_throw_exception("AbstractMethodError throw_exception",
7667                                CAST_FROM_FN_PTR(address,
7668                                                 SharedRuntime::
7669                                                 throw_AbstractMethodError));
7670 
7671     StubRoutines::_throw_IncompatibleClassChangeError_entry =
7672       generate_throw_exception("IncompatibleClassChangeError throw_exception",