src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

 292     //      rmethod: Method*
 293     //      r13: sender sp
 294     BLOCK_COMMENT("call Java function");
 295     __ mov(r13, sp);
 296     __ blr(c_rarg4);
 297 
 298     // we do this here because the notify will already have been done
 299     // if we get to the next instruction via an exception
 300     //
 301     // n.b. adding this instruction here affects the calculation of
 302     // whether or not a routine returns to the call stub (used when
 303     // doing stack walks) since the normal test is to check the return
 304     // pc against the address saved below. so we may need to allow for
 305     // this extra instruction in the check.
 306 
 307     // save current address for use by exception handling code
 308 
 309     return_address = __ pc();
 310 
 311     // store result depending on type (everything that is not
 312     // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 313     // n.b. this assumes Java returns an integral result in r0
 314     // and a floating result in j_farg0
 315     __ ldr(j_rarg2, result);
 316     Label is_long, is_float, is_double, exit;
 317     __ ldr(j_rarg1, result_type);
 318     __ cmp(j_rarg1, (u1)T_OBJECT);
 319     __ br(Assembler::EQ, is_long);
 320     __ cmp(j_rarg1, (u1)T_LONG);
 321     __ br(Assembler::EQ, is_long);
 322     __ cmp(j_rarg1, (u1)T_FLOAT);
 323     __ br(Assembler::EQ, is_float);
 324     __ cmp(j_rarg1, (u1)T_DOUBLE);
 325     __ br(Assembler::EQ, is_double);
 326 
 327     // handle T_INT case
 328     __ strw(r0, Address(j_rarg2));
 329 
 330     __ BIND(exit);
 331 
 332     // pop parameters
 333     __ sub(esp, rfp, -sp_after_call_off * wordSize);
 334 
 335 #ifdef ASSERT
 336     // verify that threads correspond
 337     {
 338       Label L, S;
 339       __ ldr(rscratch1, thread);
 340       __ cmp(rthread, rscratch1);
 341       __ br(Assembler::NE, S);
 342       __ get_thread(rscratch1);
 343       __ cmp(rthread, rscratch1);
 344       __ br(Assembler::EQ, L);
 345       __ BIND(S);
 346       __ stop("StubRoutines::call_stub: threads must correspond");
 347       __ BIND(L);
 348     }

 354     __ ldpd(v11, v10,  d11_save);
 355     __ ldpd(v9,  v8,   d9_save);
 356 
 357     __ ldp(r28, r27,   r28_save);
 358     __ ldp(r26, r25,   r26_save);
 359     __ ldp(r24, r23,   r24_save);
 360     __ ldp(r22, r21,   r22_save);
 361     __ ldp(r20, r19,   r20_save);
 362 
 363     __ ldp(c_rarg0, c_rarg1,  call_wrapper);
 364     __ ldrw(c_rarg2, result_type);
 365     __ ldr(c_rarg3,  method);
 366     __ ldp(c_rarg4, c_rarg5,  entry_point);
 367     __ ldp(c_rarg6, c_rarg7,  parameter_size);
 368 
 369     // leave frame and return to caller
 370     __ leave();
 371     __ ret(lr);
 372 
 373     // handle return types different from T_INT
 374 
 375     __ BIND(is_long);
 376     __ str(r0, Address(j_rarg2, 0));
 377     __ br(Assembler::AL, exit);
 378 
 379     __ BIND(is_float);
 380     __ strs(j_farg0, Address(j_rarg2, 0));
 381     __ br(Assembler::AL, exit);
 382 
 383     __ BIND(is_double);
 384     __ strd(j_farg0, Address(j_rarg2, 0));
 385     __ br(Assembler::AL, exit);
 386 
 387     return start;
 388   }
 389 
 390   // Return point for a Java call if there's an exception thrown in
 391   // Java code.  The exception is caught and transformed into a
 392   // pending exception stored in JavaThread that can be tested from
 393   // within the VM.
 394   //
 395   // Note: Usually the parameters are removed by the callee. In case
 396   // of an exception crossing an activation frame boundary, that is
  397     // not the case if the callee is compiled code => need to set up the
 398   // rsp.
 399   //
 400   // r0: exception oop
 401 
 402   address generate_catch_exception() {
 403     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 404     address start = __ pc();

1826     bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
1827 
1828     // save the original count
1829     __ mov(count_save, count);
1830 
1831     // Copy from low to high addresses
1832     __ mov(start_to, to);              // Save destination array start address
1833     __ b(L_load_element);
1834 
1835     // ======== begin loop ========
1836     // (Loop is rotated; its entry is L_load_element.)
1837     // Loop control:
1838     //   for (; count != 0; count--) {
1839     //     copied_oop = load_heap_oop(from++);
1840     //     ... generate_type_check ...;
1841     //     store_heap_oop(to++, copied_oop);
1842     //   }
1843     __ align(OptoLoopAlignment);
1844 
1845     __ BIND(L_store_element);
1846     __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, AS_RAW);  // store the oop
1847     __ sub(count, count, 1);
1848     __ cbz(count, L_do_card_marks);
1849 
1850     // ======== loop entry is here ========
1851     __ BIND(L_load_element);
1852     __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop
1853     __ cbz(copied_oop, L_store_element);
1854 
1855     __ load_klass(r19_klass, copied_oop);// query the object klass
1856     generate_type_check(r19_klass, ckoff, ckval, L_store_element);
1857     // ======== end loop ========
1858 
1859     // It was a real error; we must depend on the caller to finish the job.
1860     // Register count = remaining oops, count_orig = total oops.
1861     // Emit GC store barriers for the oops we have copied and report
1862     // their number to the caller.
1863 
1864     __ subs(count, count_save, count);     // K = partially copied oop count
1865     __ eon(count, count, zr);                   // report (-1^K) to caller
1866     __ br(Assembler::EQ, L_done_pop);

2073     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2074     // 32        30    24            16              8     2                 0
2075     //
2076     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2077     //
2078 
2079     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2080 
2081     // Handle objArrays completely differently...
2082     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2083     __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2084     __ movw(rscratch1, objArray_lh);
2085     __ eorw(rscratch2, lh, rscratch1);
2086     __ cbzw(rscratch2, L_objArray);
2087 
2088     //  if (src->klass() != dst->klass()) return -1;
2089     __ load_klass(rscratch2, dst);
2090     __ eor(rscratch2, rscratch2, scratch_src_klass);
2091     __ cbnz(rscratch2, L_failed);
2092 
2093     //  if (!src->is_Array()) return -1;
2094     __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)
2095 
2096     // At this point, it is known to be a typeArray (array_tag 0x3).
2097 #ifdef ASSERT
2098     {
2099       BLOCK_COMMENT("assert primitive array {");
2100       Label L;
2101       __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2102       __ cmpw(lh, rscratch2);
2103       __ br(Assembler::GE, L);
2104       __ stop("must be a primitive array");
2105       __ bind(L);
2106       BLOCK_COMMENT("} assert primitive array done");
2107     }
2108 #endif
2109 
2110     arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2111                            rscratch2, L_failed);
2112 

7207     //       MACC(Ra, Ra, t0, t1, t2);
7208     //     }
7209     //     iters =  (2*len-i)/2;
7210     //     assert(iters == len-j, "must be");
7211     //     for (; iters--; j++) {
7212     //       assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
7213     //       MACC(Rm, Rn, t0, t1, t2);
7214     //       Rm = *++Pm;
7215     //       Rn = *--Pn;
7216     //     }
7217     //     Pm_base[i-len] = t0;
7218     //     t0 = t1; t1 = t2; t2 = 0;
7219     //   }
7220 
7221     //   while (t0)
7222     //     t0 = sub(Pm_base, Pn_base, t0, len);
7223     // }
7224   };
7225 
7226 
7227   // Initialization
7228   void generate_initial() {
 7229     // Generate initial stubs and initialize the entry points
7230 
 7231     // Entry points that exist on all platforms. Note: this is code
7232     // that could be shared among different platforms - however the
7233     // benefit seems to be smaller than the disadvantage of having a
7234     // much more complicated generator structure. See also comment in
7235     // stubRoutines.hpp.
7236 
7237     StubRoutines::_forward_exception_entry = generate_forward_exception();
7238 
7239     StubRoutines::_call_stub_entry =
7240       generate_call_stub(StubRoutines::_call_stub_return_address);
7241 
 7242     // referenced by megamorphic calls
7243     StubRoutines::_catch_exception_entry = generate_catch_exception();
7244 
7245     // Build this early so it's available for the interpreter.
7246     StubRoutines::_throw_StackOverflowError_entry =

7257       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
7258     }
7259 
7260     if (UseCRC32CIntrinsics) {
7261       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
7262     }
7263 
7264     // Disabled until JDK-8210858 is fixed
7265     // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
7266     //   StubRoutines::_dlog = generate_dlog();
7267     // }
7268 
7269     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
7270       StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
7271     }
7272 
7273     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
7274       StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
7275     }
7276 
7277     // Safefetch stubs.
7278     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
7279                                                        &StubRoutines::_safefetch32_fault_pc,
7280                                                        &StubRoutines::_safefetch32_continuation_pc);
7281     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
7282                                                        &StubRoutines::_safefetchN_fault_pc,
7283                                                        &StubRoutines::_safefetchN_continuation_pc);
7284   }
7285 
7286   void generate_all() {
7287     // support for verify_oop (must happen after universe_init)
7288     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();
7289     StubRoutines::_throw_AbstractMethodError_entry =
7290       generate_throw_exception("AbstractMethodError throw_exception",
7291                                CAST_FROM_FN_PTR(address,
7292                                                 SharedRuntime::
7293                                                 throw_AbstractMethodError));
7294 
7295     StubRoutines::_throw_IncompatibleClassChangeError_entry =
7296       generate_throw_exception("IncompatibleClassChangeError throw_exception",

 292     //      rmethod: Method*
 293     //      r13: sender sp
 294     BLOCK_COMMENT("call Java function");
 295     __ mov(r13, sp);
 296     __ blr(c_rarg4);
 297 
 298     // we do this here because the notify will already have been done
 299     // if we get to the next instruction via an exception
 300     //
 301     // n.b. adding this instruction here affects the calculation of
 302     // whether or not a routine returns to the call stub (used when
 303     // doing stack walks) since the normal test is to check the return
 304     // pc against the address saved below. so we may need to allow for
 305     // this extra instruction in the check.
 306 
 307     // save current address for use by exception handling code
 308 
 309     return_address = __ pc();
 310 
 311     // store result depending on type (everything that is not
 312     // T_OBJECT, T_INLINE_TYPE, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 313     // n.b. this assumes Java returns an integral result in r0
 314     // and a floating result in j_farg0
  315     // All of j_rargN may be used to return inline type fields, so be careful
 316     // not to clobber those.
 317     // SharedRuntime::generate_buffered_inline_type_adapter() knows the register
 318     // assignment of Rresult below.
 319     Register Rresult = r14, Rresult_type = r15;
 320     __ ldr(Rresult, result);
 321     Label is_long, is_float, is_double, is_value, exit;
 322     __ ldr(Rresult_type, result_type);
 323     __ cmp(Rresult_type, (u1)T_OBJECT);
 324     __ br(Assembler::EQ, is_long);
 325     __ cmp(Rresult_type, (u1)T_INLINE_TYPE);
 326     __ br(Assembler::EQ, is_value);
 327     __ cmp(Rresult_type, (u1)T_LONG);
 328     __ br(Assembler::EQ, is_long);
 329     __ cmp(Rresult_type, (u1)T_FLOAT);
 330     __ br(Assembler::EQ, is_float);
 331     __ cmp(Rresult_type, (u1)T_DOUBLE);
 332     __ br(Assembler::EQ, is_double);
 333 
 334     // handle T_INT case
 335     __ strw(r0, Address(Rresult));
 336 
 337     __ BIND(exit);
 338 
 339     // pop parameters
 340     __ sub(esp, rfp, -sp_after_call_off * wordSize);
 341 
 342 #ifdef ASSERT
 343     // verify that threads correspond
 344     {
 345       Label L, S;
 346       __ ldr(rscratch1, thread);
 347       __ cmp(rthread, rscratch1);
 348       __ br(Assembler::NE, S);
 349       __ get_thread(rscratch1);
 350       __ cmp(rthread, rscratch1);
 351       __ br(Assembler::EQ, L);
 352       __ BIND(S);
 353       __ stop("StubRoutines::call_stub: threads must correspond");
 354       __ BIND(L);
 355     }

 361     __ ldpd(v11, v10,  d11_save);
 362     __ ldpd(v9,  v8,   d9_save);
 363 
 364     __ ldp(r28, r27,   r28_save);
 365     __ ldp(r26, r25,   r26_save);
 366     __ ldp(r24, r23,   r24_save);
 367     __ ldp(r22, r21,   r22_save);
 368     __ ldp(r20, r19,   r20_save);
 369 
 370     __ ldp(c_rarg0, c_rarg1,  call_wrapper);
 371     __ ldrw(c_rarg2, result_type);
 372     __ ldr(c_rarg3,  method);
 373     __ ldp(c_rarg4, c_rarg5,  entry_point);
 374     __ ldp(c_rarg6, c_rarg7,  parameter_size);
 375 
 376     // leave frame and return to caller
 377     __ leave();
 378     __ ret(lr);
 379 
 380     // handle return types different from T_INT
 381     __ BIND(is_value);
 382     if (InlineTypeReturnedAsFields) {
 383       // Check for flattened return value
 384       __ tbz(r0, 0, is_long);
 385       // Load pack handler address
 386       __ andr(rscratch1, r0, -2);
 387       __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 388       __ ldr(rscratch1, Address(rscratch1, InlineKlass::pack_handler_jobject_offset()));
 389       __ blr(rscratch1);
 390       __ b(exit);
 391     }
 392 
 393     __ BIND(is_long);
 394     __ str(r0, Address(Rresult, 0));
 395     __ br(Assembler::AL, exit);
 396 
 397     __ BIND(is_float);
 398     __ strs(j_farg0, Address(Rresult, 0));
 399     __ br(Assembler::AL, exit);
 400 
 401     __ BIND(is_double);
 402     __ strd(j_farg0, Address(Rresult, 0));
 403     __ br(Assembler::AL, exit);
 404 
 405     return start;
 406   }
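
A minimal C-level sketch of the result-storing dispatch above may help; it is an illustration only, not HotSpot code. Here "gpr" stands for r0, "fpr" for j_farg0, and "slot" for the result address held in Rresult; the low-bit tag convention in the inline-type case is the one the is_value path tests with tbz(r0, 0, is_long), and the SketchType names are made up for the example.

    #include <cstdint>

    enum SketchType { S_INT, S_LONG, S_OBJECT, S_INLINE, S_FLOAT, S_DOUBLE };

    static void store_result(SketchType t, uint64_t gpr, double fpr, void* slot) {
      switch (t) {
        case S_OBJECT:                     // oops are stored full-width,
        case S_LONG:                       // exactly like T_LONG
          *(uint64_t*)slot = gpr;
          break;
        case S_INLINE:
          if (gpr & 1) {                   // low bit set: fields came back in
            // registers; the InlineKlass pack handler buffers them into a
            // heap instance and yields an ordinary oop
          }
          *(uint64_t*)slot = gpr;          // then stored like a long
          break;
        case S_FLOAT:  *(float*)slot  = (float)fpr; break;
        case S_DOUBLE: *(double*)slot = fpr;        break;
        default:       *(uint32_t*)slot = (uint32_t)gpr; break; // all else as T_INT
      }
    }
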
 407 
 408   // Return point for a Java call if there's an exception thrown in
 409   // Java code.  The exception is caught and transformed into a
 410   // pending exception stored in JavaThread that can be tested from
 411   // within the VM.
 412   //
 413   // Note: Usually the parameters are removed by the callee. In case
 414   // of an exception crossing an activation frame boundary, that is
  415     // not the case if the callee is compiled code => need to set up the
 416   // rsp.
 417   //
 418   // r0: exception oop
 419 
 420   address generate_catch_exception() {
 421     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 422     address start = __ pc();

1844     bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
1845 
1846     // save the original count
1847     __ mov(count_save, count);
1848 
1849     // Copy from low to high addresses
1850     __ mov(start_to, to);              // Save destination array start address
1851     __ b(L_load_element);
1852 
1853     // ======== begin loop ========
1854     // (Loop is rotated; its entry is L_load_element.)
1855     // Loop control:
1856     //   for (; count != 0; count--) {
1857     //     copied_oop = load_heap_oop(from++);
1858     //     ... generate_type_check ...;
1859     //     store_heap_oop(to++, copied_oop);
1860     //   }
1861     __ align(OptoLoopAlignment);
1862 
1863     __ BIND(L_store_element);
1864     __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, noreg, AS_RAW);  // store the oop
1865     __ sub(count, count, 1);
1866     __ cbz(count, L_do_card_marks);
1867 
1868     // ======== loop entry is here ========
1869     __ BIND(L_load_element);
1870     __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop
1871     __ cbz(copied_oop, L_store_element);
1872 
1873     __ load_klass(r19_klass, copied_oop);// query the object klass
1874     generate_type_check(r19_klass, ckoff, ckval, L_store_element);
1875     // ======== end loop ========
1876 
1877     // It was a real error; we must depend on the caller to finish the job.
1878     // Register count = remaining oops, count_orig = total oops.
1879     // Emit GC store barriers for the oops we have copied and report
1880     // their number to the caller.
1881 
1882     __ subs(count, count_save, count);     // K = partially copied oop count
1883     __ eon(count, count, zr);                   // report (-1^K) to caller
1884     __ br(Assembler::EQ, L_done_pop);
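
Two things in the hunk above reward a closer look: the loop is rotated (entered at L_load_element, with the store half physically first), and a failed type check reports the bitwise NOT of the copied-element count, which is what the eon(count, count, zr) line computes. A self-contained sketch of both follows; it is an illustration of the control shape and return encoding, not the stub's exact contract.

    #include <cstdint>

    // Rotated checkcast-copy loop: enter at "load"; a passing element
    // branches back up to "store". Return 0 on success, or ~K on a failed
    // check, where K is the number of oops already copied; the caller
    // recovers K as ~retval, so 0 copied reads back as -1.
    static intptr_t checkcast_copy_sketch(void** from, void** to, intptr_t count,
                                          bool (*type_ok)(void*)) {
      intptr_t remaining = count;
      void* oop;
      if (remaining == 0) return 0;
      goto load;
    store:
      *to++ = oop;
      if (--remaining == 0) return 0;    // copied everything
    load:
      oop = *from++;
      if (oop == nullptr) goto store;    // nulls need no type check
      if (type_ok(oop))   goto store;    // subtype check passed
      return ~(count - remaining);       // -1 ^ K, always negative
    }
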

2091     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2092     // 32        30    24            16              8     2                 0
2093     //
2094     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2095     //
2096 
2097     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2098 
2099     // Handle objArrays completely differently...
2100     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2101     __ ldrw(lh, Address(scratch_src_klass, lh_offset));
2102     __ movw(rscratch1, objArray_lh);
2103     __ eorw(rscratch2, lh, rscratch1);
2104     __ cbzw(rscratch2, L_objArray);
2105 
2106     //  if (src->klass() != dst->klass()) return -1;
2107     __ load_klass(rscratch2, dst);
2108     __ eor(rscratch2, rscratch2, scratch_src_klass);
2109     __ cbnz(rscratch2, L_failed);
2110 
2111     // Check for flat inline type array -> return -1
2112     __ tst(lh, Klass::_lh_array_tag_vt_value_bit_inplace);
2113     __ br(Assembler::NE, L_failed);
2114 
2115     // Check for null-free (non-flat) inline type array -> handle as object array
2116     __ tst(lh, Klass::_lh_null_free_bit_inplace);
 2117     __ br(Assembler::NE, L_objArray);
2118 
2119     //  if (!src->is_Array()) return -1;
2120     __ tbz(lh, 31, L_failed);  // i.e. (lh >= 0)
2121 
2122     // At this point, it is known to be a typeArray (array_tag 0x3).
2123 #ifdef ASSERT
2124     {
2125       BLOCK_COMMENT("assert primitive array {");
2126       Label L;
2127       __ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2128       __ cmpw(lh, rscratch2);
2129       __ br(Assembler::GE, L);
2130       __ stop("must be a primitive array");
2131       __ bind(L);
2132       BLOCK_COMMENT("} assert primitive array done");
2133     }
2134 #endif
2135 
2136     arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
2137                            rscratch2, L_failed);
2138 
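
Since the layout-helper diagram above packs four fields into a single jint, a short decoding sketch may help. The shifts and masks below are read off the diagram and modeled on the Klass::_lh_* constants the assertions use, so treat the exact values as assumptions rather than authoritative.

    #include <cstdint>

    // Field extraction for the layout_helper word drawn above.
    static inline unsigned lh_array_tag(int32_t lh)    { return (uint32_t)lh >> 30; } // 0x3 typeArray, 0x2 objArray
    static inline unsigned lh_header_size(int32_t lh)  { return (lh >> 16) & 0xFF; }  // first element offset, bytes
    static inline unsigned lh_element_type(int32_t lh) { return (lh >> 8)  & 0xFF; }  // BasicType of the elements
    static inline unsigned lh_log2_esize(int32_t lh)   { return lh & 0x3F; }          // log2(element size in bytes)

    // Array layout helpers are negative (tag bits set), which is why the
    // stub can implement "!src->is_Array()" as tbz(lh, 31, L_failed):
    // bit 31 clear means lh >= 0, i.e. not an array.
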

7233     //       MACC(Ra, Ra, t0, t1, t2);
7234     //     }
7235     //     iters =  (2*len-i)/2;
7236     //     assert(iters == len-j, "must be");
7237     //     for (; iters--; j++) {
7238     //       assert(Rm == Pm_base[j] && Rn == Pn_base[i-j], "must be");
7239     //       MACC(Rm, Rn, t0, t1, t2);
7240     //       Rm = *++Pm;
7241     //       Rn = *--Pn;
7242     //     }
7243     //     Pm_base[i-len] = t0;
7244     //     t0 = t1; t1 = t2; t2 = 0;
7245     //   }
7246 
7247     //   while (t0)
7248     //     t0 = sub(Pm_base, Pn_base, t0, len);
7249     // }
7250   };
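
The Montgomery-multiply pseudocode above relies on a MACC(a, b, t0, t1, t2) primitive that the comment never defines. A plausible C++ rendering, assuming a three-word accumulator with t0 as the low word (unsigned __int128 is a GCC/Clang extension, used here only to keep the sketch short):

    #include <cstdint>

    // (t2:t1:t0) += a * b, with the 128-bit product split across the
    // accumulator; this mirrors the multiply-low/multiply-high plus
    // carry-chain sequence the generated code uses.
    static inline void MACC(uint64_t a, uint64_t b,
                            uint64_t& t0, uint64_t& t1, uint64_t& t2) {
      unsigned __int128 p = (unsigned __int128)a * b + t0;  // product + low word
      t0 = (uint64_t)p;
      unsigned __int128 c = (unsigned __int128)t1 + (uint64_t)(p >> 64);
      t1 = (uint64_t)c;
      t2 += (uint64_t)(c >> 64);                            // propagate carry
    }
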
7251 
7252 
7253   // Call here from the interpreter or compiled code to either load
7254   // multiple returned values from the inline type instance being
7255   // returned to registers or to store returned values to a newly
7256   // allocated inline type instance.
7257   address generate_return_value_stub(address destination, const char* name, bool has_res) {
7258     // We need to save all registers the calling convention may use so
7259     // the runtime calls read or update those registers. This needs to
7260     // be in sync with SharedRuntime::java_return_convention().
7261     // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
7262     enum layout {
7263       j_rarg7_off = 0, j_rarg7_2,    // j_rarg7 is r0
7264       j_rarg6_off, j_rarg6_2,
7265       j_rarg5_off, j_rarg5_2,
7266       j_rarg4_off, j_rarg4_2,
7267       j_rarg3_off, j_rarg3_2,
7268       j_rarg2_off, j_rarg2_2,
7269       j_rarg1_off, j_rarg1_2,
7270       j_rarg0_off, j_rarg0_2,
7271 
7272       j_farg7_off, j_farg7_2,
7273       j_farg6_off, j_farg6_2,
7274       j_farg5_off, j_farg5_2,
7275       j_farg4_off, j_farg4_2,
7276       j_farg3_off, j_farg3_2,
7277       j_farg2_off, j_farg2_2,
7278       j_farg1_off, j_farg1_2,
7279       j_farg0_off, j_farg0_2,
7280 
7281       rfp_off, rfp_off2,
7282       return_off, return_off2,
7283 
7284       framesize // inclusive of return address
7285     };
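
A note on the arithmetic that follows the enum: each _off/_2 pair reserves two 32-bit VMReg slots, i.e. one 64-bit register. A quick check of the totals, derived from the enum itself rather than any HotSpot constant:

    // 8 j_rargs + 8 j_fargs + rfp + return address = 18 pairs = 36 slots;
    // 36 * BytesPerInt(4) = 144 bytes, already 16-byte aligned, so the
    // align_up() below is a no-op and frame_size_in_words = 144 / 8 = 18.
    static_assert((36 * 4) % 16 == 0, "save area must stay 16-byte aligned");
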
7286 
7287     CodeBuffer code(name, 512, 64);
7288     MacroAssembler* masm = new MacroAssembler(&code);
7289 
7290     int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
7291     assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
7292     int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
7293     int frame_size_in_words = frame_size_in_bytes / wordSize;
7294 
7295     OopMapSet* oop_maps = new OopMapSet();
7296     OopMap* map = new OopMap(frame_size_in_slots, 0);
7297 
7298     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg7_off), j_rarg7->as_VMReg());
7299     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg6_off), j_rarg6->as_VMReg());
7300     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
7301     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
7302     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
7303     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
7304     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
7305     map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
7306 
7307     map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
7308     map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
7309     map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
7310     map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
7311     map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
7312     map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
7313     map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
7314     map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
7315 
7316     address start = __ pc();
7317 
7318     __ enter(); // Save FP and LR before call
7319 
7320     __ stpd(j_farg1, j_farg0, Address(__ pre(sp, -2 * wordSize)));
7321     __ stpd(j_farg3, j_farg2, Address(__ pre(sp, -2 * wordSize)));
7322     __ stpd(j_farg5, j_farg4, Address(__ pre(sp, -2 * wordSize)));
7323     __ stpd(j_farg7, j_farg6, Address(__ pre(sp, -2 * wordSize)));
7324 
7325     __ stp(j_rarg1, j_rarg0, Address(__ pre(sp, -2 * wordSize)));
7326     __ stp(j_rarg3, j_rarg2, Address(__ pre(sp, -2 * wordSize)));
7327     __ stp(j_rarg5, j_rarg4, Address(__ pre(sp, -2 * wordSize)));
7328     __ stp(j_rarg7, j_rarg6, Address(__ pre(sp, -2 * wordSize)));
7329 
7330     int frame_complete = __ offset();
7331 
7332     // Set up last_Java_sp and last_Java_fp
7333     address the_pc = __ pc();
7334     __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
7335 
7336     // Call runtime
7337     __ mov(c_rarg1, r0);
7338     __ mov(c_rarg0, rthread);
7339 
7340     __ mov(rscratch1, destination);
7341     __ blr(rscratch1);
7342 
7343     oop_maps->add_gc_map(the_pc - start, map);
7344 
7345     __ reset_last_Java_frame(false);
7346 
7347     __ ldp(j_rarg7, j_rarg6, Address(__ post(sp, 2 * wordSize)));
7348     __ ldp(j_rarg5, j_rarg4, Address(__ post(sp, 2 * wordSize)));
7349     __ ldp(j_rarg3, j_rarg2, Address(__ post(sp, 2 * wordSize)));
7350     __ ldp(j_rarg1, j_rarg0, Address(__ post(sp, 2 * wordSize)));
7351 
7352     __ ldpd(j_farg7, j_farg6, Address(__ post(sp, 2 * wordSize)));
7353     __ ldpd(j_farg5, j_farg4, Address(__ post(sp, 2 * wordSize)));
7354     __ ldpd(j_farg3, j_farg2, Address(__ post(sp, 2 * wordSize)));
7355     __ ldpd(j_farg1, j_farg0, Address(__ post(sp, 2 * wordSize)));
7356 
7357     __ leave();
7358 
7359     // check for pending exceptions
7360     Label pending;
7361     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
7362     __ cbnz(rscratch1, pending);
7363 
7364     if (has_res) {
7365       __ get_vm_result(r0, rthread);
7366     }
7367 
7368     __ ret(lr);
7369 
7370     __ bind(pending);
7371     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
7372 
7373     // -------------
7374     // make sure all code is generated
7375     masm->flush();
7376 
7377     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, frame_size_in_words, oop_maps, false);
7378     return stub->entry_point();
7379   }
7380 
7381   // Initialization
7382   void generate_initial() {
 7383     // Generate initial stubs and initialize the entry points
7384 
 7385     // Entry points that exist on all platforms. Note: this is code
7386     // that could be shared among different platforms - however the
7387     // benefit seems to be smaller than the disadvantage of having a
7388     // much more complicated generator structure. See also comment in
7389     // stubRoutines.hpp.
7390 
7391     StubRoutines::_forward_exception_entry = generate_forward_exception();
7392 
7393     StubRoutines::_call_stub_entry =
7394       generate_call_stub(StubRoutines::_call_stub_return_address);
7395 
 7396     // referenced by megamorphic calls
7397     StubRoutines::_catch_exception_entry = generate_catch_exception();
7398 
7399     // Build this early so it's available for the interpreter.
7400     StubRoutines::_throw_StackOverflowError_entry =

7411       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
7412     }
7413 
7414     if (UseCRC32CIntrinsics) {
7415       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
7416     }
7417 
7418     // Disabled until JDK-8210858 is fixed
7419     // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
7420     //   StubRoutines::_dlog = generate_dlog();
7421     // }
7422 
7423     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
7424       StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
7425     }
7426 
7427     if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
7428       StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
7429     }
7430 
7431     if (InlineTypeReturnedAsFields) {
7432       StubRoutines::_load_inline_type_fields_in_regs =
7433          generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
7434       StubRoutines::_store_inline_type_fields_to_buf =
7435          generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
7436     }
7437 
7438     // Safefetch stubs.
7439     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
7440                                                        &StubRoutines::_safefetch32_fault_pc,
7441                                                        &StubRoutines::_safefetch32_continuation_pc);
7442     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
7443                                                        &StubRoutines::_safefetchN_fault_pc,
7444                                                        &StubRoutines::_safefetchN_continuation_pc);
7445   }
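
For context on the safefetch stubs registered above: SafeFetch32 and SafeFetchN let VM code read from memory that may be unmapped, yielding a caller-supplied default instead of crashing; the fault-pc/continuation-pc pair installed by generate_safefetch is what lets the signal handler resume at the recovery path. A hedged usage sketch; the declaring header has moved between JDK releases, so the include is an assumption for this vintage.

    #include "runtime/stubRoutines.hpp"  // declares SafeFetch32 here in this era

    // Probe a possibly-unmapped address (e.g. during error reporting): if
    // the load faults, execution resumes at the continuation pc and the
    // sentinel errValue is returned instead.
    static int probe_word(int* maybe_bad_addr) {
      const int errValue = 0xBAD;      // sentinel returned on fault
      return SafeFetch32(maybe_bad_addr, errValue);
    }
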
7446 
7447   void generate_all() {
7448     // support for verify_oop (must happen after universe_init)
7449     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();
7450     StubRoutines::_throw_AbstractMethodError_entry =
7451       generate_throw_exception("AbstractMethodError throw_exception",
7452                                CAST_FROM_FN_PTR(address,
7453                                                 SharedRuntime::
7454                                                 throw_AbstractMethodError));
7455 
7456     StubRoutines::_throw_IncompatibleClassChangeError_entry =
7457       generate_throw_exception("IncompatibleClassChangeError throw_exception",