src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

1149   stack_slots = 2; // will be adjusted in setup
1150   OopMap* map = continuation_enter_setup(masm, stack_slots);
1151   frame_complete = __ pc() - start;
1152 
1153   fill_continuation_entry(masm);
1154 
1155   __ cbnz(c_rarg2, call_thaw);
1156 
1157   const address tr_call = __ trampoline_call(resolve);
1158   if (tr_call == nullptr) {
1159     fatal("CodeCache is full at gen_continuation_enter");
1160   }
1161 
1162   oop_maps->add_gc_map(__ pc() - start, map);
1163   __ post_call_nop();
1164 
1165   __ b(exit);
1166 
1167   __ bind(call_thaw);
1168 

1169   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1170   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1171   ContinuationEntry::_return_pc_offset = __ pc() - start;
1172   __ post_call_nop();
1173 
1174   __ bind(exit);

1175   continuation_enter_cleanup(masm);
1176   __ leave();
1177   __ ret(lr);
1178 
1179   /// exception handling
1180 
1181   exception_offset = __ pc() - start;
1182   {
1183       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1184 
1185       continuation_enter_cleanup(masm);
1186 
1187       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1188       __ authenticate_return_address(c_rarg1);
1189       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1190 
1191       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1192 
1193       __ mov(r1, r0); // the exception handler
1194       __ mov(r0, r19); // restore return value containing the exception oop

1251     continuation_enter_cleanup(masm);
1252 
1253     __ bind(pinned); // pinned -- return to caller
1254 
1255     // handle pending exception thrown by freeze
1256     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1257     Label ok;
1258     __ cbz(rscratch1, ok);
1259     __ leave();
1260     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1261     __ br(rscratch1);
1262     __ bind(ok);
1263 
1264     __ leave();
1265     __ ret(lr);
1266 
1267     OopMap* map = new OopMap(framesize, 1);
1268     oop_maps->add_gc_map(the_pc - start, map);
1269 }
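
The pinned return path above follows a pattern used in several stubs in this file: before returning to the caller, check Thread::pending_exception and, if it is set, tear the frame down and tail-jump to the forward-exception stub instead of returning normally. A minimal C++ sketch of that shape, with stand-in types and a stand-in stub entry (none of these names are from the file):

    #include <cstdlib>

    struct ThreadStub {
      void* pending_exception = nullptr;
    };

    // Stand-in for StubRoutines::forward_exception_entry(); the real thing is a
    // code stub that is branched to, not called.
    [[noreturn]] static void forward_exception_entry() { std::abort(); }

    inline void return_or_forward(ThreadStub* thread) {
      if (thread->pending_exception != nullptr) {
        forward_exception_entry();   // leave(); br forward_exception_entry
      }
      // leave(); ret(lr) -- normal return to the caller
    }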
1270 




1271 static void gen_special_dispatch(MacroAssembler* masm,
1272                                  const methodHandle& method,
1273                                  const BasicType* sig_bt,
1274                                  const VMRegPair* regs) {
1275   verify_oop_args(masm, method, sig_bt, regs);
1276   vmIntrinsics::ID iid = method->intrinsic_id();
1277 
1278   // Now write the args into the outgoing interpreter space
1279   bool     has_receiver   = false;
1280   Register receiver_reg   = noreg;
1281   int      member_arg_pos = -1;
1282   Register member_reg     = noreg;
1283   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1284   if (ref_kind != 0) {
1285     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1286     member_reg = r19;  // known to be free at this point
1287     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1288   } else if (iid == vmIntrinsics::_invokeBasic) {
1289     has_receiver = true;
1290   } else if (iid == vmIntrinsics::_linkToNative) {

1717   int c_arg = total_c_args - total_in_args;
1718 
1719   // Pre-load a static method's oop into c_rarg1.
1720   if (method->is_static()) {
1721 
1722     //  load oop into a register
1723     __ movoop(c_rarg1,
1724               JNIHandles::make_local(method->method_holder()->java_mirror()));
1725 
1726     // Now handlize the static class mirror; it's known not-null.
1727     __ str(c_rarg1, Address(sp, klass_offset));
1728     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1729 
1730     // Now get the handle
1731     __ lea(c_rarg1, Address(sp, klass_offset));
1732     // and protect the arg if we must spill
1733     c_arg--;
1734   }
1735 
1736   // Change state to native (we save the return address in the thread, since it might not
1737   // be pushed on the stack when we do a stack traversal).
1738   // We use the same pc/oopMap repeatedly when we call out

1739 
1740   Label native_return;
1741   __ set_last_Java_frame(sp, noreg, native_return, rscratch1);

1742 
1743   Label dtrace_method_entry, dtrace_method_entry_done;
1744   if (DTraceMethodProbes) {
1745     __ b(dtrace_method_entry);
1746     __ bind(dtrace_method_entry_done);
1747   }
1748 
1749   // RedefineClasses() tracing support for obsolete method entry
1750   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1751     // protect the args we've loaded
1752     save_args(masm, total_c_args, c_arg, out_regs);
1753     __ mov_metadata(c_rarg1, method());
1754     __ call_VM_leaf(
1755       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1756       rthread, c_rarg1);
1757     restore_args(masm, total_c_args, c_arg, out_regs);
1758   }
1759 
1760   // Lock a synchronized method
1761 

1799       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1800 
1801       // Hmm should this move to the slow path code area???
1802 
1803       // Test if the oopMark is an obvious stack pointer, i.e.,
1804       //  1) (mark & 3) == 0, and
1805       //  2) sp <= mark < sp + os::pagesize()
1806       // These 3 tests can be done by evaluating the following
1807       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1808       // assuming both stack pointer and pagesize have their
1809       // least significant 2 bits clear.
1810       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1811 
1812       __ sub(swap_reg, sp, swap_reg);
1813       __ neg(swap_reg, swap_reg);
1814       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1815 
1816       // Save the test result; for the recursive case the result is zero
1817       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1818       __ br(Assembler::NE, slow_path_lock);



1819     } else {
1820       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1821       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1822     }
1823     __ bind(count);
1824     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1825 
1826     // Slow path will re-enter here
1827     __ bind(lock_done);
1828   }
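
The comment above the ands instruction packs the stack-lock test into one mask: when both sp and the page size have their low two bits clear, ((mark - sp) & (3 - page_size)) is zero exactly when mark is 4-byte aligned and lies within one page above sp. A small stand-alone C++ sketch of that arithmetic (addresses and page size below are made-up values, not anything taken from this file):

    #include <cstdint>
    #include <cstdio>

    // True iff (mark & 3) == 0 and sp <= mark < sp + page_size, assuming sp and
    // page_size both have their low two bits clear -- the predicate the generated
    // sub/neg/ands sequence evaluates.
    static bool looks_like_our_stack_lock(uintptr_t mark, uintptr_t sp, uintptr_t page_size) {
      return ((mark - sp) & (3 - page_size)) == 0;
    }

    int main() {
      const uintptr_t page = 4096;
      const uintptr_t sp   = 0x0000ffffc0000000ULL;
      std::printf("%d\n", looks_like_our_stack_lock(sp + 64, sp, page));       // 1: aligned, within a page
      std::printf("%d\n", looks_like_our_stack_lock(sp + 65, sp, page));       // 0: low bits set
      std::printf("%d\n", looks_like_our_stack_lock(sp + page + 8, sp, page)); // 0: more than a page above sp
      return 0;
    }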
1829 
1830 
1831   // Finally just about ready to make the JNI call
1832 
1833   // get JNIEnv* which is first argument to native
1834   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1835 
1836   // Now set thread in native
1837   __ mov(rscratch1, _thread_in_native);
1838   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1839   __ stlrw(rscratch1, rscratch2);
1840 
1841   __ rt_call(native_func);
1842 
1843   __ bind(native_return);
1844 
1845   intptr_t return_pc = (intptr_t) __ pc();
1846   oop_maps->add_gc_map(return_pc - start, map);
1847 
1848   // Verify or restore cpu control state after JNI call
1849   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1850 
1851   // Unpack native results.
1852   switch (ret_type) {
1853   case T_BOOLEAN: __ c2bool(r0);                     break;
1854   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1855   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1856   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1857   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1858   case T_DOUBLE :
1859   case T_FLOAT  :
1860     // Result is in v0; we'll save as needed
1861     break;
1862   case T_ARRAY:                 // Really a handle
1863   case T_OBJECT:                // Really a handle
1864       break; // can't de-handlize until after safepoint check
1865   case T_VOID: break;
1866   case T_LONG: break;
1867   default       : ShouldNotReachHere();
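
The switch above normalizes the raw 64-bit value in r0 to the declared Java return type: boolean is reduced to 0/1, char is zero-extended from 16 bits, and byte/short/int are sign-extended from 8/16/32 bits. A hedged C++ sketch of those widening rules (helper names are illustrative, not HotSpot's):

    #include <cassert>
    #include <cstdint>

    // Illustrative equivalents of the c2bool / ubfx / sbfx cases above.
    static int64_t unpack_boolean(int64_t raw) { return (uint8_t)raw ? 1 : 0; } // only the low byte of a jboolean matters
    static int64_t unpack_char(int64_t raw)    { return (uint16_t)raw; }        // zero-extend 16 bits
    static int64_t unpack_byte(int64_t raw)    { return (int8_t)raw; }          // sign-extend 8 bits
    static int64_t unpack_short(int64_t raw)   { return (int16_t)raw; }         // sign-extend 16 bits
    static int64_t unpack_int(int64_t raw)     { return (int32_t)raw; }         // sign-extend 32 bits

    int main() {
      assert(unpack_boolean(3) == 1 && unpack_boolean(0) == 0);
      assert(unpack_char(0xFFFF8041) == 0x8041);    // high bits dropped, no sign extension
      assert(unpack_byte(0x80) == -128);            // sign bit propagated
      assert(unpack_int(0x80000000LL) == -2147483648LL);
      return 0;
    }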

1886     __ dmb(Assembler::ISH);
1887   }
1888 
1889   __ verify_sve_vector_length();
1890 
1891   // Check for safepoint operation in progress and/or pending suspend requests.
1892   {
1893     // No need for acquire as Java threads always disarm themselves.
1894     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
1895     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1896     __ cbnzw(rscratch1, safepoint_in_progress);
1897     __ bind(safepoint_in_progress_done);
1898   }
1899 
1900   // change thread state
1901   __ mov(rscratch1, _thread_in_Java);
1902   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1903   __ stlrw(rscratch1, rscratch2);
1904   __ bind(after_transition);
1905 

1906   Label reguard;
1907   Label reguard_done;
1908   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1909   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1910   __ br(Assembler::EQ, reguard);
1911   __ bind(reguard_done);
1912 
1913   // native result, if any, is live
1914 
1915   // Unlock
1916   Label unlock_done;
1917   Label slow_path_unlock;
1918   if (method->is_synchronized()) {
1919 
1920     // Get locked oop from the handle we passed to jni
1921     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1922 
1923     Label done, not_recursive;
1924 
1925     if (LockingMode == LM_LEGACY) {
1926       // Simple recursive lock?
1927       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1928       __ cbnz(rscratch1, not_recursive);
1929       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1930       __ b(done);
1931     }
1932 
1933     __ bind(not_recursive);
1934 
1935     // Must save r0 if it is live now because cmpxchg must use it
1936     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1937       save_native_result(masm, ret_type, stack_slots);
1938     }
1939 
1940     if (LockingMode == LM_MONITOR) {
1941       __ b(slow_path_unlock);
1942     } else if (LockingMode == LM_LEGACY) {
1943       // get address of the stack lock
1944       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1945       //  get old displaced header
1946       __ ldr(old_hdr, Address(r0, 0));
1947 
1948       // Atomic swap old header if oop still contains the stack lock
1949       Label count;
1950       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1951       __ bind(count);
1952       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1953     } else {
1954       assert(LockingMode == LM_LIGHTWEIGHT, "");
1955       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1956       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1957     }
1958 
1959     // slow path re-enters here
1960     __ bind(unlock_done);
1961     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1962       restore_native_result(masm, ret_type, stack_slots);
1963     }
1964 
1965     __ bind(done);
1966   }
1967 
1968   Label dtrace_method_exit, dtrace_method_exit_done;
1969   if (DTraceMethodProbes) {
1970     __ b(dtrace_method_exit);
1971     __ bind(dtrace_method_exit_done);
1972   }
1973 
1974   __ reset_last_Java_frame(false);
1975 
1976   // Unbox oop result, e.g. JNIHandles::resolve result.

2004   // and forward the exception
2005   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2006 
2007   // Slow path locking & unlocking
2008   if (method->is_synchronized()) {
2009 
2010     __ block_comment("Slow path lock {");
2011     __ bind(slow_path_lock);
2012 
2013     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2014     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2015 
2016     // protect the args we've loaded
2017     save_args(masm, total_c_args, c_arg, out_regs);
2018 
2019     __ mov(c_rarg0, obj_reg);
2020     __ mov(c_rarg1, lock_reg);
2021     __ mov(c_rarg2, rthread);
2022 
2023     // Not a leaf but we have last_Java_frame setup as we want



2024     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);

2025     restore_args(masm, total_c_args, c_arg, out_regs);
2026 
2027 #ifdef ASSERT
2028     { Label L;
2029       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2030       __ cbz(rscratch1, L);
2031       __ stop("no pending exception allowed on exit from monitorenter");
2032       __ bind(L);
2033     }
2034 #endif
2035     __ b(lock_done);
2036 
2037     __ block_comment("} Slow path lock");
2038 
2039     __ block_comment("Slow path unlock {");
2040     __ bind(slow_path_unlock);
2041 
2042     // If we haven't already saved the native result we must save it now, as the result
2043     // registers are still exposed.
2044 

2545     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2546     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2547   }
2548 #endif
2549 }
2550 
2551 // Number of stack slots between incoming argument block and the start of
2552 // a new frame.  The PROLOG must add this many slots to the stack.  The
2553 // EPILOG must remove this many slots. aarch64 needs two slots for
2554 // return address and fp.
2555 // TODO think this is correct but check
2556 uint SharedRuntime::in_preserve_stack_slots() {
2557   return 4;
2558 }
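
A likely reading of the 4 returned above, assuming the usual 32-bit VMRegImpl stack slots: the frame preserves two 64-bit words (saved fp and the return address), and each 64-bit word occupies two slots, so 2 x 2 = 4.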
2559 
2560 uint SharedRuntime::out_preserve_stack_slots() {
2561   return 0;
2562 }
2563 
2564 




2565 //------------------------------generate_handler_blob------
2566 //
2567 // Generate a special Compile2Runtime blob that saves all registers,
2568 // and sets up an oopmap.
2569 //
2570 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2571   assert(is_polling_page_id(id), "expected a polling page stub id");
2572 
2573   ResourceMark rm;
2574   OopMapSet *oop_maps = new OopMapSet();
2575   OopMap* map;
2576 
2577   // Allocate space for the code.  Setup code generation tools.
2578   const char* name = SharedRuntime::stub_name(id);
2579   CodeBuffer buffer(name, 2048, 1024);
2580   MacroAssembler* masm = new MacroAssembler(&buffer);
2581 
2582   address start   = __ pc();
2583   address call_pc = nullptr;
2584   int frame_size_in_words;

1149   stack_slots = 2; // will be adjusted in setup
1150   OopMap* map = continuation_enter_setup(masm, stack_slots);
1151   frame_complete = __ pc() - start;
1152 
1153   fill_continuation_entry(masm);
1154 
1155   __ cbnz(c_rarg2, call_thaw);
1156 
1157   const address tr_call = __ trampoline_call(resolve);
1158   if (tr_call == nullptr) {
1159     fatal("CodeCache is full at gen_continuation_enter");
1160   }
1161 
1162   oop_maps->add_gc_map(__ pc() - start, map);
1163   __ post_call_nop();
1164 
1165   __ b(exit);
1166 
1167   __ bind(call_thaw);
1168 
1169   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1170   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1171   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1172   ContinuationEntry::_return_pc_offset = __ pc() - start;
1173   __ post_call_nop();
1174 
1175   __ bind(exit);
1176   ContinuationEntry::_cleanup_offset = __ pc() - start;
1177   continuation_enter_cleanup(masm);
1178   __ leave();
1179   __ ret(lr);
1180 
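
Compared with the old version, the entry path above now records three code offsets relative to start: _thaw_call_pc_offset just before the call into the thaw stub, _return_pc_offset just after it, and _cleanup_offset at the head of the exit sequence. A rough sketch of the control flow being generated (pseudocode, not the emitted instructions):

    // map = continuation_enter_setup(); fill_continuation_entry();
    // if (c_rarg2 != 0) goto call_thaw;         // flag selects the thaw path
    // trampoline_call(resolve);                 // fresh entry into the continuation
    // goto exit;
    // call_thaw:
    //   _thaw_call_pc_offset = pc - start;
    //   rt_call(StubRoutines::cont_thaw());
    //   _return_pc_offset = pc - start;
    // exit:
    //   _cleanup_offset = pc - start;
    //   continuation_enter_cleanup(); leave(); ret(lr);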
1181   /// exception handling
1182 
1183   exception_offset = __ pc() - start;
1184   {
1185       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1186 
1187       continuation_enter_cleanup(masm);
1188 
1189       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1190       __ authenticate_return_address(c_rarg1);
1191       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1192 
1193       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1194 
1195       __ mov(r1, r0); // the exception handler
1196       __ mov(r0, r19); // restore return value containing the exception oop

1253     continuation_enter_cleanup(masm);
1254 
1255     __ bind(pinned); // pinned -- return to caller
1256 
1257     // handle pending exception thrown by freeze
1258     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1259     Label ok;
1260     __ cbz(rscratch1, ok);
1261     __ leave();
1262     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1263     __ br(rscratch1);
1264     __ bind(ok);
1265 
1266     __ leave();
1267     __ ret(lr);
1268 
1269     OopMap* map = new OopMap(framesize, 1);
1270     oop_maps->add_gc_map(the_pc - start, map);
1271 }
1272 
1273 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1274   ::continuation_enter_cleanup(masm);
1275 }
1276 
1277 static void gen_special_dispatch(MacroAssembler* masm,
1278                                  const methodHandle& method,
1279                                  const BasicType* sig_bt,
1280                                  const VMRegPair* regs) {
1281   verify_oop_args(masm, method, sig_bt, regs);
1282   vmIntrinsics::ID iid = method->intrinsic_id();
1283 
1284   // Now write the args into the outgoing interpreter space
1285   bool     has_receiver   = false;
1286   Register receiver_reg   = noreg;
1287   int      member_arg_pos = -1;
1288   Register member_reg     = noreg;
1289   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1290   if (ref_kind != 0) {
1291     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1292     member_reg = r19;  // known to be free at this point
1293     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1294   } else if (iid == vmIntrinsics::_invokeBasic) {
1295     has_receiver = true;
1296   } else if (iid == vmIntrinsics::_linkToNative) {

1723   int c_arg = total_c_args - total_in_args;
1724 
1725   // Pre-load a static method's oop into c_rarg1.
1726   if (method->is_static()) {
1727 
1728     //  load oop into a register
1729     __ movoop(c_rarg1,
1730               JNIHandles::make_local(method->method_holder()->java_mirror()));
1731 
1732     // Now handlize the static class mirror; it's known not-null.
1733     __ str(c_rarg1, Address(sp, klass_offset));
1734     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1735 
1736     // Now get the handle
1737     __ lea(c_rarg1, Address(sp, klass_offset));
1738     // and protect the arg if we must spill
1739     c_arg--;
1740   }
1741 
1742   // Change state to native (we save the return address in the thread, since it might not
1743   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1744   // points into the right code segment. It does not have to be the correct return pc.
1745   // We use the same pc/oopMap repeatedly when we call out.
1746 
1747   Label native_return;
1748   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1749     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1750     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1751   } else {
1752     intptr_t the_pc = (intptr_t) __ pc();
1753     oop_maps->add_gc_map(the_pc - start, map);
1754 
1755     __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
1756   }
1757 
1758   Label dtrace_method_entry, dtrace_method_entry_done;
1759   if (DTraceMethodProbes) {
1760     __ b(dtrace_method_entry);
1761     __ bind(dtrace_method_entry_done);
1762   }
1763 
1764   // RedefineClasses() tracing support for obsolete method entry
1765   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1766     // protect the args we've loaded
1767     save_args(masm, total_c_args, c_arg, out_regs);
1768     __ mov_metadata(c_rarg1, method());
1769     __ call_VM_leaf(
1770       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1771       rthread, c_rarg1);
1772     restore_args(masm, total_c_args, c_arg, out_regs);
1773   }
1774 
1775   // Lock a synchronized method
1776 

1814       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1815 
1816       // Hmm should this move to the slow path code area???
1817 
1818       // Test if the oopMark is an obvious stack pointer, i.e.,
1819       //  1) (mark & 3) == 0, and
1820       //  2) sp <= mark < sp + os::pagesize()
1821       // These 3 tests can be done by evaluating the following
1822       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1823       // assuming both stack pointer and pagesize have their
1824       // least significant 2 bits clear.
1825       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1826 
1827       __ sub(swap_reg, sp, swap_reg);
1828       __ neg(swap_reg, swap_reg);
1829       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1830 
1831       // Save the test result; for the recursive case the result is zero
1832       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1833       __ br(Assembler::NE, slow_path_lock);
1834 
1835       __ bind(count);
1836       __ inc_held_monitor_count();
1837     } else {
1838       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1839       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1840     }


1841 
1842     // Slow path will re-enter here
1843     __ bind(lock_done);
1844   }
1845 
1846 
1847   // Finally just about ready to make the JNI call
1848 
1849   // get JNIEnv* which is first argument to native
1850   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1851 
1852   // Now set thread in native
1853   __ mov(rscratch1, _thread_in_native);
1854   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1855   __ stlrw(rscratch1, rscratch2);
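
The stlrw above is a store-release: every write the stub has performed so far (arguments, handles, last_Java_frame) becomes visible before another thread can observe the _thread_in_native state. A minimal C++ analogue of that ordering, using illustrative types and state values rather than HotSpot's:

    #include <atomic>

    enum State : int { in_Java = 1, in_native = 2 };  // illustrative values only

    struct ThreadStub {
      std::atomic<int> state{in_Java};
    };

    // Counterpart of: mov rscratch1, _thread_in_native; stlrw rscratch1, [thread_state]
    inline void transition_to_native(ThreadStub& t) {
      t.state.store(in_native, std::memory_order_release);  // release orders prior writes first
    }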
1856 
1857   __ rt_call(native_func);
1858 

1859   // Verify or restore cpu control state after JNI call
1860   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1861 
1862   // Unpack native results.
1863   switch (ret_type) {
1864   case T_BOOLEAN: __ c2bool(r0);                     break;
1865   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1866   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1867   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1868   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1869   case T_DOUBLE :
1870   case T_FLOAT  :
1871     // Result is in v0; we'll save as needed
1872     break;
1873   case T_ARRAY:                 // Really a handle
1874   case T_OBJECT:                // Really a handle
1875       break; // can't de-handlize until after safepoint check
1876   case T_VOID: break;
1877   case T_LONG: break;
1878   default       : ShouldNotReachHere();

1897     __ dmb(Assembler::ISH);
1898   }
1899 
1900   __ verify_sve_vector_length();
1901 
1902   // Check for safepoint operation in progress and/or pending suspend requests.
1903   {
1904     // No need for acquire as Java threads always disarm themselves.
1905     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
1906     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1907     __ cbnzw(rscratch1, safepoint_in_progress);
1908     __ bind(safepoint_in_progress_done);
1909   }
1910 
1911   // change thread state
1912   __ mov(rscratch1, _thread_in_Java);
1913   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1914   __ stlrw(rscratch1, rscratch2);
1915   __ bind(after_transition);
1916 
1917   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1918     // Check preemption for Object.wait()
1919     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1920     __ cbz(rscratch1, native_return);
1921     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1922     __ br(rscratch1);
1923     __ bind(native_return);
1924 
1925     intptr_t the_pc = (intptr_t) __ pc();
1926     oop_maps->add_gc_map(the_pc - start, map);
1927   }
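
This new block pairs with the set_last_Java_frame(sp, noreg, native_return, ...) above: if the VM parked an alternate resume pc in the thread while it was preempted inside Object.wait(), the stub clears the field and branches there; otherwise it falls through to native_return, where the GC map is registered. A hedged C++ sketch of that decision (the struct and field below are stand-ins, not the real JavaThread layout):

    // Sketch only; in the generated code the "call" is really a tail branch (br rscratch1).
    struct JavaThreadStub {
      void (*preempt_alternate_return)() = nullptr;
    };

    inline void maybe_resume_elsewhere(JavaThreadStub* thread) {
      if (auto resume = thread->preempt_alternate_return) {
        thread->preempt_alternate_return = nullptr;  // str zr, [rthread, offset]
        resume();                                    // br rscratch1
      }
      // falling through corresponds to reaching native_return
    }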
1928 
1929   Label reguard;
1930   Label reguard_done;
1931   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1932   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1933   __ br(Assembler::EQ, reguard);
1934   __ bind(reguard_done);
1935 
1936   // native result, if any, is live
1937 
1938   // Unlock
1939   Label unlock_done;
1940   Label slow_path_unlock;
1941   if (method->is_synchronized()) {
1942 
1943     // Get locked oop from the handle we passed to jni
1944     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1945 
1946     Label done, not_recursive;
1947 
1948     if (LockingMode == LM_LEGACY) {
1949       // Simple recursive lock?
1950       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1951       __ cbnz(rscratch1, not_recursive);
1952       __ dec_held_monitor_count();
1953       __ b(done);
1954     }
1955 
1956     __ bind(not_recursive);
1957 
1958     // Must save r0 if it is live now because cmpxchg must use it
1959     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1960       save_native_result(masm, ret_type, stack_slots);
1961     }
1962 
1963     if (LockingMode == LM_MONITOR) {
1964       __ b(slow_path_unlock);
1965     } else if (LockingMode == LM_LEGACY) {
1966       // get address of the stack lock
1967       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1968       //  get old displaced header
1969       __ ldr(old_hdr, Address(r0, 0));
1970 
1971       // Atomic swap old header if oop still contains the stack lock
1972       Label count;
1973       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1974       __ bind(count);
1975       __ dec_held_monitor_count();
1976     } else {
1977       assert(LockingMode == LM_LIGHTWEIGHT, "");
1978       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);

1979     }
1980 
1981     // slow path re-enters here
1982     __ bind(unlock_done);
1983     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1984       restore_native_result(masm, ret_type, stack_slots);
1985     }
1986 
1987     __ bind(done);
1988   }
1989 
1990   Label dtrace_method_exit, dtrace_method_exit_done;
1991   if (DTraceMethodProbes) {
1992     __ b(dtrace_method_exit);
1993     __ bind(dtrace_method_exit_done);
1994   }
1995 
1996   __ reset_last_Java_frame(false);
1997 
1998   // Unbox oop result, e.g. JNIHandles::resolve result.

2026   // and forward the exception
2027   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2028 
2029   // Slow path locking & unlocking
2030   if (method->is_synchronized()) {
2031 
2032     __ block_comment("Slow path lock {");
2033     __ bind(slow_path_lock);
2034 
2035     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2036     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2037 
2038     // protect the args we've loaded
2039     save_args(masm, total_c_args, c_arg, out_regs);
2040 
2041     __ mov(c_rarg0, obj_reg);
2042     __ mov(c_rarg1, lock_reg);
2043     __ mov(c_rarg2, rthread);
2044 
2045     // Not a leaf but we have last_Java_frame setup as we want
2046     // Force freeze slow path in case we try to preempt. We will pin the
2047     // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
2048     __ push_cont_fastpath();
2049     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2050     __ pop_cont_fastpath();
2051     restore_args(masm, total_c_args, c_arg, out_regs);
2052 
2053 #ifdef ASSERT
2054     { Label L;
2055       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2056       __ cbz(rscratch1, L);
2057       __ stop("no pending exception allowed on exit from monitorenter");
2058       __ bind(L);
2059     }
2060 #endif
2061     __ b(lock_done);
2062 
2063     __ block_comment("} Slow path lock");
2064 
2065     __ block_comment("Slow path unlock {");
2066     __ bind(slow_path_unlock);
2067 
2068     // If we haven't already saved the native result we must save it now, as the result
2069     // registers are still exposed.
2070 

2571     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2572     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2573   }
2574 #endif
2575 }
2576 
2577 // Number of stack slots between incoming argument block and the start of
2578 // a new frame.  The PROLOG must add this many slots to the stack.  The
2579 // EPILOG must remove this many slots. aarch64 needs two slots for
2580 // return address and fp.
2581 // TODO think this is correct but check
2582 uint SharedRuntime::in_preserve_stack_slots() {
2583   return 4;
2584 }
2585 
2586 uint SharedRuntime::out_preserve_stack_slots() {
2587   return 0;
2588 }
2589 
2590 
2591 VMReg SharedRuntime::thread_register() {
2592   return rthread->as_VMReg();
2593 }
2594 
2595 //------------------------------generate_handler_blob------
2596 //
2597 // Generate a special Compile2Runtime blob that saves all registers,
2598 // and sets up an oopmap.
2599 //
2600 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2601   assert(is_polling_page_id(id), "expected a polling page stub id");
2602 
2603   ResourceMark rm;
2604   OopMapSet *oop_maps = new OopMapSet();
2605   OopMap* map;
2606 
2607   // Allocate space for the code.  Setup code generation tools.
2608   const char* name = SharedRuntime::stub_name(id);
2609   CodeBuffer buffer(name, 2048, 1024);
2610   MacroAssembler* masm = new MacroAssembler(&buffer);
2611 
2612   address start   = __ pc();
2613   address call_pc = nullptr;
2614   int frame_size_in_words;