  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
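  // Note: trampoline_call() returns nullptr only when the trampoline stub
  // cannot be allocated, i.e. the code cache is full, hence the check above.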

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

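  // Record the offset of the thaw call so the runtime can locate this call
  // site later, e.g. when resuming a continuation that was preempted while
  // thawing (hedged: the consumers of ContinuationEntry::_thaw_call_pc_offset
  // are not shown in this excerpt).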
  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  /// exception handling

  exception_offset = __ pc() - start;
  {
    __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19

    continuation_enter_cleanup(masm);

    __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
    __ authenticate_return_address(c_rarg1);

  // ...

  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ cbz(rscratch1, ok);
  __ leave();
  __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ br(rscratch1);
  __ bind(ok);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool has_receiver = false;
  Register receiver_reg = noreg;
  int member_arg_pos = -1;
  Register member_reg = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {

  // ...

  // Pre-load a static method's oop into c_rarg1.
  if (method->is_static()) {

    // load oop into a register
    __ movoop(c_rarg1,
              JNIHandles::make_local(method->method_holder()->java_mirror()));

    // Now handlize the static class mirror; it's known not-null.
    __ str(c_rarg1, Address(sp, klass_offset));
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(c_rarg1, Address(sp, klass_offset));
    // and protect the arg if we must spill
    c_arg--;
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal).
  // We use the same pc/oopMap repeatedly when we call out.
  Label resume_pc;
  __ set_last_Java_frame(sp, noreg, resume_pc, rscratch1);
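  // set_last_Java_frame records sp and resume_pc in the thread so that a
  // stack walk started while this thread is in native code finds a walkable
  // last Java frame.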

  Label dtrace_method_entry, dtrace_method_entry_done;
  if (DTraceMethodProbes) {
    __ b(dtrace_method_entry);
    __ bind(dtrace_method_entry_done);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
  }

  // Lock a synchronized method

  // ...

      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

      // Should this move to the slow path code area?

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) sp <= mark < sp + os::vm_page_size()
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - sp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg r0 as the result of cmpxchg
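      // For example, with a 4K page: 3 - 4096 == 0x...f003, so the AND below
      // keeps the low two bits and every bit from bit 12 upwards. The result
      // is zero exactly when (mark - sp) has its low two bits clear and is an
      // unsigned value below the page size, i.e. sp <= mark < sp + 4096.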

      __ sub(swap_reg, sp, swap_reg);
      __ neg(swap_reg, swap_reg);
      __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());

      // Save the test result; for the recursive case, the result is zero
      __ str(swap_reg, Address(lock_reg, mark_word_offset));
      __ br(Assembler::NE, slow_path_lock);
      __ b(lock_done);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
      __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
      __ b(lock_done);
    }
    __ bind(count);
    __ inc_held_monitor_count();

    // Slow path will re-enter here
    __ bind(lock_done);
  }


  // Finally just about ready to make the JNI call

  // get JNIEnv*, which is the first argument to the native method
  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
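  // (stlrw is a store-release: all earlier stores become observable to other
  //  threads no later than this thread-state change)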

  __ rt_call(native_func);

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(r0);          break;
  case T_CHAR   : __ ubfx(r0, r0, 0, 16); break;
  case T_BYTE   : __ sbfx(r0, r0, 0, 8);  break;
  case T_SHORT  : __ sbfx(r0, r0, 0, 16); break;
  case T_INT    : __ sbfx(r0, r0, 0, 32); break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in v0; we'll save it as needed
    break;
  case T_ARRAY  :          // Really a handle
  case T_OBJECT :          // Really a handle
    break;                 // can't de-handlize until after safepoint check
  case T_VOID   : break;
  case T_LONG   : break;
  default       : ShouldNotReachHere();
  // ...
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.

    __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ bind(safepoint_in_progress_done);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
  __ bind(after_transition);

  // Check preemption for Object.wait()
  if (method->is_object_wait0()) {
    Label not_preempted;
    __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    __ cbz(rscratch1, not_preempted);
    __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    __ br(rscratch1);
    __ bind(not_preempted);
  }
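  // Note: resume_pc is the pc that set_last_Java_frame stored above, so
  // stack walks of this frame resolve to the oop map registered just below.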
  __ bind(resume_pc);

  intptr_t the_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(the_pc - start, map);

  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // The native result, if any, is live here.

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    Label done, not_recursive;

    if (LockingMode == LM_LEGACY) {
      // Simple recursive lock?
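      // (with LM_LEGACY a recursive enter stored a zero displaced header
      //  into the lock slot, so zero here means there is no header to restore)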
      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      __ cbnz(rscratch1, not_recursive);
      __ b(done);
    }

    __ bind(not_recursive);

    // Must save r0 if it is live now, because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
      __ b(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get address of the stack lock
      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // get old displaced header
      __ ldr(old_hdr, Address(r0, 0));

      // Atomically swap the old header back if the oop still contains the stack lock
      Label count;
      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
      __ bind(count);
      __ dec_held_monitor_count();
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "");
      __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  Label dtrace_method_exit, dtrace_method_exit_done;
  if (DTraceMethodProbes) {
    __ b(dtrace_method_exit);
    __ bind(dtrace_method_exit_done);
  }

  __ reset_last_Java_frame(false);

  // Unbox oop result, e.g. JNIHandles::resolve result.
  // ...
  // and forward the exception
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    __ block_comment("Slow path lock {");
    __ bind(slow_path_lock);

    // We have last_Java_frame set up, and there are no exceptions, so do a
    // vanilla call, not call_VM.
    // args are (oop obj, BasicLock* lock, JavaThread* thread)

    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);

    __ mov(c_rarg0, obj_reg);
    __ mov(c_rarg1, lock_reg);
    __ mov(c_rarg2, rthread);

    // Not a leaf, but we have last_Java_frame set up as we want
    __ push_cont_fastpath();
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
    __ pop_cont_fastpath();
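    // (push/pop_cont_fastpath bracket this call; the intent appears to be
    //  forcing the slow freeze path if the virtual thread is frozen while
    //  blocked here -- hedged: see JavaThread::_cont_fastpath)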
    restore_args(masm, total_c_args, c_arg, out_regs);

#ifdef ASSERT
    { Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ b(lock_done);

    __ block_comment("} Slow path lock");

    __ block_comment("Slow path unlock {");
    __ bind(slow_path_unlock);

    // If we haven't already saved the native result, we must save it now, as
    // the floating-point registers are still exposed.

  // ...

  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

// Number of stack slots between incoming argument block and the start of
// a new frame. The PROLOG must add this many slots to the stack. The
// EPILOG must remove this many slots. aarch64 needs two 64-bit words for
// the return address and fp, i.e. four 32-bit stack slots.
// TODO: this is believed correct, but verify.
uint SharedRuntime::in_preserve_stack_slots() {
  return 4;
}

uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}

VMReg SharedRuntime::thread_register() {
  return rthread->as_VMReg();
}

#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  // Push self-frame. We get here with a return address in LR,
  // and sp should be 16-byte aligned.
  // Push rfp and retaddr by hand.
  __ protect_return_address();
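  // (protect_return_address signs lr with pointer authentication when
  //  return-address protection is enabled; authenticate_return_address,
  //  used earlier, is its inverse)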
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // we don't expect an arg reg save area
#ifndef PRODUCT