1034
1035 fill_continuation_entry(masm);
1036
1037 __ bnez(c_rarg2, call_thaw);
1038
1039 // Make sure the call is patchable
1040 __ align(NativeInstruction::instruction_size);
1041
1042 const address tr_call = __ reloc_call(resolve);
1043 if (tr_call == nullptr) {
1044 fatal("CodeCache is full at gen_continuation_enter");
1045 }
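// Note on the sequence above (descriptive; reloc_call itself is defined in the
// RISC-V MacroAssembler): reloc_call emits a call with a relocation record, so
// the target -- initially the 'resolve' stub passed in -- can be patched later.
// The preceding align(NativeInstruction::instruction_size) is what keeps the
// call site patchable, and a nullptr result means the code buffer could not be
// grown, which is reported as CodeCache exhaustion here.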
1046
1047 oop_maps->add_gc_map(__ pc() - start, map);
1048 __ post_call_nop();
1049
1050 __ j(exit);
1051
1052 __ bind(call_thaw);
1053
1054 __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1055 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1056 ContinuationEntry::_return_pc_offset = __ pc() - start;
1057 __ post_call_nop();
1058
1059 __ bind(exit);
1060 continuation_enter_cleanup(masm);
1061 __ leave();
1062 __ ret();
1063
1064 // exception handling
1065 exception_offset = __ pc() - start;
1066 {
1067 __ mv(x9, x10); // save return value containing the exception oop in callee-saved x9
1068
1069 continuation_enter_cleanup(masm);
1070
1071 __ ld(c_rarg1, Address(fp, -1 * wordSize)); // return address
1072 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, c_rarg1);
1073
1074 // see OptoRuntime::generate_exception_blob: x10 -- exception oop, x13 -- exception pc
1075
1076 __ mv(x11, x10); // the exception handler
1077 __ mv(x10, x9); // restore return value containing the exception oop
1078 __ verify_oop(x10);
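// Register state at this point, derived from the code above: x10 holds the
// exception oop again (restored from callee-saved x9), and x11 holds the
// handler address, i.e. the return value of
// SharedRuntime::exception_handler_for_return_address(JavaThread*, address),
// which was looked up from the return address loaded at fp - wordSize.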
1079
1134 continuation_enter_cleanup(masm);
1135
1136 __ bind(pinned); // pinned -- return to caller
1137
1138 // handle pending exception thrown by freeze
1139 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1140 Label ok;
1141 __ beqz(t0, ok);
1142 __ leave();
1143 __ la(t0, RuntimeAddress(StubRoutines::forward_exception_entry()));
1144 __ jr(t0);
1145 __ bind(ok);
1146
1147 __ leave();
1148 __ ret();
1149
1150 OopMap* map = new OopMap(framesize, 1);
1151 oop_maps->add_gc_map(the_pc - start, map);
1152 }
1153
1154 static void gen_special_dispatch(MacroAssembler* masm,
1155 const methodHandle& method,
1156 const BasicType* sig_bt,
1157 const VMRegPair* regs) {
1158 verify_oop_args(masm, method, sig_bt, regs);
1159 vmIntrinsics::ID iid = method->intrinsic_id();
1160
1161 // Now write the args into the outgoing interpreter space
1162 bool has_receiver = false;
1163 Register receiver_reg = noreg;
1164 int member_arg_pos = -1;
1165 Register member_reg = noreg;
1166 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1167 if (ref_kind != 0) {
1168 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1169 member_reg = x9; // known to be free at this point
1170 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1171 } else if (iid == vmIntrinsics::_invokeBasic) {
1172 has_receiver = true;
1173 } else if (iid == vmIntrinsics::_linkToNative) {
1616 int c_arg = total_c_args - total_in_args;
1617
1618 // Pre-load a static method's oop into c_rarg1.
1619 if (method->is_static()) {
1620
1621 // load oop into a register
1622 __ movoop(c_rarg1,
1623 JNIHandles::make_local(method->method_holder()->java_mirror()));
1624
1625 // Now handlize the static class mirror; it's known not-null.
1626 __ sd(c_rarg1, Address(sp, klass_offset));
1627 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1628
1629 // Now get the handle
1630 __ la(c_rarg1, Address(sp, klass_offset));
1631 // and protect the arg if we must spill
1632 c_arg--;
1633 }
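// What "handlize" means here (explanatory note): instead of handing the mirror
// oop itself to native code, the oop is stored into a known stack slot
// (klass_offset), that slot is recorded in the OopMap so GC can find and
// update it, and the *address* of the slot is passed in c_rarg1 as the jclass
// handle. Dereferencing the handle always yields the mirror's current
// location, even if GC moves it during the native call.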
1634
1635 // Change state to native (we save the return address in the thread, since it might not
1636 // be pushed on the stack when we do a stack traversal).
1637 // We use the same pc/oopMap repeatedly when we call out.
1638
1639 Label native_return;
1640 __ set_last_Java_frame(sp, noreg, native_return, t0);
1641
1642 Label dtrace_method_entry, dtrace_method_entry_done;
1643 if (DTraceMethodProbes) {
1644 __ j(dtrace_method_entry);
1645 __ bind(dtrace_method_entry_done);
1646 }
1647
1648 // RedefineClasses() tracing support for obsolete method entry
1649 if (log_is_enabled(Trace, redefine, class, obsolete)) {
1650 // protect the args we've loaded
1651 save_args(masm, total_c_args, c_arg, out_regs);
1652 __ mov_metadata(c_rarg1, method());
1653 __ call_VM_leaf(
1654 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1655 xthread, c_rarg1);
1656 restore_args(masm, total_c_args, c_arg, out_regs);
1657 }
1658
1659 // Lock a synchronized method
1660
1696 __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1697
1698 // src -> dest if dest == x10 else x10 <- dest
1699 __ cmpxchg_obj_header(x10, lock_reg, obj_reg, lock_tmp, count, /*fallthrough*/nullptr);
1700
1701 // Test if the oopMark is an obvious stack pointer, i.e.,
1702 // 1) (mark & 3) == 0, and
1703 // 2) sp <= mark < sp + os::vm_page_size()
1704 // These checks can be done by evaluating the following
1705 // expression: ((mark - sp) & (3 - os::vm_page_size())),
1706 // assuming both the stack pointer and the page size have their
1707 // least significant 2 bits clear.
1708 // NOTE: the oopMark is in swap_reg (x10) as the result of cmpxchg
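// Worked example of the expression (illustration only; assumes a 4 KiB page,
// i.e. os::vm_page_size() == 4096): 3 - 4096 == -4093 == 0x...fffff003, so the
// AND keeps bits 0-1 plus every bit at or above bit 12. The result is zero
// exactly when (mark - sp) has its low two bits clear and 0 <= mark - sp < 4096,
// i.e. the displaced mark is an aligned address within one page above sp -- a
// stack lock in this thread's own frames, treated as the recursive case below.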
1709
1710 __ sub(swap_reg, swap_reg, sp);
1711 __ andi(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1712
1713 // Save the test result; for the recursive case, the result is zero
1714 __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1715 __ bnez(swap_reg, slow_path_lock);
1716 } else {
1717 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1718 __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1719 }
1720
1721 __ bind(count);
1722 __ increment(Address(xthread, JavaThread::held_monitor_count_offset()));
1723
1724 // Slow path will re-enter here
1725 __ bind(lock_done);
1726 }
1727
1728
1729 // Finally just about ready to make the JNI call
1730
1731 // get JNIEnv* which is first argument to native
1732 __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1733
1734 // Now set thread in native
1735 __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1736 __ mv(t0, _thread_in_native);
1737 __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1738 __ sw(t0, Address(t1));
1739
1740 __ rt_call(native_func);
1741
1742 __ bind(native_return);
1743
1744 intptr_t return_pc = (intptr_t) __ pc();
1745 oop_maps->add_gc_map(return_pc - start, map);
1746
1747 // Verify or restore cpu control state after JNI call
1748 __ restore_cpu_control_state_after_jni(t0);
1749
1750 // Unpack native results.
1751 if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
1752 __ cast_primitive_type(ret_type, x10);
1753 }
1754
1755 Label safepoint_in_progress, safepoint_in_progress_done;
1756 Label after_transition;
1757
1758 // Switch thread to "native transition" state before reading the synchronization state.
1759 // This additional state is necessary because reading and testing the synchronization
1760 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1761 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1762 // VM thread changes sync state to synchronizing and suspends threads for GC.
1763 // Thread A is resumed to finish this native method, but doesn't block here since it
1764 // didn't see any synchronization in progress, and escapes.
1765 __ mv(t0, _thread_in_native_trans);
1766
1777 // global SafepointSynchronize::_state flag is ordered after this load
1778 // of the thread-local polling word. We don't want this poll to
1779 // return false (i.e. not safepointing) and a later poll of the global
1780 // SafepointSynchronize::_state spuriously to return true.
1781 // This is to avoid a race when we're in a native->Java transition
1782 // racing the code which wakes up from a safepoint.
1783
1784 __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1785 __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1786 __ bnez(t0, safepoint_in_progress);
1787 __ bind(safepoint_in_progress_done);
1788 }
1789
1790 // change thread state
1791 __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1792 __ mv(t0, _thread_in_Java);
1793 __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1794 __ sw(t0, Address(t1));
1795 __ bind(after_transition);
1796
1797 Label reguard;
1798 Label reguard_done;
1799 __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1800 __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1801 __ beq(t0, t1, reguard);
1802 __ bind(reguard_done);
1803
1804 // The native result, if any, is live here
1805
1806 // Unlock
1807 Label unlock_done;
1808 Label slow_path_unlock;
1809 if (method->is_synchronized()) {
1810
1811 // Get locked oop from the handle we passed to jni
1812 __ ld(obj_reg, Address(oop_handle_reg, 0));
1813
1814 Label done, not_recursive;
1815
1816 if (LockingMode == LM_LEGACY) {
1817 // Simple recursive lock?
1818 __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1819 __ bnez(t0, not_recursive);
1820 __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
1821 __ j(done);
1822 }
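// Recursive case (ties back to the locking fast path above): for a recursive
// LM_LEGACY lock the fast path stored zero into the BasicLock's displaced
// header slot, so finding zero here means nothing has to be written back to
// the object header -- only the held monitor count is adjusted.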
1823
1824 __ bind(not_recursive);
1825
1826 // Must save x10 if it is live now because cmpxchg must use it
1827 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1828 save_native_result(masm, ret_type, stack_slots);
1829 }
1830
1831 if (LockingMode == LM_MONITOR) {
1832 __ j(slow_path_unlock);
1833 } else if (LockingMode == LM_LEGACY) {
1834 // get address of the stack lock
1835 __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1836 // get old displaced header
1837 __ ld(old_hdr, Address(x10, 0));
1838
1839 // Atomic swap old header if oop still contains the stack lock
1840 Label count;
1841 __ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
1842 __ bind(count);
1843 __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
1844 } else {
1845 assert(LockingMode == LM_LIGHTWEIGHT, "");
1846 __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1847 __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
1848 }
1849
1850 // slow path re-enters here
1851 __ bind(unlock_done);
1852 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1853 restore_native_result(masm, ret_type, stack_slots);
1854 }
1855
1856 __ bind(done);
1857 }
1858
1859 Label dtrace_method_exit, dtrace_method_exit_done;
1860 if (DTraceMethodProbes) {
1861 __ j(dtrace_method_exit);
1862 __ bind(dtrace_method_exit_done);
1863 }
1864
1865 __ reset_last_Java_frame(false);
1866
1867 // Unbox oop result, e.g. JNIHandles::resolve result.
1896 // and forward the exception
1897 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1898
1899 // Slow path locking & unlocking
1900 if (method->is_synchronized()) {
1901
1902 __ block_comment("Slow path lock {");
1903 __ bind(slow_path_lock);
1904
1905 // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
1906 // args are (oop obj, BasicLock* lock, JavaThread* thread)
1907
1908 // protect the args we've loaded
1909 save_args(masm, total_c_args, c_arg, out_regs);
1910
1911 __ mv(c_rarg0, obj_reg);
1912 __ mv(c_rarg1, lock_reg);
1913 __ mv(c_rarg2, xthread);
1914
1915 // Not a leaf but we have last_Java_frame setup as we want
1916 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1917 restore_args(masm, total_c_args, c_arg, out_regs);
1918
1919 #ifdef ASSERT
1920 { Label L;
1921 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1922 __ beqz(t0, L);
1923 __ stop("no pending exception allowed on exit from monitorenter");
1924 __ bind(L);
1925 }
1926 #endif
1927 __ j(lock_done);
1928
1929 __ block_comment("} Slow path lock");
1930
1931 __ block_comment("Slow path unlock {");
1932 __ bind(slow_path_unlock);
1933
1934 if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1935 save_native_result(masm, ret_type, stack_slots);
1936 }
2427 #if INCLUDE_JVMCI
2428 if (EnableJVMCI) {
2429 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2430 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2431 }
2432 #endif
2433 }
2434
2435 // Number of stack slots between incoming argument block and the start of
2436 // a new frame. The PROLOG must add this many slots to the stack. The
2437 // EPILOG must remove this many slots.
2438 // RISCV needs two words for RA (return address) and FP (frame pointer).
2439 uint SharedRuntime::in_preserve_stack_slots() {
2440 return 2 * VMRegImpl::slots_per_word;
2441 }
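// Worked out (assuming the usual 32-bit VMReg stack slots, i.e.
// VMRegImpl::slots_per_word == 2 on a 64-bit VM): 2 words * 2 slots/word ==
// 4 slots == 16 bytes, one 8-byte word each for RA and FP.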
2442
2443 uint SharedRuntime::out_preserve_stack_slots() {
2444 return 0;
2445 }
2446
2447 //------------------------------generate_handler_blob------
2448 //
2449 // Generate a special Compile2Runtime blob that saves all registers,
2450 // and sets up an oopmap.
2451 //
2452 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2453 assert(is_polling_page_id(id), "expected a polling page stub id");
2454
2455 ResourceMark rm;
2456 OopMapSet *oop_maps = new OopMapSet();
2457 assert_cond(oop_maps != nullptr);
2458 OopMap* map = nullptr;
2459
2460 // Allocate space for the code. Setup code generation tools.
2461 const char* name = SharedRuntime::stub_name(id);
2462 CodeBuffer buffer(name, 2048, 1024);
2463 MacroAssembler* masm = new MacroAssembler(&buffer);
2464 assert_cond(masm != nullptr);
2465
2466 address start = __ pc();
|
1034
1035 fill_continuation_entry(masm);
1036
1037 __ bnez(c_rarg2, call_thaw);
1038
1039 // Make sure the call is patchable
1040 __ align(NativeInstruction::instruction_size);
1041
1042 const address tr_call = __ reloc_call(resolve);
1043 if (tr_call == nullptr) {
1044 fatal("CodeCache is full at gen_continuation_enter");
1045 }
1046
1047 oop_maps->add_gc_map(__ pc() - start, map);
1048 __ post_call_nop();
1049
1050 __ j(exit);
1051
1052 __ bind(call_thaw);
1053
1054 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1055 __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1056 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1057 ContinuationEntry::_return_pc_offset = __ pc() - start;
1058 __ post_call_nop();
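// Note on the recorded offsets (descriptive): _thaw_call_pc_offset marks the
// call into cont_thaw and _return_pc_offset marks the instruction right after
// it (the thaw call's return pc, which also carries the GC map added above).
// Both are byte offsets from the start of the enter stub, recorded so the
// runtime can locate these fixed pcs in the stub later on.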
1059
1060 __ bind(exit);
1061 ContinuationEntry::_cleanup_offset = __ pc() - start;
1062 continuation_enter_cleanup(masm);
1063 __ leave();
1064 __ ret();
1065
1066 // exception handling
1067 exception_offset = __ pc() - start;
1068 {
1069 __ mv(x9, x10); // save return value containing the exception oop in callee-saved x9
1070
1071 continuation_enter_cleanup(masm);
1072
1073 __ ld(c_rarg1, Address(fp, -1 * wordSize)); // return address
1074 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, c_rarg1);
1075
1076 // see OptoRuntime::generate_exception_blob: x10 -- exception oop, x13 -- exception pc
1077
1078 __ mv(x11, x10); // the exception handler
1079 __ mv(x10, x9); // restore return value containing the exception oop
1080 __ verify_oop(x10);
1081
1136 continuation_enter_cleanup(masm);
1137
1138 __ bind(pinned); // pinned -- return to caller
1139
1140 // handle pending exception thrown by freeze
1141 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1142 Label ok;
1143 __ beqz(t0, ok);
1144 __ leave();
1145 __ la(t0, RuntimeAddress(StubRoutines::forward_exception_entry()));
1146 __ jr(t0);
1147 __ bind(ok);
1148
1149 __ leave();
1150 __ ret();
1151
1152 OopMap* map = new OopMap(framesize, 1);
1153 oop_maps->add_gc_map(the_pc - start, map);
1154 }
1155
1156 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1157 ::continuation_enter_cleanup(masm);
1158 }
1159
1160 static void gen_special_dispatch(MacroAssembler* masm,
1161 const methodHandle& method,
1162 const BasicType* sig_bt,
1163 const VMRegPair* regs) {
1164 verify_oop_args(masm, method, sig_bt, regs);
1165 vmIntrinsics::ID iid = method->intrinsic_id();
1166
1167 // Now write the args into the outgoing interpreter space
1168 bool has_receiver = false;
1169 Register receiver_reg = noreg;
1170 int member_arg_pos = -1;
1171 Register member_reg = noreg;
1172 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1173 if (ref_kind != 0) {
1174 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1175 member_reg = x9; // known to be free at this point
1176 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1177 } else if (iid == vmIntrinsics::_invokeBasic) {
1178 has_receiver = true;
1179 } else if (iid == vmIntrinsics::_linkToNative) {
1622 int c_arg = total_c_args - total_in_args;
1623
1624 // Pre-load a static method's oop into c_rarg1.
1625 if (method->is_static()) {
1626
1627 // load oop into a register
1628 __ movoop(c_rarg1,
1629 JNIHandles::make_local(method->method_holder()->java_mirror()));
1630
1631 // Now handlize the static class mirror; it's known not-null.
1632 __ sd(c_rarg1, Address(sp, klass_offset));
1633 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1634
1635 // Now get the handle
1636 __ la(c_rarg1, Address(sp, klass_offset));
1637 // and protect the arg if we must spill
1638 c_arg--;
1639 }
1640
1641 // Change state to native (we save the return address in the thread, since it might not
1642 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1643 // points into the right code segment. It does not have to be the correct return pc.
1644 // We use the same pc/oopMap repeatedly when we call out.
1645
1646 Label native_return;
1647 if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1648 // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1649 __ set_last_Java_frame(sp, noreg, native_return, t0);
1650 } else {
1651 intptr_t the_pc = (intptr_t) __ pc();
1652 oop_maps->add_gc_map(the_pc - start, map);
1653
1654 __ set_last_Java_frame(sp, noreg, __ pc(), t0);
1655 }
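// Summary of the two cases above (no new logic): when the method is
// Object.wait0 and locking is not LM_LEGACY, the wait may be preempted, so the
// recorded last_Java_pc is the native_return label -- the pc we resume at --
// and the matching GC map is added at native_return further down. Otherwise
// the current pc is recorded and its GC map is added immediately; that single
// pc/oopMap pair is then reused for the whole native call, as noted above.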
1656
1657 Label dtrace_method_entry, dtrace_method_entry_done;
1658 if (DTraceMethodProbes) {
1659 __ j(dtrace_method_entry);
1660 __ bind(dtrace_method_entry_done);
1661 }
1662
1663 // RedefineClasses() tracing support for obsolete method entry
1664 if (log_is_enabled(Trace, redefine, class, obsolete)) {
1665 // protect the args we've loaded
1666 save_args(masm, total_c_args, c_arg, out_regs);
1667 __ mov_metadata(c_rarg1, method());
1668 __ call_VM_leaf(
1669 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1670 xthread, c_rarg1);
1671 restore_args(masm, total_c_args, c_arg, out_regs);
1672 }
1673
1674 // Lock a synchronized method
1675
1711 __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1712
1713 // src -> dest if dest == x10 else x10 <- dest
1714 __ cmpxchg_obj_header(x10, lock_reg, obj_reg, lock_tmp, count, /*fallthrough*/nullptr);
1715
1716 // Test if the oopMark is an obvious stack pointer, i.e.,
1717 // 1) (mark & 3) == 0, and
1718 // 2) sp <= mark < sp + os::vm_page_size()
1719 // These checks can be done by evaluating the following
1720 // expression: ((mark - sp) & (3 - os::vm_page_size())),
1721 // assuming both the stack pointer and the page size have their
1722 // least significant 2 bits clear.
1723 // NOTE: the oopMark is in swap_reg (x10) as the result of cmpxchg
1724
1725 __ sub(swap_reg, swap_reg, sp);
1726 __ andi(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1727
1728 // Save the test result; for the recursive case, the result is zero
1729 __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1730 __ bnez(swap_reg, slow_path_lock);
1731
1732 __ bind(count);
1733 __ inc_held_monitor_count();
1734 } else {
1735 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1736 __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1737 }
1738
1739 // Slow path will re-enter here
1740 __ bind(lock_done);
1741 }
1742
1743
1744 // Finally just about ready to make the JNI call
1745
1746 // get JNIEnv* which is first argument to native
1747 __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1748
1749 // Now set thread in native
1750 __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1751 __ mv(t0, _thread_in_native);
1752 __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1753 __ sw(t0, Address(t1));
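// Ordering note (describes the fence above): the store publishing
// _thread_in_native must become visible only after all earlier Java-visible
// loads and stores have completed, hence the LoadStore|StoreStore barrier
// before the sw. Once a safepointing VM thread observes _thread_in_native it
// no longer waits for this thread, so no earlier heap access may be reordered
// after this store.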
1754
1755 __ rt_call(native_func);
1756
1757 // Verify or restore cpu control state after JNI call
1758 __ restore_cpu_control_state_after_jni(t0);
1759
1760 // Unpack native results.
1761 if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
1762 __ cast_primitive_type(ret_type, x10);
1763 }
1764
1765 Label safepoint_in_progress, safepoint_in_progress_done;
1766 Label after_transition;
1767
1768 // Switch thread to "native transition" state before reading the synchronization state.
1769 // This additional state is necessary because reading and testing the synchronization
1770 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1771 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1772 // VM thread changes sync state to synchronizing and suspends threads for GC.
1773 // Thread A is resumed to finish this native method, but doesn't block here since it
1774 // didn't see any synchronization in progress, and escapes.
1775 __ mv(t0, _thread_in_native_trans);
1776
1787 // global SafepointSynchronize::_state flag is ordered after this load
1788 // of the thread-local polling word. We don't want this poll to
1789 // return false (i.e. not safepointing) and a later poll of the global
1790 // SafepointSynchronize::_state spuriously to return true.
1791 // This is to avoid a race when we're in a native->Java transition
1792 // racing the code which wakes up from a safepoint.
1793
1794 __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1795 __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1796 __ bnez(t0, safepoint_in_progress);
1797 __ bind(safepoint_in_progress_done);
1798 }
1799
1800 // change thread state
1801 __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1802 __ mv(t0, _thread_in_Java);
1803 __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1804 __ sw(t0, Address(t1));
1805 __ bind(after_transition);
1806
1807 if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1808 // Check preemption for Object.wait()
1809 __ ld(t0, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1810 __ beqz(t0, native_return);
1811 __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1812 __ jr(t0);
1813 __ bind(native_return);
1814
1815 intptr_t the_pc = (intptr_t) __ pc();
1816 oop_maps->add_gc_map(the_pc - start, map);
1817 }
1818
1819 Label reguard;
1820 Label reguard_done;
1821 __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1822 __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1823 __ beq(t0, t1, reguard);
1824 __ bind(reguard_done);
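// Background (the reguard path itself is emitted further down, outside this
// excerpt): stack_guard_yellow_reserved_disabled indicates the yellow/reserved
// guard pages are currently disabled -- they are disabled when the guard zone
// is hit -- and they must be re-enabled before returning to Java code that
// relies on them for stack-overflow detection.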
1825
1826 // The native result, if any, is live here
1827
1828 // Unlock
1829 Label unlock_done;
1830 Label slow_path_unlock;
1831 if (method->is_synchronized()) {
1832
1833 // Get locked oop from the handle we passed to jni
1834 __ ld(obj_reg, Address(oop_handle_reg, 0));
1835
1836 Label done, not_recursive;
1837
1838 if (LockingMode == LM_LEGACY) {
1839 // Simple recursive lock?
1840 __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1841 __ bnez(t0, not_recursive);
1842 __ dec_held_monitor_count();
1843 __ j(done);
1844 }
1845
1846 __ bind(not_recursive);
1847
1848 // Must save x10 if it is live now because cmpxchg must use it
1849 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1850 save_native_result(masm, ret_type, stack_slots);
1851 }
1852
1853 if (LockingMode == LM_MONITOR) {
1854 __ j(slow_path_unlock);
1855 } else if (LockingMode == LM_LEGACY) {
1856 // get address of the stack lock
1857 __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1858 // get old displaced header
1859 __ ld(old_hdr, Address(x10, 0));
1860
1861 // Atomic swap old header if oop still contains the stack lock
1862 Label count;
1863 __ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
1864 __ bind(count);
1865 __ dec_held_monitor_count();
1866 } else {
1867 assert(LockingMode == LM_LIGHTWEIGHT, "");
1868 __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1869 }
1870
1871 // slow path re-enters here
1872 __ bind(unlock_done);
1873 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1874 restore_native_result(masm, ret_type, stack_slots);
1875 }
1876
1877 __ bind(done);
1878 }
1879
1880 Label dtrace_method_exit, dtrace_method_exit_done;
1881 if (DTraceMethodProbes) {
1882 __ j(dtrace_method_exit);
1883 __ bind(dtrace_method_exit_done);
1884 }
1885
1886 __ reset_last_Java_frame(false);
1887
1888 // Unbox oop result, e.g. JNIHandles::resolve result.
1917 // and forward the exception
1918 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1919
1920 // Slow path locking & unlocking
1921 if (method->is_synchronized()) {
1922
1923 __ block_comment("Slow path lock {");
1924 __ bind(slow_path_lock);
1925
1926 // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
1927 // args are (oop obj, BasicLock* lock, JavaThread* thread)
1928
1929 // protect the args we've loaded
1930 save_args(masm, total_c_args, c_arg, out_regs);
1931
1932 __ mv(c_rarg0, obj_reg);
1933 __ mv(c_rarg1, lock_reg);
1934 __ mv(c_rarg2, xthread);
1935
1936 // Not a leaf but we have last_Java_frame setup as we want
1937 // Force freeze slow path in case we try to preempt. We will pin the
1938 // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
1939 __ push_cont_fastpath();
1940 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1941 __ pop_cont_fastpath();
1942 restore_args(masm, total_c_args, c_arg, out_regs);
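// Note on the push/pop_cont_fastpath pair above (descriptive): it brackets the
// monitor-enter call with a window in which freezing must take the slow path,
// so an attempted preemption during complete_monitor_locking_C pins the
// virtual thread to its carrier rather than freezing a frame that is in the
// middle of acquiring a monitor (see the comment above and
// FreezeBase::recurse_freeze_native_frame()).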
1943
1944 #ifdef ASSERT
1945 { Label L;
1946 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1947 __ beqz(t0, L);
1948 __ stop("no pending exception allowed on exit from monitorenter");
1949 __ bind(L);
1950 }
1951 #endif
1952 __ j(lock_done);
1953
1954 __ block_comment("} Slow path lock");
1955
1956 __ block_comment("Slow path unlock {");
1957 __ bind(slow_path_unlock);
1958
1959 if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1960 save_native_result(masm, ret_type, stack_slots);
1961 }
2452 #if INCLUDE_JVMCI
2453 if (EnableJVMCI) {
2454 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2455 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2456 }
2457 #endif
2458 }
2459
2460 // Number of stack slots between incoming argument block and the start of
2461 // a new frame. The PROLOG must add this many slots to the stack. The
2462 // EPILOG must remove this many slots.
2463 // RISCV needs two words for RA (return address) and FP (frame pointer).
2464 uint SharedRuntime::in_preserve_stack_slots() {
2465 return 2 * VMRegImpl::slots_per_word;
2466 }
2467
2468 uint SharedRuntime::out_preserve_stack_slots() {
2469 return 0;
2470 }
2471
2472 VMReg SharedRuntime::thread_register() {
2473 return xthread->as_VMReg();
2474 }
2475
2476 //------------------------------generate_handler_blob------
2477 //
2478 // Generate a special Compile2Runtime blob that saves all registers,
2479 // and sets up an oopmap.
2480 //
2481 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2482 assert(is_polling_page_id(id), "expected a polling page stub id");
2483
2484 ResourceMark rm;
2485 OopMapSet *oop_maps = new OopMapSet();
2486 assert_cond(oop_maps != nullptr);
2487 OopMap* map = nullptr;
2488
2489 // Allocate space for the code. Setup code generation tools.
2490 const char* name = SharedRuntime::stub_name(id);
2491 CodeBuffer buffer(name, 2048, 1024);
2492 MacroAssembler* masm = new MacroAssembler(&buffer);
2493 assert_cond(masm != nullptr);
2494
2495 address start = __ pc();
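// Context for the setup above (descriptive note): the resulting SafepointBlob
// is what a thread executes when it is stopped at the polling page. The rest
// of the generator, beyond this excerpt, saves all registers into the frame,
// records their locations in the OopMap added to oop_maps (so GC can visit
// oops held in registers at the polling pc), calls call_ptr, and restores the
// registers afterwards.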
|