Label slow_path_lock;
Label lock_done;

if (method->is_synchronized()) {
  Label count;

  const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

  // Get the handle (the 2nd argument)
  __ mv(oop_handle_reg, c_rarg1);

  // Get address of the box

  __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

  // Load the oop from the handle
  __ ld(obj_reg, Address(oop_handle_reg, 0));

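  // Lock acquisition, three strategies (comment sketch; UseFastLocking is the
  // new lightweight-locking flag, the other two are pre-existing behavior):
  //  - UseHeavyMonitors: never lock inline, always take the runtime slow path.
  //  - UseFastLocking:   lock state lives in the mark word plus the thread's
  //    lock stack; no displaced header is written into the on-stack box.
  //  - otherwise:        legacy stack locking, which CASes the address of the
  //    BasicLock box (holding the displaced mark word) into the object header.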
  if (!UseHeavyMonitors) {
    if (UseFastLocking) {
      __ ld(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ fast_lock(obj_reg, swap_reg, tmp, t0, slow_path_lock);
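      // fast_lock consumes the mark word loaded into swap_reg; on success it
      // falls through with the object pushed onto the thread's lock stack,
      // and it branches to slow_path_lock when the object is already locked
      // or the CAS on the mark word fails. (Summary of the expected contract.)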
    } else {
      // Load (object->mark() | 1) into swap_reg %x10
      __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ ori(swap_reg, t0, 1);

      // Save (object->mark() | 1) into BasicLock's displaced header
      __ sd(swap_reg, Address(lock_reg, mark_word_offset));

      // CAS on the object header: if it still equals x10 (the expected mark),
      // install lock_reg (the box address); otherwise x10 receives the
      // current header value.
      __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL);

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) sp <= mark < sp + os::vm_page_size()
      // These 3 tests (the alignment check and the two bounds checks) can be
      // done by evaluating the following expression:
      //   ((mark - sp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their least
      // significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %x10 as the result of cmpxchg
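      // Worked example with a 4 KiB page: 3 - 4096 == -4093 == 0xfffffffffffff003,
      // so the AND keeps the low 2 bits plus every bit at or above the page
      // size; the result is zero iff the mark is suitably aligned and
      // sp <= mark < sp + 4096, i.e. the mark points just above sp into our
      // own stack (a recursive stack lock).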

      __ sub(swap_reg, swap_reg, sp);
      __ andi(swap_reg, swap_reg, 3 - (int)os::vm_page_size());

      // Save the test result; for the recursive case, the result is zero
      __ sd(swap_reg, Address(lock_reg, mark_word_offset));
      __ bnez(swap_reg, slow_path_lock);
    }
  } else {
    __ j(slow_path_lock);
  }

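  // Success on either inline fast path lands here: record the newly held
  // monitor so held_monitor_count stays balanced with the unlock path below.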
  __ bind(count);
  __ increment(Address(xthread, JavaThread::held_monitor_count_offset()));

  // Slow path will re-enter here
  __ bind(lock_done);
}


// Finally just about ready to make the JNI call

// Get JNIEnv*, which is the first argument to the native method
__ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
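// (The JNIEnv is embedded in the JavaThread, so its address is computed with
// a plain la from xthread rather than loaded from memory.)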

// Now set thread in native
__ la(t1, Address(xthread, JavaThread::thread_state_offset()));
__ mv(t0, _thread_in_native);

// ... (the state store, the native call itself, and the transition back to
// _thread_in_Java with its safepoint check are elided in this excerpt) ...

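// If the native code overflowed into the yellow zone, the stack guard pages
// were disabled and must be re-armed before returning to Java. The
// out-of-line reguard block (not shown in this excerpt) calls
// SharedRuntime::reguard_yellow_pages and jumps back to reguard_done.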
Label reguard;
Label reguard_done;
__ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
__ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
__ beq(t0, t1, reguard);
__ bind(reguard_done);

// The native result, if any, is live here

// Unlock
Label unlock_done;
Label slow_path_unlock;
if (method->is_synchronized()) {

  // Get the locked oop from the handle we passed to JNI
  __ ld(obj_reg, Address(oop_handle_reg, 0));

  Label done, not_recursive;

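  // Legacy stack locking marks a recursive acquisition by storing zero in the
  // box's displaced-header slot (see the page test above), so a zero there
  // means this frame can release the lock without touching the object header.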
  if (!UseHeavyMonitors && !UseFastLocking) {
    // Simple recursive lock?
    __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ bnez(t0, not_recursive);
    __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
    __ j(done);
  }

  __ bind(not_recursive);

  // Must save x10 if it is live now, because cmpxchg must use it
  if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
    save_native_result(masm, ret_type, stack_slots);
  }

  if (!UseHeavyMonitors) {
    if (UseFastLocking) {
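      // fast_unlock takes the current mark word in old_hdr and, on success,
      // restores the unlocked mark and pops the oop from the thread's lock
      // stack; any mismatch (e.g. an inflated monitor) branches to
      // slow_path_unlock. (Summary of the expected contract.)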
      __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
    } else {
      // Get address of the stack lock
      __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // Get the old displaced header
      __ ld(old_hdr, Address(x10, 0));

      // Atomically swap the old header back in if the oop still contains
      // the stack lock
      Label count;
      __ cmpxchg_obj_header(x10, old_hdr, obj_reg, t0, count, &slow_path_unlock);
      __ bind(count);
    }
    __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
  } else {
    __ j(slow_path_unlock);
  }

  // Slow path re-enters here
  __ bind(unlock_done);
  if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
    restore_native_result(masm, ret_type, stack_slots);
  }

  __ bind(done);
}

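// DTraceMethodProbes is a global flag tested at run time; la_patchable emits
// a relocatable auipc for its far address and returns the low 12-bit offset
// in `offset` for the subsequent load of the flag byte.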
Label dtrace_method_exit, dtrace_method_exit_done;
{
  ExternalAddress target((address)&DTraceMethodProbes);
  __ relocate(target.rspec(), [&] {
    int32_t offset;
    __ la_patchable(t0, target, offset);