1758 const Register tmp = lr;
1759
1760 Label slow_path_lock;
1761 Label lock_done;
1762
1763 if (method->is_synchronized()) {
1764 Label count;
1765 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1766
1767 // Get the handle (the 2nd argument)
1768 __ mov(oop_handle_reg, c_rarg1);
1769
1770 // Get address of the box
1771
1772 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1773
1774 // Load the oop from the handle
1775 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1776
1777 if (!UseHeavyMonitors) {
1778 // Load (object->mark() | 1) into swap_reg %r0
1779 __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1780 __ orr(swap_reg, rscratch1, 1);
1781
1782 // Save (object->mark() | 1) into BasicLock's displaced header
1783 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1784
1785 // src -> dest iff dest == r0 else r0 <- dest
1786 __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
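     // On success the object header now points at our BasicLock and we
     // branch to count; on failure we fall through with the current mark
     // word left in r0 (see the NOTE below).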
1787
1788 // Hmm should this move to the slow path code area???
1789
1790 // Test if the oopMark is an obvious stack pointer, i.e.,
1791 // 1) (mark & 3) == 0, and
1792 // 2) sp <= mark < sp + os::vm_page_size()
1793 // These 3 tests can be done by evaluating the following
1794 // expression: ((mark - sp) & (3 - os::vm_page_size())),
1795 // assuming both stack pointer and pagesize have their
1796 // least significant 2 bits clear.
1797 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1798
1799 __ sub(swap_reg, sp, swap_reg);
1800 __ neg(swap_reg, swap_reg);
1801 __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1802
1803 // Save the test result; for the recursive case the result is zero
1804 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1805 __ br(Assembler::NE, slow_path_lock);
1806 } else {
1807 __ b(slow_path_lock);
1808 }
1809 __ bind(count);
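     // Successful fast lock: keep the thread's held monitor count in sync.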
1810 __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1811
1812 // Slow path will re-enter here
1813 __ bind(lock_done);
1814 }
1815
1816
1817 // Finally just about ready to make the JNI call
1818
1819 // Get JNIEnv*, which is the first argument to the native call
1820 __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1821
1822 // Now set thread in native
1823 __ mov(rscratch1, _thread_in_native);
1824 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1825 __ stlrw(rscratch1, rscratch2);
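     // stlrw is a store-release, so all prior writes are visible before
     // other threads can observe this thread in the _thread_in_native state.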
1896
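     // If a stack overflow disabled the yellow/reserved guard zone, re-enable
     // it before returning to Java.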
1897 Label reguard;
1898 Label reguard_done;
1899 __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1900 __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1901 __ br(Assembler::EQ, reguard);
1902 __ bind(reguard_done);
1903
1904 // The native result, if any, is live here
1905
1906 // Unlock
1907 Label unlock_done;
1908 Label slow_path_unlock;
1909 if (method->is_synchronized()) {
1910
1911 // Get locked oop from the handle we passed to jni
1912 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1913
1914 Label done, not_recursive;
1915
1916 if (!UseHeavyMonitors) {
1917 // Simple recursive lock?
1918 __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1919 __ cbnz(rscratch1, not_recursive);
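     // A zero displaced header means this was a recursive stack lock:
     // nothing to write back to the object, just undo the monitor count.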
1920 __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1921 __ b(done);
1922 }
1923
1924 __ bind(not_recursive);
1925
1926 // Must save r0 if it is live now because cmpxchg must use it
1927 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1928 save_native_result(masm, ret_type, stack_slots);
1929 }
1930
1931 if (!UseHeavyMonitors) {
1932 // get address of the stack lock
1933 __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1934 // get old displaced header
1935 __ ldr(old_hdr, Address(r0, 0));
1936
1937 // Atomic swap old header if oop still contains the stack lock
1938 Label count;
1939 __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1940 __ bind(count);
1941 __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1942 } else {
1943 __ b(slow_path_unlock);
1944 }
1945
1946 // slow path re-enters here
1947 __ bind(unlock_done);
1948 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1949 restore_native_result(masm, ret_type, stack_slots);
1950 }
1951
1952 __ bind(done);
1953 }
1954
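     // Fire the DTrace method-exit probe via the slow path if the probes are
     // enabled.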
1955 Label dtrace_method_exit, dtrace_method_exit_done;
1956 {
1957 uint64_t offset;
1958 __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1959 __ ldrb(rscratch1, Address(rscratch1, offset));
1960 __ cbnzw(rscratch1, dtrace_method_exit);
|
1758 const Register tmp = lr;
1759
1760 Label slow_path_lock;
1761 Label lock_done;
1762
1763 if (method->is_synchronized()) {
1764 Label count;
1765 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1766
1767 // Get the handle (the 2nd argument)
1768 __ mov(oop_handle_reg, c_rarg1);
1769
1770 // Get address of the box
1771
1772 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1773
1774 // Load the oop from the handle
1775 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1776
1777 if (!UseHeavyMonitors) {
1778 if (UseFastLocking) {
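     // Lightweight locking (UseFastLocking): no displaced header is written;
     // the mark word is preloaded into swap_reg and fast_lock attempts to
     // lock the object in place, branching to slow_path_lock on failure.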
1779 __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1780 __ fast_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
1781 } else {
1782 // Load (object->mark() | 1) into swap_reg %r0
1783 __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1784 __ orr(swap_reg, rscratch1, 1);
1785
1786 // Save (object->mark() | 1) into BasicLock's displaced header
1787 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1788
1789 // src -> dest iff dest == r0 else r0 <- dest
1790 __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
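     // On success the object header now points at our BasicLock and we
     // branch to count; on failure we fall through with the current mark
     // word left in r0 (see the NOTE below).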
1791
1792 // Hmm should this move to the slow path code area???
1793
1794 // Test if the oopMark is an obvious stack pointer, i.e.,
1795 // 1) (mark & 3) == 0, and
1796 // 2) sp <= mark < sp + os::vm_page_size()
1797 // These 3 tests can be done by evaluating the following
1798 // expression: ((mark - sp) & (3 - os::vm_page_size())),
1799 // assuming both stack pointer and pagesize have their
1800 // least significant 2 bits clear.
1801 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1802
1803 __ sub(swap_reg, sp, swap_reg);
1804 __ neg(swap_reg, swap_reg);
1805 __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1806
1807 // Save the test result; for the recursive case the result is zero
1808 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1809 __ br(Assembler::NE, slow_path_lock);
1810 }
1811 } else {
1812 __ b(slow_path_lock);
1813 }
1814 __ bind(count);
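     // Successful fast lock: keep the thread's held monitor count in sync.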
1815 __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1816
1817 // Slow path will re-enter here
1818 __ bind(lock_done);
1819 }
1820
1821
1822 // Finally just about ready to make the JNI call
1823
1824 // Get JNIEnv*, which is the first argument to the native call
1825 __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1826
1827 // Now set thread in native
1828 __ mov(rscratch1, _thread_in_native);
1829 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1830 __ stlrw(rscratch1, rscratch2);
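     // stlrw is a store-release, so all prior writes are visible before
     // other threads can observe this thread in the _thread_in_native state.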
1901
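     // If a stack overflow disabled the yellow/reserved guard zone, re-enable
     // it before returning to Java.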
1902 Label reguard;
1903 Label reguard_done;
1904 __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1905 __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1906 __ br(Assembler::EQ, reguard);
1907 __ bind(reguard_done);
1908
1909 // The native result, if any, is live here
1910
1911 // Unlock
1912 Label unlock_done;
1913 Label slow_path_unlock;
1914 if (method->is_synchronized()) {
1915
1916 // Get locked oop from the handle we passed to jni
1917 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1918
1919 Label done, not_recursive;
1920
1921 if (!UseHeavyMonitors && !UseFastLocking) {
1922 // Simple recursive lock?
1923 __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1924 __ cbnz(rscratch1, not_recursive);
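     // A zero displaced header means this was a recursive stack lock:
     // nothing to write back to the object, just undo the monitor count.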
1925 __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1926 __ b(done);
1927 }
1928
1929 __ bind(not_recursive);
1930
1931 // Must save r0 if it is live now because cmpxchg must use it
1932 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1933 save_native_result(masm, ret_type, stack_slots);
1934 }
1935
1936 if (!UseHeavyMonitors) {
1937 if (UseFastLocking) {
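     // Lightweight unlocking: reload the mark word and let fast_unlock
     // restore the unlocked state, branching to slow_path_unlock if this
     // thread no longer holds the lightweight lock.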
1938 __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1939 __ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
1940 } else {
1941 // get address of the stack lock
1942 __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1943 // get old displaced header
1944 __ ldr(old_hdr, Address(r0, 0));
1945
1946 // Atomic swap old header if oop still contains the stack lock
1947 Label count;
1948 __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1949 __ bind(count);
1950 }
1951 __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1952 } else {
1953 __ b(slow_path_unlock);
1954 }
1955
1956 // slow path re-enters here
1957 __ bind(unlock_done);
1958 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1959 restore_native_result(masm, ret_type, stack_slots);
1960 }
1961
1962 __ bind(done);
1963 }
1964
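     // Fire the DTrace method-exit probe via the slow path if the probes are
     // enabled.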
1965 Label dtrace_method_exit, dtrace_method_exit_done;
1966 {
1967 uint64_t offset;
1968 __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1969 __ ldrb(rscratch1, Address(rscratch1, offset));
1970 __ cbnzw(rscratch1, dtrace_method_exit);