/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

// ...
  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box
    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      // Note that oop_handle_reg is trashed during this call
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
    }
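
    // The code below implements the stack-locking fast path: swap_reg (rax,)
    // is loaded with the expected unlocked mark word (low bits 01), that
    // displaced mark is saved into the on-stack BasicLock box, and a locked
    // cmpxchg then installs a pointer to the box in the object's mark word.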

    // Load immediate 1 into swap_reg %rax,
    __ movptr(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax,
    __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    // src -> dest iff dest == rax, else rax, <- dest
    // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
    __ lock();
    __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::equal, lock_done);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < rsp + os::vm_page_size()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both the stack pointer and the page size have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
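    // Concretely, with a 4K page, 3 - 4096 == 0xfffff003 as a 32-bit value,
    // so ((mark - rsp) & (3 - os::vm_page_size())) is zero exactly when the
    // unsigned difference (mark - rsp) is below the page size with its low
    // 2 bits clear, i.e. the mark points into this thread's own stack frame.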

    __ subptr(swap_reg, rsp);
    __ andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result; for the recursive case the result is zero
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
    __ jcc(Assembler::notEqual, slow_path_lock);
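    // Fall through: recursive enter. The box's displaced header now holds
    // zero, which the unlock path below recognizes as a recursive lock.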
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }

  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);

    // Now set thread in native
    __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
  // ...

  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // Native result, if any, is live here

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {
    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

    // Simple recursive lock?
    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, done);
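    // A zero displaced header in the box means this was a recursive enter;
    // there is nothing to undo, so skip straight to done.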

    // Must save rax, if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    // get old displaced header
    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

    // get address of the stack lock
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));

    // Atomic swap old header if oop still contains the stack lock
    // src -> dest iff dest == rax, else rax, <- dest
    // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
    __ lock();
    __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::notEqual, slow_path_unlock);

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
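    // SkipIfEqual emits a compare of DTraceMethodProbes against 0 and a
    // conditional jump over the enclosed code; the jump target is bound when
    // skip_if goes out of scope at the end of this block.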
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);
/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

// ...
  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box
    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (LockingMode == LM_MONITOR) {
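      // Heavyweight monitors only: always take the slow path, which enters
      // the runtime and uses a full ObjectMonitor.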
      __ jmp(slow_path_lock);
    } else if (LockingMode == LM_LEGACY) {
      if (UseBiasedLocking) {
        // Note that oop_handle_reg is trashed during this call
        __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
      }

      // Load immediate 1 into swap_reg %rax,
      __ movptr(swap_reg, 1);

      // Load (object->mark() | 1) into swap_reg %rax,
      __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

      // Save (object->mark() | 1) into BasicLock's displaced header
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

      // src -> dest iff dest == rax, else rax, <- dest
      // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
      __ lock();
      __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::equal, lock_done);

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) rsp <= mark < rsp + os::vm_page_size()
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - rsp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg

      __ subptr(swap_reg, rsp);
      __ andptr(swap_reg, 3 - os::vm_page_size());

      // Save the test result; for the recursive case the result is zero
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
      __ jcc(Assembler::notEqual, slow_path_lock);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
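      // Lightweight locking (see MacroAssembler::lightweight_lock): the mark
      // word is CASed from unlocked to locked and obj is recorded on the
      // thread's lock stack; no displaced header is stored in the box.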
      __ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
    }
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }

  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);

    // Now set thread in native
    __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
  // ...

  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // Native result, if any, is live here

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {
    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

    if (LockingMode == LM_LEGACY) {
      // Simple recursive lock?
      __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, done);
    }

    // Must save rax, if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
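      // Heavyweight monitors only: no fast-path unlock, always call into the
      // runtime.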
      __ jmp(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get old displaced header
      __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

      // get address of the stack lock
      __ lea(rax, Address(rbp, lock_slot_rbp_offset));

      // Atomic swap old header if oop still contains the stack lock
      // src -> dest iff dest == rax, else rax, <- dest
      // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
      __ lock();
      __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::notEqual, slow_path_unlock);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
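      // Lightweight unlock (see MacroAssembler::lightweight_unlock): the mark
      // word is CASed back to its unlocked value and obj is popped from the
      // thread's lock stack; any failure branches to slow_path_unlock.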
      __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);