1806 Label slow_path_lock;
1807 Label lock_done;
1808
1809 // Lock a synchronized method
1810 if (method->is_synchronized()) {
1811 assert(!is_critical_native, "unhandled");
1812
1813
1814 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1815
1816 // Get the handle (the 2nd argument)
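// For a synchronized native the locked object is the receiver (or the class mirror for a
// static method); its JNI handle sits in the second outgoing argument slot, just after JNIEnv*.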
1817 __ movptr(oop_handle_reg, Address(rsp, wordSize));
1818
1819 // Get address of the box
1820
1821 __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1822
1823 // Load the oop from the handle
1824 __ movptr(obj_reg, Address(oop_handle_reg, 0));
1825
1826 if (UseBiasedLocking) {
1827 // Note that oop_handle_reg is trashed during this call
1828 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
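// On taking or keeping the bias this jumps to lock_done; if the bias must be revoked it
// jumps to slow_path_lock; otherwise it falls through to the stack-locking code below.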
1829 }
1830
1831 // Load immediate 1 into swap_reg %rax,
1832 __ movptr(swap_reg, 1);
1833
1834 // Load (object->mark() | 1) into swap_reg %rax,
1835 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1836
1837 // Save (object->mark() | 1) into BasicLock's displaced header
1838 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1839
1840 // src -> dest iff dest == rax, else rax, <- dest
1841 // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1842 __ lock();
1843 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1844 __ jcc(Assembler::equal, lock_done);
1845
1846 // Test if the oopMark is an obvious stack pointer, i.e.,
1847 // 1) (mark & 3) == 0, and
1848 // 2) rsp <= mark < rsp + os::vm_page_size()
1849 // These 3 tests can be done by evaluating the following
1850 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1851 // assuming both stack pointer and pagesize have their
1852 // least significant 2 bits clear.
1853 // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
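// Worked example (illustrative values only, 4 KiB page): with rsp = 0x00407000 and a
// mark of 0x00407010, (mark - rsp) = 0x10 and (0x10 & (3 - 0x1000)) == 0, i.e. the mark
// is a stack lock in our own frame (the recursive case); a mark outside
// [rsp, rsp + 0x1000) or with its low two bits set yields a non-zero result.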
1854
1855 __ subptr(swap_reg, rsp);
1856 __ andptr(swap_reg, 3 - os::vm_page_size());
1857
1858 // Save the test result; for the recursive case the result is zero
1859 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1860 __ jcc(Assembler::notEqual, slow_path_lock);
1861 // Slow path will re-enter here
1862 __ bind(lock_done);
1863
1864 if (UseBiasedLocking) {
1865 // Re-fetch oop_handle_reg as we trashed it above
1866 __ movptr(oop_handle_reg, Address(rsp, wordSize));
1867 }
1868 }
1869
1870
1871 // Finally just about ready to make the JNI call
1872
1873 // get JNIEnv* which is first argument to native
1874 if (!is_critical_native) {
1875 __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1876 __ movptr(Address(rsp, 0), rdx);
1877
1878 // Now set thread in native
1879 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
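// From this point the VM treats the thread as running native code: safepoints no longer
// wait for it, so GC may proceed concurrently with the native call.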
1880 }
1980 __ bind(reguard_done);
1981
1982 // Handle possible exception (will unlock if necessary)
1983
1984 // The native result, if any, is live at this point
1985
1986 // Unlock
1987 Label slow_path_unlock;
1988 Label unlock_done;
1989 if (method->is_synchronized()) {
1990
1991 Label done;
1992
1993 // Get locked oop from the handle we passed to jni
1994 __ movptr(obj_reg, Address(oop_handle_reg, 0));
1995
1996 if (UseBiasedLocking) {
1997 __ biased_locking_exit(obj_reg, rbx, done);
1998 }
1999
2000 // Simple recursive lock?
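// A zero displaced header, stored by the locking prologue above, marks a recursive
// stack lock, so there is nothing to unlock.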
2001
2002 __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2003 __ jcc(Assembler::equal, done);
2004
2005 // Must save rax if it is live now because cmpxchg must use it
2006 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2007 save_native_result(masm, ret_type, stack_slots);
2008 }
2009
2010 // get old displaced header
2011 __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2012
2013 // get address of the stack lock
2014 __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2015
2016 // Atomically swap the old header back in if the mark word still points to our stack lock
2017 // src -> dest iff dest == rax, else rax, <- dest
2018 // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2019 __ lock();
2020 __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2021 __ jcc(Assembler::notEqual, slow_path_unlock);
2022
2023 // slow path re-enters here
2024 __ bind(unlock_done);
2025 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2026 restore_native_result(masm, ret_type, stack_slots);
2027 }
2028
2029 __ bind(done);
2030
2031 }
2032
2033 {
2034 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
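// SkipIfEqual emits a check of DTraceMethodProbes and skips to the end of this scope when
// the flag is zero, so a disabled probe costs only a compare and branch.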
2035 // Tell dtrace about this method exit
2036 save_native_result(masm, ret_type, stack_slots);
2037 __ mov_metadata(rax, method());
2038 __ call_VM_leaf(
2039 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2040 thread, rax);
2041 restore_native_result(masm, ret_type, stack_slots);
|
1806 Label slow_path_lock;
1807 Label lock_done;
1808
1809 // Lock a synchronized method
1810 if (method->is_synchronized()) {
1811 assert(!is_critical_native, "unhandled");
1812
1813
1814 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1815
1816 // Get the handle (the 2nd argument)
1817 __ movptr(oop_handle_reg, Address(rsp, wordSize));
1818
1819 // Get address of the box
1820
1821 __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1822
1823 // Load the oop from the handle
1824 __ movptr(obj_reg, Address(oop_handle_reg, 0));
1825
1826 if (LockingMode == LM_MONITOR) {
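// Heavyweight monitors only: always go to the slow path, which enters the runtime to
// lock the inflated monitor.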
1827 __ jmp(slow_path_lock);
1828 } else if (LockingMode == LM_LEGACY) {
1829 if (UseBiasedLocking) {
1830 // Note that oop_handle_reg is trashed during this call
1831 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
1832 }
1833
1834 // Load immediate 1 into swap_reg %rax,
1835 __ movptr(swap_reg, 1);
1836
1837 // Load (object->mark() | 1) into swap_reg %rax,
1838 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1839
1840 // Save (object->mark() | 1) into BasicLock's displaced header
1841 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1842
1843 // src -> dest iff dest == rax, else rax, <- dest
1844 // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1845 __ lock();
1846 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1847 __ jcc(Assembler::equal, lock_done);
1848
1849 // Test if the oopMark is an obvious stack pointer, i.e.,
1850 // 1) (mark & 3) == 0, and
1851 // 2) rsp <= mark < rsp + os::vm_page_size()
1852 // These 3 tests can be done by evaluating the following
1853 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1854 // assuming both stack pointer and pagesize have their
1855 // least significant 2 bits clear.
1856 // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1857
1858 __ subptr(swap_reg, rsp);
1859 __ andptr(swap_reg, 3 - os::vm_page_size());
1860
1861 // Save the test result; for the recursive case the result is zero
1862 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1863 __ jcc(Assembler::notEqual, slow_path_lock);
1864 } else {
1865 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1866 // Load object header
1867 __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
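// Attempt the lightweight lock on the mark word just loaded; any failure branches
// to slow_path_lock.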
1868 __ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
1869 }
1870 // Slow path will re-enter here
1871 __ bind(lock_done);
1872
1873 if (UseBiasedLocking) {
1874 // Re-fetch oop_handle_reg as we trashed it above
1875 __ movptr(oop_handle_reg, Address(rsp, wordSize));
1876 }
1877 }
1878
1879
1880 // Finally just about ready to make the JNI call
1881
1882 // get JNIEnv* which is first argument to native
1883 if (!is_critical_native) {
1884 __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1885 __ movptr(Address(rsp, 0), rdx);
1886
1887 // Now set thread in native
1888 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1889 }
1989 __ bind(reguard_done);
1990
1991 // Handle possible exception (will unlock if necessary)
1992
1993 // The native result, if any, is live at this point
1994
1995 // Unlock
1996 Label slow_path_unlock;
1997 Label unlock_done;
1998 if (method->is_synchronized()) {
1999
2000 Label done;
2001
2002 // Get locked oop from the handle we passed to jni
2003 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2004
2005 if (UseBiasedLocking) {
2006 __ biased_locking_exit(obj_reg, rbx, done);
2007 }
2008
2009 if (LockingMode == LM_LEGACY) {
2010 // Simple recursive lock?
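// Only the legacy path stores a displaced header in the lock slot; a zero value there
// marks a recursive stack lock with nothing to unlock.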
2011
2012 __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2013 __ jcc(Assembler::equal, done);
2014 }
2015
2016 // Must save rax if it is live now because cmpxchg must use it
2017 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2018 save_native_result(masm, ret_type, stack_slots);
2019 }
2020
2021 if (LockingMode == LM_MONITOR) {
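// Heavyweight monitors only: always unlock through the runtime slow path.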
2022 __ jmp(slow_path_unlock);
2023 } else if (LockingMode == LM_LEGACY) {
2024 // get old displaced header
2025 __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2026
2027 // get address of the stack lock
2028 __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2029
2030 // Atomically swap the old header back in if the mark word still points to our stack lock
2031 // src -> dest iff dest == rax, else rax, <- dest
2032 // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2033 __ lock();
2034 __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2035 __ jcc(Assembler::notEqual, slow_path_unlock);
2036 } else {
2037 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
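// Recover the unlocked mark by clearing the lock bits, then attempt the lightweight
// unlock; any failure branches to slow_path_unlock.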
2038 __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2039 __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2040 __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
2041 }
2042
2043 // slow path re-enters here
2044 __ bind(unlock_done);
2045 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2046 restore_native_result(masm, ret_type, stack_slots);
2047 }
2048
2049 __ bind(done);
2050
2051 }
2052
2053 {
2054 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2055 // Tell dtrace about this method exit
2056 save_native_result(masm, ret_type, stack_slots);
2057 __ mov_metadata(rax, method());
2058 __ call_VM_leaf(
2059 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2060 thread, rax);
2061 restore_native_result(masm, ret_type, stack_slots);
|