  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box

    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

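    // If biased locking acquires the bias, biased_locking_enter jumps straight to
    // lock_done; if the bias must be revoked it jumps to slow_path_lock; otherwise it
    // falls through to the stack-lock attempt below.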
    if (UseBiasedLocking) {
      // Note that oop_handle_reg is trashed during this call
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
    }

    // Load immediate 1 into swap_reg %rax
    __ movptr(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax
    __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    // src -> dest iff dest == rax, else rax <- dest
    // *obj_reg = lock_reg iff *obj_reg == rax, else rax = *(obj_reg)
    __ lock();
    __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::equal, lock_done);
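    // CAS succeeded: the object was unlocked and its mark word now points at our BasicLock.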

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < rsp + os::vm_page_size()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both the stack pointer and the page size have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
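    // E.g. with a 4 KiB page, 3 - os::vm_page_size() == -4093 == 0xFFFFF003, so the
    // 'and' keeps the two low alignment bits and every bit above the page offset; the
    // result is zero exactly when the mark is 4-byte aligned and lies within one page
    // above rsp, i.e. when it points into our own stack (a recursive lock).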

    __ subptr(swap_reg, rsp);
    __ andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
    __ jcc(Assembler::notEqual, slow_path_lock);
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }


  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);

    // Now set thread in native
    __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // Native result if any is live

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

    // Simple recursive lock?

    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, done);
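    // A NULL displaced header means this was a recursive stack-lock enter; there is
    // nothing to undo.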

    // Must save rax if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    // get old displaced header
    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

    // get address of the stack lock
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));

    // Atomic swap old header if oop still contains the stack lock
    // src -> dest iff dest == rax, else rax <- dest
    // *obj_reg = rbx iff *obj_reg == rax, else rax = *(obj_reg)
    __ lock();
    __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::notEqual, slow_path_unlock);
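    // The CAS fails when the mark no longer points at our stack lock (the monitor was
    // inflated while we held it); unlock via the runtime slow path instead.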

    // Slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);

  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);
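
// Below: the same wrapper code after the change that adds the UseFastLocking
// fast-lock / fast-unlock paths alongside the legacy stack-locking code.
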
  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box

    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

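    // UseFastLocking: fast_lock_impl tries to lock the object directly through its mark
    // word (no displaced header is written on this path) and branches to slow_path_lock
    // if the fast lock cannot be acquired.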
    if (UseFastLocking) {
      // Load object header
      __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
    } else {
      if (UseBiasedLocking) {
        // Note that oop_handle_reg is trashed during this call
        __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
      }

      // Load immediate 1 into swap_reg %rax
      __ movptr(swap_reg, 1);

      // Load (object->mark() | 1) into swap_reg %rax
      __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

      // Save (object->mark() | 1) into BasicLock's displaced header
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

      // src -> dest iff dest == rax, else rax <- dest
      // *obj_reg = lock_reg iff *obj_reg == rax, else rax = *(obj_reg)
      __ lock();
      __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::equal, lock_done);

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) rsp <= mark < rsp + os::vm_page_size()
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - rsp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg

      __ subptr(swap_reg, rsp);
      __ andptr(swap_reg, 3 - os::vm_page_size());

      // Save the test result, for recursive case, the result is zero
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
      __ jcc(Assembler::notEqual, slow_path_lock);
    }
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }


  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);

    // Now set thread in native
    __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
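
  // ... (elided here: the call to the native entry point and the transition back from
  // native, including the safepoint check and stack re-guarding) ...
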
  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // Native result if any is live

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

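    // Only the legacy stack-locking code marks a recursive enter with a NULL displaced
    // header, so this check is skipped when UseFastLocking is enabled.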
    if (!UseFastLocking) {
      // Simple recursive lock?

      __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, done);
    }

    // Must save rax if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (UseFastLocking) {
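      // Rebuild the unlocked mark word by clearing the lock bits, then let
      // fast_unlock_impl try to install it; it branches to slow_path_unlock if the fast
      // unlock cannot be performed (e.g. the lock was inflated).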
      __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
      __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
    } else {
      // get old displaced header
      __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

      // get address of the stack lock
      __ lea(rax, Address(rbp, lock_slot_rbp_offset));

      // Atomic swap old header if oop still contains the stack lock
      // src -> dest iff dest == rax, else rax <- dest
      // *obj_reg = rbx iff *obj_reg == rax, else rax = *(obj_reg)
      __ lock();
      __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::notEqual, slow_path_unlock);
    }

    // Slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);

  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);