src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

1805   Label slow_path_lock;
1806   Label lock_done;
1807 
1808   // Lock a synchronized method
1809   if (method->is_synchronized()) {
1810     assert(!is_critical_native, "unhandled");
1811 
1812 
1813     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1814 
1815     // Get the handle (the 2nd argument)
1816     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1817 
1818     // Get address of the box
1819 
1820     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1821 
1822     // Load the oop from the handle
1823     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1824 
1825     if (UseBiasedLocking) {
1826       // Note that oop_handle_reg is trashed during this call
1827       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
1828     }
1829 
1830     // Load immediate 1 into swap_reg %rax,
1831     __ movptr(swap_reg, 1);
1832 
1833     // Load (object->mark() | 1) into swap_reg %rax,
1834     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1835 
1836     // Save (object->mark() | 1) into BasicLock's displaced header
1837     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1838 
1839     // src -> dest iff dest == rax, else rax, <- dest
1840     // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1841     __ lock();
1842     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1843     __ jcc(Assembler::equal, lock_done);
1844 
1845     // Test if the oopMark is an obvious stack pointer, i.e.,
1846     //  1) (mark & 3) == 0, and
1847     //  2) rsp <= mark < rsp + os::vm_page_size()
1848     // These 3 tests can be done by evaluating the following
1849     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1850     // assuming both stack pointer and pagesize have their
1851     // least significant 2 bits clear.
1852     // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1853 
1854     __ subptr(swap_reg, rsp);
1855     __ andptr(swap_reg, 3 - os::vm_page_size());
1856 
1857     // Save the test result, for recursive case, the result is zero
1858     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1859     __ jcc(Assembler::notEqual, slow_path_lock);
1860     // Slow path will re-enter here
1861     __ bind(lock_done);
1862 
1863     if (UseBiasedLocking) {
1864       // Re-fetch oop_handle_reg as we trashed it above
1865       __ movptr(oop_handle_reg, Address(rsp, wordSize));
1866     }
1867   }
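The recursion test above folds three checks into a single expression. Below is a small standalone illustration, not HotSpot code, using a made-up page size and made-up addresses, of how ((mark - rsp) & (3 - os::vm_page_size())) separates a recursive stack lock from everything that needs the slow path:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t page_size = 4096;          // assumed page size
      const uintptr_t rsp       = 0x7ffc0000;    // made-up stack pointer
      const uintptr_t marks[]   = {
        rsp + 0x40,                              // BasicLock in our own stack page: recursive
        rsp + page_size + 0x40,                  // beyond the page: not our stack lock
        0x12345678 | 0x1                         // a neutral (unlocked) header from elsewhere
      };
      for (uintptr_t mark : marks) {
        uintptr_t test = (mark - rsp) & (3 - page_size);
        printf("mark=%#lx -> %s\n", (unsigned long)mark,
               test == 0 ? "recursive stack lock" : "slow path");
      }
      return 0;
    }

Only a mark that points into the current thread's own stack page with its low two bits clear yields zero, and the generated code stores that zero into the displaced header slot to record the recursive case.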
1868 
1869 
1870   // Finally just about ready to make the JNI call
1871 
1872   // get JNIEnv* which is first argument to native
1873   if (!is_critical_native) {
1874     __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1875     __ movptr(Address(rsp, 0), rdx);
1876 
1877     // Now set thread in native
1878     __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1879   }
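For context, the native entry that this wrapper eventually calls has the usual JNI shape, which is why the JNIEnv* is written into the first outgoing argument slot here and why, for a non-static synchronized method, the receiver handle sits in the second slot (the Address(rsp, wordSize) read by the locking code above). The class and method names below are invented for illustration:

    #include <jni.h>

    // Hypothetical native method; only the argument layout matters for this sketch.
    extern "C" JNIEXPORT void JNICALL
    Java_example_Widget_poke(JNIEnv* env, jobject receiver) {
      // env      corresponds to the value stored at Address(rsp, 0) above
      // receiver corresponds to the handle at Address(rsp, wordSize)
      (void)env;
      (void)receiver;
    }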

1979   __ bind(reguard_done);
1980 
1981   // Handle possible exception (will unlock if necessary)
1982 
1983   // native result if any is live
1984 
1985   // Unlock
1986   Label slow_path_unlock;
1987   Label unlock_done;
1988   if (method->is_synchronized()) {
1989 
1990     Label done;
1991 
1992     // Get locked oop from the handle we passed to jni
1993     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1994 
1995     if (UseBiasedLocking) {
1996       __ biased_locking_exit(obj_reg, rbx, done);
1997     }
1998 
1999     // Simple recursive lock?

2000 
2001     __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2002     __ jcc(Assembler::equal, done);

2003 
2004     // Must save rax, if it is live now because cmpxchg must use it
2005     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2006       save_native_result(masm, ret_type, stack_slots);
2007     }
2008 
2009     //  get old displaced header
2010     __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2011 
2012     // get address of the stack lock
2013     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2014 
2015     // Atomic swap old header if oop still contains the stack lock
2016     // src -> dest iff dest == rax, else rax, <- dest
2017     // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2018     __ lock();
2019     __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2020     __ jcc(Assembler::notEqual, slow_path_unlock);
2021 
2022     // slow path re-enters here
2023     __ bind(unlock_done);
2024     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2025       restore_native_result(masm, ret_type, stack_slots);
2026     }
2027 
2028     __ bind(done);
2029 
2030   }
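The unlock fast path above mirrors the locking sequence: a zero displaced header marks a recursive exit with nothing to restore, otherwise the saved header is swapped back only if the object's mark still points at this frame's BasicLock. A standalone model of that logic, using illustrative stand-in types rather than the real HotSpot classes:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct BasicLockModel { uintptr_t displaced_header; };   // stand-in for BasicLock
    struct ObjectModel    { std::atomic<uintptr_t> mark; };  // stand-in for the mark word

    // Returns true when the fast path suffices, false when the slow path is needed.
    bool fast_unlock(ObjectModel* obj, BasicLockModel* box) {
      uintptr_t displaced = box->displaced_header;
      if (displaced == 0) {
        return true;                             // recursive case: nothing to restore
      }
      uintptr_t expected = (uintptr_t)box;       // mark should still be our stack lock
      // Mirrors the locked cmpxchgptr above: restore the saved header atomically.
      return obj->mark.compare_exchange_strong(expected, displaced);
    }

    int main() {
      BasicLockModel box;
      ObjectModel obj;
      box.displaced_header = 0x5;                // some saved (unlocked) header value
      obj.mark.store((uintptr_t)&box);           // object currently stack-locked by this frame
      printf("%s\n", fast_unlock(&obj, &box) ? "fast unlock" : "slow path");
      return 0;
    }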
2031 
2032   {
2033     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2034     // Tell dtrace about this method exit
2035     save_native_result(masm, ret_type, stack_slots);
2036     __ mov_metadata(rax, method());
2037     __ call_VM_leaf(
2038          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2039          thread, rax);
2040     restore_native_result(masm, ret_type, stack_slots);

1805   Label slow_path_lock;
1806   Label lock_done;
1807 
1808   // Lock a synchronized method
1809   if (method->is_synchronized()) {
1810     assert(!is_critical_native, "unhandled");
1811 
1812 
1813     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1814 
1815     // Get the handle (the 2nd argument)
1816     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1817 
1818     // Get address of the box
1819 
1820     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1821 
1822     // Load the oop from the handle
1823     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1824 
1825     if (LockingMode == LM_MONITOR) {
1826       __ jmp(slow_path_lock);
1827     } else if (LockingMode == LM_LEGACY) {
1828       if (UseBiasedLocking) {
1829         // Note that oop_handle_reg is trashed during this call
1830         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
1831       }
1832 
1833       // Load immediate 1 into swap_reg %rax,
1834       __ movptr(swap_reg, 1);
1835 
1836       // Load (object->mark() | 1) into swap_reg %rax,
1837       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1838 
1839       // Save (object->mark() | 1) into BasicLock's displaced header
1840       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1841 
1842       // src -> dest iff dest == rax, else rax, <- dest
1843       // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1844       __ lock();
1845       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1846       __ jcc(Assembler::equal, lock_done);
1847 
1848       // Test if the oopMark is an obvious stack pointer, i.e.,
1849       //  1) (mark & 3) == 0, and
1850       //  2) rsp <= mark < rsp + os::vm_page_size()
1851       // These 3 tests can be done by evaluating the following
1852       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1853       // assuming both stack pointer and pagesize have their
1854       // least significant 2 bits clear.
1855       // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1856 
1857       __ subptr(swap_reg, rsp);
1858       __ andptr(swap_reg, 3 - os::vm_page_size());
1859 
1860       // Save the test result, for recursive case, the result is zero
1861       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1862       __ jcc(Assembler::notEqual, slow_path_lock);
1863     } else {
1864       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1865       // Load object header
1866       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1867       __ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
1868     }
1869     // Slow path will re-enter here
1870     __ bind(lock_done);
1871 
1872     if (UseBiasedLocking) {
1873       // Re-fetch oop_handle_reg as we trashed it above
1874       __ movptr(oop_handle_reg, Address(rsp, wordSize));
1875     }
1876   }
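In the new LM_LIGHTWEIGHT branch, fast_lock_impl is asked to lock without a displaced header: the mark word must look unlocked, its lock bits are flipped with a compare-and-swap, and the object is recorded on a small per-thread lock stack, with any failure branching to slow_path_lock. A rough standalone model of that idea follows; the type names, bit patterns and capacity are invented for the sketch and are not the real markWord or MacroAssembler definitions:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kLockMask   = 0x3;  // low two mark-word bits (illustrative)
    constexpr uintptr_t kUnlocked   = 0x1;  // neutral/unlocked pattern (illustrative)
    constexpr uintptr_t kFastLocked = 0x0;  // lightweight-locked pattern (illustrative)
    constexpr int       kStackCap   = 4;    // toy lock-stack capacity

    struct ObjModel    { std::atomic<uintptr_t> mark{kUnlocked}; };
    struct ThreadModel { ObjModel* lock_stack[kStackCap]; int top = 0; };

    // Returns false where the generated code would branch to slow_path_lock
    // (mark not unlocked, CAS lost, or lock stack full).
    bool lightweight_lock(ThreadModel* t, ObjModel* obj) {
      if (t->top == kStackCap) return false;               // lock-stack overflow
      uintptr_t mark = obj->mark.load();
      if ((mark & kLockMask) != kUnlocked) return false;   // already locked or inflated
      uintptr_t locked = (mark & ~kLockMask) | kFastLocked;
      if (!obj->mark.compare_exchange_strong(mark, locked)) return false;
      t->lock_stack[t->top++] = obj;                       // remember the held lock
      return true;
    }

    int main() {
      ThreadModel t;
      ObjModel o;
      printf("%s\n", lightweight_lock(&t, &o) ? "fast lock" : "slow path");
      return 0;
    }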
1877 
1878 
1879   // Finally just about ready to make the JNI call
1880 
1881   // get JNIEnv* which is first argument to native
1882   if (!is_critical_native) {
1883     __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1884     __ movptr(Address(rsp, 0), rdx);
1885 
1886     // Now set thread in native
1887     __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1888   }

1988   __ bind(reguard_done);
1989 
1990   // Handle possible exception (will unlock if necessary)
1991 
1992   // native result if any is live
1993 
1994   // Unlock
1995   Label slow_path_unlock;
1996   Label unlock_done;
1997   if (method->is_synchronized()) {
1998 
1999     Label done;
2000 
2001     // Get locked oop from the handle we passed to jni
2002     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2003 
2004     if (UseBiasedLocking) {
2005       __ biased_locking_exit(obj_reg, rbx, done);
2006     }
2007 
2008     if (LockingMode == LM_LEGACY) {
2009       // Simple recursive lock?
2010 
2011       __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2012       __ jcc(Assembler::equal, done);
2013     }
2014 
2015     // Must save rax, if it is live now because cmpxchg must use it
2016     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2017       save_native_result(masm, ret_type, stack_slots);
2018     }
2019 
2020     if (LockingMode == LM_MONITOR) {
2021       __ jmp(slow_path_unlock);
2022     } else if (LockingMode == LM_LEGACY) {
2023       //  get old displaced header
2024       __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2025 
2026       // get address of the stack lock
2027       __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2028 
2029       // Atomic swap old header if oop still contains the stack lock
2030       // src -> dest iff dest == rax, else rax, <- dest
2031       // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2032       __ lock();
2033       __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2034       __ jcc(Assembler::notEqual, slow_path_unlock);
2035     } else {
2036       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2037       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2038       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2039       __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
2040     }
2041 
2042     // slow path re-enters here
2043     __ bind(unlock_done);
2044     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2045       restore_native_result(masm, ret_type, stack_slots);
2046     }
2047 
2048     __ bind(done);
2049 
2050   }
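The LM_LIGHTWEIGHT unlock branch is the mirror image: the andptr with ~markWord::lock_mask_in_place leaves the header with its lock bits cleared in swap_reg, and fast_unlock_impl is expected to restore an unlocked header and pop the thread's lock stack, falling back to slow_path_unlock on any mismatch. Continuing the toy model from the locking sketch above, with the same invented names and the simplification that only the top lock-stack entry is checked:

    // Companion to lightweight_lock() above; an illustration, not the real fast_unlock_impl.
    bool lightweight_unlock(ThreadModel* t, ObjModel* obj) {
      if (t->top == 0 || t->lock_stack[t->top - 1] != obj) return false;  // not our lock
      uintptr_t expected = obj->mark.load() & ~kLockMask;  // fast-locked header (lock bits 00)
      uintptr_t unlocked = expected | kUnlocked;           // header with the unlocked bit restored
      if (!obj->mark.compare_exchange_strong(expected, unlocked)) return false;
      t->top--;                                            // pop our lock-stack entry
      return true;
    }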
2051 
2052   {
2053     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2054     // Tell dtrace about this method exit
2055     save_native_result(masm, ret_type, stack_slots);
2056     __ mov_metadata(rax, method());
2057     __ call_VM_leaf(
2058          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2059          thread, rax);
2060     restore_native_result(masm, ret_type, stack_slots);