  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    Label count_mon;

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box
    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (!UseHeavyMonitors) {
      if (UseFastLocking) {
        // Load object header
        __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
        __ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
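        // fast_lock_impl is defined elsewhere; as a rough sketch (an
        // assumption about its behavior, not verified here), it attempts to
        // CAS the mark word from "unlocked" to "locked" and to push obj_reg
        // onto the current thread's lock stack, branching to slow_path_lock
        // on any failure.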
      } else {
        // Load immediate 1 into swap_reg %rax
        __ movptr(swap_reg, 1);

        // Load (object->mark() | 1) into swap_reg %rax
        __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

        // Save (object->mark() | 1) into BasicLock's displaced header
        __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

        // src -> dest iff dest == rax, else rax <- dest
        // *obj_reg = lock_reg iff *obj_reg == rax, else rax = *(obj_reg)
        __ lock();
        __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
        __ jcc(Assembler::equal, count_mon);

        // Test if the oopMark is an obvious stack pointer, i.e.,
        //  1) (mark & 3) == 0, and
        //  2) rsp <= mark < rsp + os::vm_page_size()
        // These 3 tests (the alignment test and the two bounds checks) can
        // be done by evaluating the following expression:
        //   ((mark - rsp) & (3 - os::vm_page_size())),
        // assuming both the stack pointer and the page size have their
        // least significant 2 bits clear.
        // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
        __ subptr(swap_reg, rsp);
        __ andptr(swap_reg, 3 - (int)os::vm_page_size());

        // Save the test result; for the recursive case, the result is zero
        __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
        __ jcc(Assembler::notEqual, slow_path_lock);
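        // Worked example (an illustration assuming a 4 KiB page):
        // 3 - 4096 sign-extends to the mask 0x...fffff003. If the mark is a
        // BasicLock address on this thread's stack with rsp <= mark and
        // mark - rsp < 4096, then (mark - rsp) has its low 2 bits and all
        // bits >= 12 clear, the AND yields zero, and we fall through,
        // treating the lock as recursive.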
      }
    } else {
      __ jmp(slow_path_lock);
    }
    __ bind(count_mon);
    __ inc_held_monitor_count();
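    // inc/dec_held_monitor_count maintain a per-thread count of held
    // monitors; the precise consumers are not shown here (an assumption:
    // the VM uses the count for consistency checks when leaving Java code).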

    // Slow path will re-enter here
    __ bind(lock_done);
  }

  // Finally just about ready to make the JNI call

  // Get JNIEnv*, which is the first argument to the native method
  __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
  __ movptr(Address(rsp, 0), rdx);

  // Now set thread in native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
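  // ... (elided here: the native call itself and the transition back from
  // _thread_in_native, including the safepoint check and the switch to
  // _thread_in_Java) ...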
  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
  __ jcc(Assembler::equal, reguard);

  // slow path reguard re-enters here
  __ bind(reguard_done);
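  // (The reguard slow path is bound elsewhere in this stub; it calls into
  // the runtime, presumably SharedRuntime::reguard_yellow_pages, to
  // re-enable the yellow guard zone.)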

  // Handle possible exception (will unlock if necessary)

  // The native result, if any, is live

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label fast_done;

    // Get the locked oop from the handle we passed to JNI
    __ movptr(obj_reg, Address(oop_handle_reg, 0));
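    // Reload through the handle rather than a saved register: a GC during
    // the native call may have moved the object.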

    if (!UseHeavyMonitors && !UseFastLocking) {
      Label not_recur;
      // Simple recursive lock? A zero displaced header (stored by the
      // lock-time test above) marks a recursive stack lock.
      __ cmpptr(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
      __ jcc(Assembler::notEqual, not_recur);
      __ dec_held_monitor_count();
      __ jmpb(fast_done);
      __ bind(not_recur);
    }

    // Must save rax if it is live now, because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (!UseHeavyMonitors) {
      if (UseFastLocking) {
        __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
        __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
        __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
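        // The lock bits are masked off first because fast_unlock_impl
        // (defined elsewhere; this is a sketch by assumption) expects the
        // mark with clear lock bits. Roughly, it CASes the mark back to
        // "unlocked" and pops obj_reg off the thread's lock stack,
        // branching to slow_path_unlock if the object is inflated or is
        // not the top lock-stack entry.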
      } else {
        // Get the old displaced header
        __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

        // Get the address of the stack lock
        __ lea(rax, Address(rbp, lock_slot_rbp_offset));

        // Atomically swap the old header back if the oop still contains the stack lock
        // src -> dest iff dest == rax, else rax <- dest
        // *obj_reg = rbx iff *obj_reg == rax, else rax = *(obj_reg)
        __ lock();
        __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
        __ jcc(Assembler::notEqual, slow_path_unlock);
      }
      __ dec_held_monitor_count();
    } else {
      __ jmp(slow_path_unlock);
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(fast_done);
  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
    // Tell DTrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(