src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

Old version:

1756   const Register tmp = lr;
1757 
1758   Label slow_path_lock;
1759   Label lock_done;
1760 
1761   if (method->is_synchronized()) {
1762     assert(!is_critical_native, "unhandled");
1763 
1764     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1765 
1766     // Get the handle (the 2nd argument)
1767     __ mov(oop_handle_reg, c_rarg1);
1768 
1769     // Get address of the box
1770 
1771     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1772 
1773     // Load the oop from the handle
1774     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1775 
1776     if (UseBiasedLocking) {
1777       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1778     }
1779 
1780     // Load (object->mark() | 1) into swap_reg %r0
1781     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1782     __ orr(swap_reg, rscratch1, 1);
1783 
1784     // Save (object->mark() | 1) into BasicLock's displaced header
1785     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1786 
1787     // src -> dest iff dest == r0 else r0 <- dest
1788     { Label here;
1789       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1790     }
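
The cmpxchg above tries to install the on-stack BasicLock as the object's header. A minimal C++ sketch of that step, assuming a plain word-sized mark (the helper name and types are illustrative, not the MacroAssembler API):

#include <atomic>
#include <cstdint>

// Install the address of the on-stack BasicLock into the object header, but
// only if the header is still the unlocked mark we saved as the displaced
// header. On failure, 'expected_unlocked_mark' is updated to the current
// header, just as r0 holds the observed mark after a failed cmpxchg.
static bool try_install_stack_lock(std::atomic<uintptr_t>& obj_mark,
                                   uintptr_t& expected_unlocked_mark,
                                   uintptr_t box_address) {
  return obj_mark.compare_exchange_strong(expected_unlocked_mark, box_address);
}
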
1791 
1792     // Hmm should this move to the slow path code area???
1793 
1794     // Test if the oopMark is an obvious stack pointer, i.e.,
1795     //  1) (mark & 3) == 0, and
1796     //  2) sp <= mark < mark + os::pagesize()
1797     // These 3 tests can be done by evaluating the following
1798     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1799     // assuming both stack pointer and pagesize have their
1800     // least significant 2 bits clear.
1801     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1802 
1803     __ sub(swap_reg, sp, swap_reg);
1804     __ neg(swap_reg, swap_reg);
1805     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1806 
1807     // Save the test result, for recursive case, the result is zero
1808     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1809     __ br(Assembler::NE, slow_path_lock);
1810 
1811     // Slow path will re-enter here
1812 
1813     __ bind(lock_done);
1814   }
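
The page-distance test above folds the three conditions into a single AND against (3 - page_size). A standalone sketch of the same arithmetic (plain C++, not HotSpot code; the 4 KiB page size and helper name are assumptions for illustration):

#include <cstdint>
#include <cstdio>

// Zero iff (mark & 3) == 0 and sp <= mark < sp + page_size, i.e. the mark
// word already points at a BasicLock in the current frame (recursive lock).
static bool is_recursive_stack_lock(uintptr_t mark, uintptr_t sp,
                                    uintptr_t page_size = 4096) {
  return ((mark - sp) & (3 - page_size)) == 0;
}

int main() {
  uintptr_t sp = 0x7ffff000;                                    // low two bits clear
  std::printf("%d\n", is_recursive_stack_lock(sp + 0x40, sp));   // 1: same page, aligned
  std::printf("%d\n", is_recursive_stack_lock(sp + 0x5000, sp)); // 0: outside the page
  std::printf("%d\n", is_recursive_stack_lock(sp + 0x41, sp));   // 0: low bits set
  std::printf("%d\n", is_recursive_stack_lock(sp - 0x40, sp));   // 0: below sp (wraps)
  return 0;
}
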
1815 
1816 
1817   // Finally just about ready to make the JNI call
1818 
1819   // get JNIEnv* which is first argument to native
1820   if (!is_critical_native) {
1821     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1822 
1823     // Now set thread in native
1824     __ mov(rscratch1, _thread_in_native);
1825     __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1826     __ stlrw(rscratch1, rscratch2);
1827   }
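
The stlrw above is a store-release: the write of _thread_in_native becomes visible only after every earlier store in the wrapper. A rough C++ analogue of that ordering (the type and field names are illustrative, not HotSpot's JavaThread layout):

#include <atomic>

// Illustrative only: HotSpot performs a release-ordered 32-bit store (stlrw);
// std::memory_order_release expresses the same publication guarantee.
enum SketchThreadState { sketch_thread_in_vm, sketch_thread_in_native };

struct SketchThread {
  std::atomic<int> state{sketch_thread_in_vm};
};

inline void enter_native(SketchThread& t) {
  // Everything written before this point (outgoing arguments, the displaced
  // header, the last Java frame) is ordered before the state change.
  t.state.store(sketch_thread_in_native, std::memory_order_release);
}
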
1828 
1829   rt_call(masm, native_func);
1830 

1912   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1913   __ br(Assembler::EQ, reguard);
1914   __ bind(reguard_done);
1915 
1916   // native result if any is live
1917 
1918   // Unlock
1919   Label unlock_done;
1920   Label slow_path_unlock;
1921   if (method->is_synchronized()) {
1922 
1923     // Get locked oop from the handle we passed to jni
1924     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1925 
1926     Label done;
1927 
1928     if (UseBiasedLocking) {
1929       __ biased_locking_exit(obj_reg, old_hdr, done);
1930     }
1931 
1932     // Simple recursive lock?
1933 
1934     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1935     __ cbz(rscratch1, done);
1936 
1937     // Must save r0 if it is live now because cmpxchg must use it
1938     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1939       save_native_result(masm, ret_type, stack_slots);
1940     }
1941 
1942 
1943     // get address of the stack lock
1944     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1945     //  get old displaced header
1946     __ ldr(old_hdr, Address(r0, 0));
1947 
1948     // Atomic swap old header if oop still contains the stack lock
1949     Label succeed;
1950     __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1951     __ bind(succeed);
1952 
1953     // slow path re-enters here
1954     __ bind(unlock_done);
1955     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1956       restore_native_result(masm, ret_type, stack_slots);
1957     }
1958 
1959     __ bind(done);
1960   }
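
The unlock sequence above is the reverse of the lock fast path: a zero displaced header means a recursive enter with nothing to restore, otherwise the saved header is CAS'ed back over the pointer to the stack lock. A compressed C++ sketch of that protocol (types and the helper are illustrative, not HotSpot code):

#include <atomic>
#include <cstdint>

struct SketchBasicLock { uintptr_t displaced_header; };

// Returns false when the object no longer holds our stack lock (it was
// inflated or is contended) and the slow path must complete the unlock.
static bool try_fast_unlock(std::atomic<uintptr_t>& obj_mark, SketchBasicLock& box) {
  if (box.displaced_header == 0) {
    return true;                       // recursive case, handled by the cbz above
  }
  uintptr_t expected = reinterpret_cast<uintptr_t>(&box);
  return obj_mark.compare_exchange_strong(expected, box.displaced_header);
}
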
1961 
1962   Label dtrace_method_exit, dtrace_method_exit_done;
1963   {
1964     uint64_t offset;
1965     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1966     __ ldrb(rscratch1, Address(rscratch1, offset));
1967     __ cbnzw(rscratch1, dtrace_method_exit);
1968     __ bind(dtrace_method_exit_done);
1969   }
1970 
1971   __ reset_last_Java_frame(false);

New version:

1756   const Register tmp = lr;
1757 
1758   Label slow_path_lock;
1759   Label lock_done;
1760 
1761   if (method->is_synchronized()) {
1762     assert(!is_critical_native, "unhandled");
1763 
1764     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1765 
1766     // Get the handle (the 2nd argument)
1767     __ mov(oop_handle_reg, c_rarg1);
1768 
1769     // Get address of the box
1770 
1771     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1772 
1773     // Load the oop from the handle
1774     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1775 
1776     if (LockingMode == LM_MONITOR) {
1777       __ b(slow_path_lock);
1778     } else if (LockingMode == LM_LEGACY) {
1779       if (UseBiasedLocking) {
1780         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1781       }
1782 
1783       // Load (object->mark() | 1) into swap_reg %r0
1784       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1785       __ orr(swap_reg, rscratch1, 1);
1786 
1787       // Save (object->mark() | 1) into BasicLock's displaced header
1788       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1789 
1790       // src -> dest iff dest == r0 else r0 <- dest
1791       { Label here;
1792         __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1793       }
1794 
1795       // Hmm should this move to the slow path code area???
1796 
1797       // Test if the oopMark is an obvious stack pointer, i.e.,
1798       //  1) (mark & 3) == 0, and
1799       //  2) sp <= mark < mark + os::pagesize()
1800       // These 3 tests can be done by evaluating the following
1801       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1802       // assuming both stack pointer and pagesize have their
1803       // least significant 2 bits clear.
1804       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1805 
1806       __ sub(swap_reg, sp, swap_reg);
1807       __ neg(swap_reg, swap_reg);
1808       __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1809 
1810       // Save the test result, for recursive case, the result is zero
1811       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1812       __ br(Assembler::NE, slow_path_lock);
1813     } else {
1814       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1815       __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1816       __ fast_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
1817     }
1818     // Slow path will re-enter here
1819 
1820     __ bind(lock_done);
1821   }
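
The patched fast path now dispatches on LockingMode: LM_MONITOR skips the fast path entirely, LM_LEGACY keeps the displaced-header stack lock (with optional biased locking), and LM_LIGHTWEIGHT calls fast_lock on the mark word. A sketch of that dispatch (the enum values mirror HotSpot's LockingMode flag; the strings merely label what each branch emits and are not real code paths):

#include <cstdio>

enum SketchLockingMode { LM_MONITOR = 0, LM_LEGACY = 1, LM_LIGHTWEIGHT = 2 };

static void describe_lock_fast_path(SketchLockingMode mode) {
  switch (mode) {
  case LM_MONITOR:
    std::puts("no fast path: unconditional branch to slow_path_lock");
    break;
  case LM_LEGACY:
    std::puts("biased locking (if enabled), then displaced-header stack lock");
    break;
  case LM_LIGHTWEIGHT:
    std::puts("load mark word, fast_lock via the per-thread lock stack");
    break;
  }
}

int main() {
  describe_lock_fast_path(LM_LIGHTWEIGHT);
  return 0;
}
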
1822 
1823 
1824   // Finally just about ready to make the JNI call
1825 
1826   // get JNIEnv* which is first argument to native
1827   if (!is_critical_native) {
1828     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1829 
1830     // Now set thread in native
1831     __ mov(rscratch1, _thread_in_native);
1832     __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1833     __ stlrw(rscratch1, rscratch2);
1834   }
1835 
1836   rt_call(masm, native_func);
1837 

1919   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1920   __ br(Assembler::EQ, reguard);
1921   __ bind(reguard_done);
1922 
1923   // native result if any is live
1924 
1925   // Unlock
1926   Label unlock_done;
1927   Label slow_path_unlock;
1928   if (method->is_synchronized()) {
1929 
1930     // Get locked oop from the handle we passed to jni
1931     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1932 
1933     Label done;
1934 
1935     if (UseBiasedLocking) {
1936       __ biased_locking_exit(obj_reg, old_hdr, done);
1937     }
1938 
1939     if (LockingMode == LM_LEGACY) {
1940       // Simple recursive lock?
1941       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1942       __ cbz(rscratch1, done);
1943     }
1944 
1945     // Must save r0 if it is live now because cmpxchg must use it
1946     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1947       save_native_result(masm, ret_type, stack_slots);
1948     }
1949 
1950 
1951     if (LockingMode == LM_MONITOR) {
1952       __ b(slow_path_unlock);
1953     } else if (LockingMode == LM_LEGACY) {
1954       // get address of the stack lock
1955       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1956       //  get old displaced header
1957       __ ldr(old_hdr, Address(r0, 0));
1958 
1959       // Atomic swap old header if oop still contains the stack lock
1960       Label succeed;
1961       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1962       __ bind(succeed);
1963     } else {
1964       assert(LockingMode == LM_LIGHTWEIGHT, "");
1965       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1966       __ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
1967       __ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
1968     }
1969 
1970     // slow path re-enters here
1971     __ bind(unlock_done);
1972     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1973       restore_native_result(masm, ret_type, stack_slots);
1974     }
1975 
1976     __ bind(done);
1977   }
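
In the LM_LIGHTWEIGHT branch above, the tbnz on exact_log2(markWord::monitor_value) rejects objects whose lock was inflated to a full ObjectMonitor before attempting fast_unlock. A tiny sketch of that bit test (the tag value 0b10 mirrors markWord's monitor_value; everything else is illustrative):

#include <cstdint>
#include <cstdio>

static const uintptr_t sketch_monitor_value = 0b10;   // markWord tag: "has monitor"

// Mirrors: tbnz old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock
static bool must_take_slow_unlock(uintptr_t mark) {
  return (mark & sketch_monitor_value) != 0;
}

int main() {
  std::printf("%d\n", must_take_slow_unlock(0x12340002));  // 1: inflated, slow path
  std::printf("%d\n", must_take_slow_unlock(0x12340000));  // 0: still fast-locked
  return 0;
}
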
1978 
1979   Label dtrace_method_exit, dtrace_method_exit_done;
1980   {
1981     uint64_t offset;
1982     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1983     __ ldrb(rscratch1, Address(rscratch1, offset));
1984     __ cbnzw(rscratch1, dtrace_method_exit);
1985     __ bind(dtrace_method_exit_done);
1986   }
1987 
1988   __ reset_last_Java_frame(false);