src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

1756   const Register tmp = lr;
1757 
1758   Label slow_path_lock;
1759   Label lock_done;
1760 
1761   if (method->is_synchronized()) {
1762     assert(!is_critical_native, "unhandled");
1763 
1764     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1765 
1766     // Get the handle (the 2nd argument)
1767     __ mov(oop_handle_reg, c_rarg1);
1768 
1769     // Get address of the box
1770 
1771     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1772 
1773     // Load the oop from the handle
1774     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1775 
1776     if (UseBiasedLocking) {
1777       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1778     }
1779 
1780     // Load (object->mark() | 1) into swap_reg %r0
1781     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1782     __ orr(swap_reg, rscratch1, 1);
1783 
1784     // Save (object->mark() | 1) into BasicLock's displaced header
1785     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1786 
1787     // src -> dest iff dest == r0 else r0 <- dest
1788     { Label here;
1789       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1790     }
1791 
1792     // Hmm should this move to the slow path code area???
1793 
1794     // Test if the oopMark is an obvious stack pointer, i.e.,
1795     //  1) (mark & 3) == 0, and
1796     //  2) sp <= mark < mark + os::pagesize()
1797     // These 3 tests can be done by evaluating the following
1798     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1799     // assuming both stack pointer and pagesize have their
1800     // least significant 2 bits clear.
1801     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1802 
1803     __ sub(swap_reg, sp, swap_reg);
1804     __ neg(swap_reg, swap_reg);
1805     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1806 
1807     // Save the test result, for recursive case, the result is zero
1808     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1809     __ br(Assembler::NE, slow_path_lock);
1810 
1811     // Slow path will re-enter here
1812 
1813     __ bind(lock_done);
1814   }
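
As an aside for reviewers, the stack-locking fast path above (displaced header, CAS on the mark word, then the page-distance test for the recursive case) can be modeled in plain C++. The sketch below is illustrative only: Object, BasicLock, and the page-size constant are stand-ins for HotSpot's oopDesc, BasicLock box, and os::vm_page_size(), not the VM's actual types.

    #include <atomic>
    #include <cstdint>

    // Toy model of the stack-lock fast path emitted above (not VM code).
    struct Object { std::atomic<uintptr_t> mark; };   // low two bits: lock tag
    struct BasicLock { uintptr_t displaced_header; }; // the on-stack "box"

    constexpr uintptr_t kPageSize = 4096;             // assumed os::vm_page_size()

    // Returns true on fast-path success (lock_done), false for slow_path_lock.
    bool stack_lock(Object* obj, BasicLock* box, uintptr_t sp) {
      // Load (object->mark() | 1) and save it as the displaced header.
      uintptr_t swap = obj->mark.load(std::memory_order_relaxed) | 1;
      box->displaced_header = swap;

      // cmpxchg: install the box address iff the mark still equals 'swap'.
      if (obj->mark.compare_exchange_strong(swap, reinterpret_cast<uintptr_t>(box))) {
        return true;                                  // we now own the lock
      }

      // CAS failed, so 'swap' now holds the current mark. If that mark is a
      // pointer into our own stack page, this is a recursive enter:
      // ((mark - sp) & (3 - page_size)) == 0  iff  (mark & 3) == 0 and
      // sp <= mark < sp + page_size (sp and page size 4-byte aligned).
      uintptr_t test = (swap - sp) & (3 - kPageSize);
      box->displaced_header = test;                   // zero marks recursion
      return test == 0;                               // non-zero: slow path
    }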
1815 
1816 
1817   // Finally just about ready to make the JNI call
1818 
1819   // get JNIEnv* which is first argument to native
1820   if (!is_critical_native) {
1821     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1822 
1823     // Now set thread in native
1824     __ mov(rscratch1, _thread_in_native);
1825     __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1826     __ stlrw(rscratch1, rscratch2);
1827   }
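
Note that stlrw is a store-release: every Java-visible store above it is ordered before the thread-state change becomes observable. In C++ terms the transition amounts to something like the following (the enum and field shape are illustrative, not JavaThread's actual layout):

    #include <atomic>

    enum ThreadState { _thread_in_Java, _thread_in_native };  // illustrative

    // Sketch: release ordering makes prior stores visible before other
    // threads can observe the state change (what stlrw provides above).
    void transition_to_native(std::atomic<ThreadState>& thread_state) {
      thread_state.store(_thread_in_native, std::memory_order_release);
    }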
1828 
1829   rt_call(masm, native_func);
1830 

1912   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1913   __ br(Assembler::EQ, reguard);
1914   __ bind(reguard_done);
1915 
1916   // native result if any is live
1917 
1918   // Unlock
1919   Label unlock_done;
1920   Label slow_path_unlock;
1921   if (method->is_synchronized()) {
1922 
1923     // Get locked oop from the handle we passed to jni
1924     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1925 
1926     Label done;
1927 
1928     if (UseBiasedLocking) {
1929       __ biased_locking_exit(obj_reg, old_hdr, done);
1930     }
1931 
1932     // Simple recursive lock?
1933 
1934     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1935     __ cbz(rscratch1, done);
1936 
1937     // Must save r0 if it is live now because cmpxchg must use it
1938     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1939       save_native_result(masm, ret_type, stack_slots);
1940     }
1941 
1942 
1943     // get address of the stack lock
1944     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1945     //  get old displaced header
1946     __ ldr(old_hdr, Address(r0, 0));
1947 
1948     // Atomic swap old header if oop still contains the stack lock
1949     Label succeed;
1950     __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1951     __ bind(succeed);
1952 
1953     // slow path re-enters here
1954     __ bind(unlock_done);
1955     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1956       restore_native_result(masm, ret_type, stack_slots);
1957     }
1958 
1959     __ bind(done);
1960   }
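
The unlock side mirrors the locking sketch earlier: a zero displaced header means a recursive enter and nothing needs restoring (the cbz above), otherwise the saved header is CASed back into the mark word. Again a hedged model against the toy Object/BasicLock types, not the VM's code:

    // Toy counterpart to the unlock sequence above.
    bool stack_unlock(Object* obj, BasicLock* box) {
      uintptr_t displaced = box->displaced_header;
      if (displaced == 0) {
        return true;   // recursive case: header was never displaced
      }
      // Swap the old header back iff the mark still points at our box.
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      if (obj->mark.compare_exchange_strong(expected, displaced)) {
        return true;   // 'succeed'
      }
      return false;    // slow_path_unlock
    }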
1961 
1962   Label dtrace_method_exit, dtrace_method_exit_done;
1963   {
1964     uint64_t offset;
1965     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1966     __ ldrb(rscratch1, Address(rscratch1, offset));
1967     __ cbnzw(rscratch1, dtrace_method_exit);
1968     __ bind(dtrace_method_exit_done);
1969   }
1970 
1971   __ reset_last_Java_frame(false);

The patched version of the same region follows; it wraps the stack-locking sequence above in a UseFastLocking check and adds the new fast_lock/fast_unlock calls:

1756   const Register tmp = lr;
1757 
1758   Label slow_path_lock;
1759   Label lock_done;
1760 
1761   if (method->is_synchronized()) {
1762     assert(!is_critical_native, "unhandled");
1763 
1764     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1765 
1766     // Get the handle (the 2nd argument)
1767     __ mov(oop_handle_reg, c_rarg1);
1768 
1769     // Get address of the box
1770 
1771     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1772 
1773     // Load the oop from the handle
1774     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1775 
1776     if (UseFastLocking) {
1777       __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1778       __ fast_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
1779     } else {
1780       if (UseBiasedLocking) {
1781         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1782       }
1783 
1784       // Load (object->mark() | 1) into swap_reg %r0
1785       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1786       __ orr(swap_reg, rscratch1, 1);
1787 
1788       // Save (object->mark() | 1) into BasicLock's displaced header
1789       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1790 
1791       // src -> dest iff dest == r0 else r0 <- dest
1792       { Label here;
1793         __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1794       }
1795 
1796       // Hmm should this move to the slow path code area???
1797 
1798       // Test if the oopMark is an obvious stack pointer, i.e.,
1799       //  1) (mark & 3) == 0, and
1800       //  2) sp <= mark < mark + os::pagesize()
1801       // These 3 tests can be done by evaluating the following
1802       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1803       // assuming both stack pointer and pagesize have their
1804       // least significant 2 bits clear.
1805       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1806 
1807       __ sub(swap_reg, sp, swap_reg);
1808       __ neg(swap_reg, swap_reg);
1809       __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1810 
1811       // Save the test result, for recursive case, the result is zero
1812       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1813       __ br(Assembler::NE, slow_path_lock);
1814     }
1815     // Slow path will re-enter here
1816 
1817     __ bind(lock_done);
1818   }
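
On the patched path there is no on-stack box at all: fast_lock works on the mark word directly and records ownership elsewhere (a per-thread lock stack in the fast-locking design). The model below reuses the toy Object type from the earlier sketch; the tag encoding and LockStack type are assumptions about the design, not MacroAssembler::fast_lock's exact contract:

    #include <vector>

    // Toy model of the fast-locking scheme behind MacroAssembler::fast_lock.
    // Assumed mark-word tags: 01 = unlocked, 00 = fast-locked.
    struct LockStack { std::vector<Object*> entries; };  // per-thread

    // Returns true on success; false means "take slow_path_lock".
    bool fast_lock(Object* obj, LockStack& ls) {
      uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
      uintptr_t unlocked = mark | 0b01;                   // expected shape
      uintptr_t locked = unlocked & ~uintptr_t(0b11);     // desired shape
      if (obj->mark.compare_exchange_strong(unlocked, locked)) {
        ls.entries.push_back(obj);                        // record ownership
        return true;
      }
      return false;  // already locked, inflated, or contended: slow path
    }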
1819 
1820 
1821   // Finally just about ready to make the JNI call
1822 
1823   // get JNIEnv* which is first argument to native
1824   if (!is_critical_native) {
1825     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1826 
1827     // Now set thread in native
1828     __ mov(rscratch1, _thread_in_native);
1829     __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1830     __ stlrw(rscratch1, rscratch2);
1831   }
1832 
1833   rt_call(masm, native_func);
1834 

1916   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1917   __ br(Assembler::EQ, reguard);
1918   __ bind(reguard_done);
1919 
1920   // native result if any is live
1921 
1922   // Unlock
1923   Label unlock_done;
1924   Label slow_path_unlock;
1925   if (method->is_synchronized()) {
1926 
1927     // Get locked oop from the handle we passed to jni
1928     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1929 
1930     Label done;
1931 
1932     if (UseBiasedLocking) {
1933       __ biased_locking_exit(obj_reg, old_hdr, done);
1934     }
1935 
1936     if (!UseFastLocking) {
1937       // Simple recursive lock?
1938       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1939       __ cbz(rscratch1, done);
1940     }
1941 
1942     // Must save r0 if it is live now because cmpxchg must use it
1943     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1944       save_native_result(masm, ret_type, stack_slots);
1945     }
1946 
1947 
1948     if (UseFastLocking) {
1949       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1950       __ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
1951     } else {
1952       // get address of the stack lock
1953       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1954       //  get old displaced header
1955       __ ldr(old_hdr, Address(r0, 0));
1956 
1957       // Atomic swap old header if oop still contains the stack lock
1958       Label succeed;
1959       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1960       __ bind(succeed);
1961     }
1962 
1963     // slow path re-enters here
1964     __ bind(unlock_done);
1965     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1966       restore_native_result(masm, ret_type, stack_slots);
1967     }
1968 
1969     __ bind(done);
1970   }
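
fast_unlock reverses that transition: CAS the mark word from locked back to unlocked and drop the ownership record, falling into slow_path_unlock if the mark no longer has the expected locked shape (same toy types and assumed tag encoding as the fast_lock sketch):

    // Toy counterpart to MacroAssembler::fast_unlock (same assumptions).
    bool fast_unlock(Object* obj, LockStack& ls) {
      uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
      uintptr_t locked = mark & ~uintptr_t(0b11);         // expected shape
      uintptr_t unlocked = locked | 0b01;                 // desired shape
      if (obj->mark.compare_exchange_strong(locked, unlocked)) {
        ls.entries.pop_back();                            // drop the record
        return true;
      }
      return false;  // mark changed (e.g. inflated): slow_path_unlock
    }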
1971 
1972   Label dtrace_method_exit, dtrace_method_exit_done;
1973   {
1974     uint64_t offset;
1975     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1976     __ ldrb(rscratch1, Address(rscratch1, offset));
1977     __ cbnzw(rscratch1, dtrace_method_exit);
1978     __ bind(dtrace_method_exit_done);
1979   }
1980 
1981   __ reset_last_Java_frame(false);