src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

   1 /*
   2  * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any

1735 
1736   // RedefineClasses() tracing support for obsolete method entry
1737   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1738     // protect the args we've loaded
1739     save_args(masm, total_c_args, c_arg, out_regs);
1740     __ mov_metadata(c_rarg1, method());
1741     __ call_VM_leaf(
1742       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1743       rthread, c_rarg1);
1744     restore_args(masm, total_c_args, c_arg, out_regs);
1745   }
1746 
1747   // Lock a synchronized method
1748 
1749   // Register definitions used by locking and unlocking
1750 
1751   const Register swap_reg = r0;
1752   const Register obj_reg  = r19;  // Will contain the oop
1753   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1754   const Register old_hdr  = r13;  // value of old header at unlock time
1755   const Register tmp = lr;
1756 
1757   Label slow_path_lock;
1758   Label lock_done;
1759 
1760   if (method->is_synchronized()) {
1761     assert(!is_critical_native, "unhandled");
1762 
1763     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1764 
1765     // Get the handle (the 2nd argument)
1766     __ mov(oop_handle_reg, c_rarg1);
1767 
1768     // Get address of the box
1769 
1770     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1771 
1772     // Load the oop from the handle
1773     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1774 
1775     if (UseBiasedLocking) {
1776       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1777     }
1778 
1779     // Load (object->mark() | 1) into swap_reg %r0
1780     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1781     __ orr(swap_reg, rscratch1, 1);
1782 
1783     // Save (object->mark() | 1) into BasicLock's displaced header
1784     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1785 
1786     // src -> dest iff dest == r0 else r0 <- dest
1787     { Label here;
1788       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1789     }
1790 
1791     // Should this move to the slow path code area?
1792 
1793     // Test if the oopMark is an obvious stack pointer, i.e.,
1794     //  1) (mark & 3) == 0, and
1795     //  2) sp <= mark < mark + os::pagesize()
1796     // These 3 tests can be done by evaluating the following
1797     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1798     // assuming both stack pointer and pagesize have their
1799     // least significant 2 bits clear.
1800     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1801 
1802     __ sub(swap_reg, sp, swap_reg);
1803     __ neg(swap_reg, swap_reg);
1804     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1805 
1806     // Save the test result; for the recursive case the result is zero
1807     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1808     __ br(Assembler::NE, slow_path_lock);
1809 
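A concrete check of the recursion test above, with hypothetical addresses and a 4 KiB page size assumed (the values are illustrative, not taken from a real run):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t sp   = 0x0000ffffb0001000ULL;   // 16-byte aligned stack pointer
      uint64_t mask = (uint64_t)(3LL - 4096);  // 0xfffffffffffff003
      // Recursive case: the mark is a BasicLock address in this frame, so
      // 0 <= mark - sp < 4096 and the low two bits are clear.
      uint64_t mark = sp + 0x40;
      assert(((mark - sp) & mask) == 0);       // store 0, fall through
      // Non-recursive case: a neutral header (low bit set) fails the test.
      mark = 0x0000fffe12345601ULL;
      assert(((mark - sp) & mask) != 0);       // branch to slow_path_lock
      return 0;
    }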
1810     // Slow path will re-enter here
1811 
1812     __ bind(lock_done);
1813   }
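Taken together, the sequence above amounts to roughly the following C++ (an illustrative sketch only; the std::atomic framing and the names are mine, not HotSpot's):

    #include <atomic>
    #include <cstdint>

    // obj_mark is the object's header word; box is the on-stack BasicLock slot.
    bool try_stack_lock(std::atomic<uintptr_t>& obj_mark, uintptr_t* box,
                        uintptr_t sp, uintptr_t page_size) {
      uintptr_t displaced = obj_mark.load() | 1;  // header with "unlocked" bit set
      *box = displaced;                           // save the displaced header
      uintptr_t expected = displaced;
      if (obj_mark.compare_exchange_strong(expected, (uintptr_t)box))
        return true;                              // header now points at our box
      // CAS failed; 'expected' holds the observed header (r0 in the code above).
      // A pointer into this frame means we already own the lock (recursion).
      uintptr_t test = (expected - sp) & (3 - page_size);
      *box = test;                                // zero marks a recursive enter
      return test == 0;                           // non-zero: slow_path_lock
    }

On the recursive path the box records zero, which is exactly what the unlock sequence further down tests with cbz.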
1814 
1815 
1816   // Finally just about ready to make the JNI call
1817 
1818   // Get the JNIEnv*, which is the first argument to the native function
1819   if (!is_critical_native) {
1820     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1821 
1822     // Now set thread in native
1823     __ mov(rscratch1, _thread_in_native);
1824     __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1825     __ stlrw(rscratch1, rscratch2);
1826   }
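The stlrw above is a store-release. In C++ terms the transition is roughly the following (illustrative; the field is a stand-in for the word at JavaThread::thread_state_offset()):

    #include <atomic>

    std::atomic<int> thread_state;             // stand-in for the thread-state field

    void enter_native(int state_in_native) {   // e.g. _thread_in_native
      // Release ordering: every store issued before the transition (arguments,
      // the lock box, the last Java frame) is visible to any thread that
      // observes the new state, such as a safepoint poller.
      thread_state.store(state_in_native, std::memory_order_release);
    }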
1827 
1828   rt_call(masm, native_func);
1829 

1911   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1912   __ br(Assembler::EQ, reguard);
1913   __ bind(reguard_done);
1914 
1915   // The native result, if any, is live at this point
1916 
1917   // Unlock
1918   Label unlock_done;
1919   Label slow_path_unlock;
1920   if (method->is_synchronized()) {
1921 
1922     // Get the locked oop from the handle we passed to JNI
1923     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1924 
1925     Label done;
1926 
1927     if (UseBiasedLocking) {
1928       __ biased_locking_exit(obj_reg, old_hdr, done);
1929     }
1930 
1931     // Simple recursive lock?
1932 
1933     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1934     __ cbz(rscratch1, done);
1935 
1936     // Must save r0 if it is live now, because cmpxchg must use it
1937     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1938       save_native_result(masm, ret_type, stack_slots);
1939     }
1940 
1941 
1942     // Get the address of the stack lock
1943     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1944     // Get the old displaced header
1945     __ ldr(old_hdr, Address(r0, 0));
1946 
1947     // Atomic swap old header if oop still contains the stack lock
1948     Label succeed;
1949     __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1950     __ bind(succeed);
1951 
1952     // slow path re-enters here
1953     __ bind(unlock_done);
1954     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1955       restore_native_result(masm, ret_type, stack_slots);
1956     }
1957 
1958     __ bind(done);
1959   }
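The unlock sequence mirrors the lock: a zero displaced header means a recursive enter, otherwise the saved header is CASed back into the object. As a sketch, under the same caveats as the locking sketch above:

    #include <atomic>
    #include <cstdint>

    bool try_stack_unlock(std::atomic<uintptr_t>& obj_mark, uintptr_t* box) {
      uintptr_t displaced = *box;
      if (displaced == 0)
        return true;                         // recursive enter: nothing to undo
      uintptr_t expected = (uintptr_t)box;   // header should still point at box
      return obj_mark.compare_exchange_strong(expected, displaced);
      // false: the lock was inflated in the meantime -> slow_path_unlock
    }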
1960 
1961   Label dtrace_method_exit, dtrace_method_exit_done;
1962   {
1963     uint64_t offset;
1964     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1965     __ ldrb(rscratch1, Address(rscratch1, offset));
1966     __ cbnzw(rscratch1, dtrace_method_exit);
1967     __ bind(dtrace_method_exit_done);
1968   }
1969 
1970   __ reset_last_Java_frame(false);

   1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any

1735 
1736   // RedefineClasses() tracing support for obsolete method entry
1737   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1738     // protect the args we've loaded
1739     save_args(masm, total_c_args, c_arg, out_regs);
1740     __ mov_metadata(c_rarg1, method());
1741     __ call_VM_leaf(
1742       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1743       rthread, c_rarg1);
1744     restore_args(masm, total_c_args, c_arg, out_regs);
1745   }
1746 
1747   // Lock a synchronized method
1748 
1749   // Register definitions used by locking and unlocking
1750 
1751   const Register swap_reg = r0;
1752   const Register obj_reg  = r19;  // Will contain the oop
1753   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1754   const Register old_hdr  = r13;  // value of old header at unlock time
1755   const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
1756   const Register tmp = lr;
1757 
1758   Label slow_path_lock;
1759   Label lock_done;
1760 
1761   if (method->is_synchronized()) {
1762     assert(!is_critical_native, "unhandled");
1763 
1764     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1765 
1766     // Get the handle (the 2nd argument)
1767     __ mov(oop_handle_reg, c_rarg1);
1768 
1769     // Get address of the box
1770 
1771     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1772 
1773     // Load the oop from the handle
1774     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1775 
1776     if (LockingMode == LM_MONITOR) {
1777       __ b(slow_path_lock);
1778     } else if (LockingMode == LM_LEGACY) {
1779       if (UseBiasedLocking) {
1780         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1781       }
1782 
1783       // Load (object->mark() | 1) into swap_reg %r0
1784       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1785       __ orr(swap_reg, rscratch1, 1);
1786 
1787       // Save (object->mark() | 1) into BasicLock's displaced header
1788       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1789 
1790       // src -> dest iff dest == r0 else r0 <- dest
1791       { Label here;
1792         __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1793       }
1794 
1795       // Should this move to the slow path code area?
1796 
1797       // Test if the oopMark is an obvious stack pointer, i.e.,
1798       //  1) (mark & 3) == 0, and
1799       //  2) sp <= mark < mark + os::pagesize()
1800       // These 3 tests can be done by evaluating the following
1801       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1802       // assuming both stack pointer and pagesize have their
1803       // least significant 2 bits clear.
1804       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1805 
1806       __ sub(swap_reg, sp, swap_reg);
1807       __ neg(swap_reg, swap_reg);
1808       __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1809 
1810       // Save the test result; for the recursive case the result is zero
1811       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1812       __ br(Assembler::NE, slow_path_lock);
1813     } else {
1814       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1815       __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1816     }
1817     // Slow path will re-enter here
1818 
1819     __ bind(lock_done);
1820   }
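The new version dispatches on LockingMode: LM_MONITOR always takes the slow path, LM_LEGACY keeps the displaced-header scheme sketched earlier, and LM_LIGHTWEIGHT delegates to lightweight_lock, which replaces the on-stack BasicLock with a thread-local lock stack. My reading of that fast path, as a heavily simplified model (the field names, lock-stack layout, and omitted overflow check are all assumptions, not the real implementation):

    #include <atomic>
    #include <cstdint>

    struct LockStackModel { uintptr_t elems[8]; int top = 0; };  // per-thread

    bool lightweight_lock_sketch(std::atomic<uintptr_t>& mark,
                                 LockStackModel& ls, uintptr_t obj) {
      uintptr_t m = mark.load();
      if ((m & 3) != 1) return false;             // not unlocked -> slow path
      if (!mark.compare_exchange_strong(m, m & ~(uintptr_t)3))
        return false;                             // lost the race -> slow path
      ls.elems[ls.top++] = obj;                   // overflow check omitted
      return true;
    }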
1821 
1822 
1823   // Finally just about ready to make the JNI call
1824 
1825   // Get the JNIEnv*, which is the first argument to the native function
1826   if (!is_critical_native) {
1827     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1828 
1829     // Now set thread in native
1830     __ mov(rscratch1, _thread_in_native);
1831     __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1832     __ stlrw(rscratch1, rscratch2);
1833   }
1834 
1835   rt_call(masm, native_func);
1836 

1918   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1919   __ br(Assembler::EQ, reguard);
1920   __ bind(reguard_done);
1921 
1922   // The native result, if any, is live at this point
1923 
1924   // Unlock
1925   Label unlock_done;
1926   Label slow_path_unlock;
1927   if (method->is_synchronized()) {
1928 
1929     // Get the locked oop from the handle we passed to JNI
1930     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1931 
1932     Label done;
1933 
1934     if (UseBiasedLocking) {
1935       __ biased_locking_exit(obj_reg, old_hdr, done);
1936     }
1937 
1938     if (LockingMode == LM_LEGACY) {
1939       // Simple recursive lock?
1940       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1941       __ cbz(rscratch1, done);
1942     }
1943 
1944     // Must save r0 if it is live now, because cmpxchg must use it
1945     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1946       save_native_result(masm, ret_type, stack_slots);
1947     }
1948 
1949 
1950     if (LockingMode == LM_MONITOR) {
1951       __ b(slow_path_unlock);
1952     } else if (LockingMode == LM_LEGACY) {
1953       // Get the address of the stack lock
1954       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1955       // Get the old displaced header
1956       __ ldr(old_hdr, Address(r0, 0));
1957 
1958       // Atomic swap old header if oop still contains the stack lock
1959       Label succeed;
1960       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1961       __ bind(succeed);
1962     } else {
1963       assert(LockingMode == LM_LIGHTWEIGHT, "");
1964       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1965     }
1966 
1967     // slow path re-enters here
1968     __ bind(unlock_done);
1969     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1970       restore_native_result(masm, ret_type, stack_slots);
1971     }
1972 
1973     __ bind(done);
1974   }
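And the corresponding lightweight_unlock, reusing LockStackModel from the sketch above and with the same caveats:

    bool lightweight_unlock_sketch(std::atomic<uintptr_t>& mark,
                                   LockStackModel& ls, uintptr_t obj) {
      if (ls.top == 0 || ls.elems[ls.top - 1] != obj)
        return false;                        // not the innermost lock -> slow path
      uintptr_t m = mark.load();
      if ((m & 3) != 0)
        return false;                        // inflated meanwhile -> slow path
      if (!mark.compare_exchange_strong(m, m | 1))
        return false;                        // lost a race -> slow path
      ls.top--;                              // pop the lock stack
      return true;
    }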
1975 
1976   Label dtrace_method_exit, dtrace_method_exit_done;
1977   {
1978     uint64_t offset;
1979     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1980     __ ldrb(rscratch1, Address(rscratch1, offset));
1981     __ cbnzw(rscratch1, dtrace_method_exit);
1982     __ bind(dtrace_method_exit_done);
1983   }
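The adrp/ldrb/cbnzw triple is an out-of-line flag gate; in effect (the declaration is a stand-in, not the real one):

    extern "C" unsigned char DTraceMethodProbes;  // byte flag tested above

    void method_exit_probe_gate() {
      if (DTraceMethodProbes) {   // adrp + ldrb load the byte; cbnzw branches
        // out-of-line probe call, returning to dtrace_method_exit_done
      }
    }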
1984 
1985   __ reset_last_Java_frame(false);