src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

   1 /*
   2  * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any

1795       // Hmm should this move to the slow path code area???
1796 
1797       // Test if the oopMark is an obvious stack pointer, i.e.,
1798       //  1) (mark & 3) == 0, and
1799       //  2) sp <= mark < sp + os::pagesize()
1800       // These 3 tests can be done by evaluating the following
1801       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1802       // assuming both stack pointer and pagesize have their
1803       // least significant 2 bits clear.
1804       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1805 
1806       __ sub(swap_reg, sp, swap_reg);
1807       __ neg(swap_reg, swap_reg);
1808       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1809 
1810       // Save the test result; for the recursive case the result is zero
1811       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1812       __ br(Assembler::NE, slow_path_lock);
1813     } else {
1814       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1815       __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1816       __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1817     }
1818     __ bind(count);
1819     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1820 
1821     // Slow path will re-enter here
1822     __ bind(lock_done);
1823   }
1824 
1825 
1826   // Finally just about ready to make the JNI call
1827 
1828   // get JNIEnv* which is first argument to native
1829   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1830 
1831   // Now set thread in native
1832   __ mov(rscratch1, _thread_in_native);
1833   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1834   __ stlrw(rscratch1, rscratch2);
1835 

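The three tests in the comment above fold into a single AND because the page size is a power of two: with page = os::vm_page_size(), the mask 3 - page has only bits 0-1 and every bit from the page-size bit upward set, so ((mark - sp) & (3 - page)) is zero exactly when mark - sp is a multiple of 4 and smaller than one page (an unsigned wrap catches mark < sp). That is what the sub/neg/ands sequence at lines 1806-1808 computes, and per the comment at line 1810 a zero result is then stored as the displaced header to mark the recursive case. A minimal standalone C++ sketch of the same test, not HotSpot code; the function name and the fixed 4 KiB page are illustrative only:

    #include <cstdint>
    #include <cstdio>

    // True iff (mark - sp) is a multiple of 4 and below page_size; when
    // mark < sp the subtraction wraps and the mask's high bits catch it.
    static bool mark_is_in_current_stack_page(uintptr_t mark, uintptr_t sp,
                                              uintptr_t page_size /* power of two */) {
      return ((mark - sp) & (3 - page_size)) == 0;
    }

    int main() {
      const uintptr_t page = 4096;
      const uintptr_t sp   = 0x7ffd0000;
      std::printf("%d\n", mark_is_in_current_stack_page(sp + 64,   sp, page)); // 1: aligned, inside the page
      std::printf("%d\n", mark_is_in_current_stack_page(sp + 66,   sp, page)); // 0: low two bits set
      std::printf("%d\n", mark_is_in_current_stack_page(sp + page, sp, page)); // 0: one past the page
      std::printf("%d\n", mark_is_in_current_stack_page(sp - 8,    sp, page)); // 0: below sp
      return 0;
    }
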
1935     // Must save r0 if it is live now because cmpxchg must use it
1936     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1937       save_native_result(masm, ret_type, stack_slots);
1938     }
1939 
1940     if (LockingMode == LM_MONITOR) {
1941       __ b(slow_path_unlock);
1942     } else if (LockingMode == LM_LEGACY) {
1943       // get address of the stack lock
1944       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1945       //  get old displaced header
1946       __ ldr(old_hdr, Address(r0, 0));
1947 
1948       // Atomic swap old header if oop still contains the stack lock
1949       Label count;
1950       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1951       __ bind(count);
1952       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1953     } else {
1954       assert(LockingMode == LM_LIGHTWEIGHT, "");
1955       __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1956       __ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
1957       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1958       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1959     }
1960 
1961     // slow path re-enters here
1962     __ bind(unlock_done);
1963     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1964       restore_native_result(masm, ret_type, stack_slots);
1965     }
1966 
1967     __ bind(done);
1968   }
1969 
1970   Label dtrace_method_exit, dtrace_method_exit_done;
1971   {
1972     uint64_t offset;
1973     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1974     __ ldrb(rscratch1, Address(rscratch1, offset));
1975     __ cbnzw(rscratch1, dtrace_method_exit);
1976     __ bind(dtrace_method_exit_done);

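For LM_LEGACY the fast unlock above is the mirror image of the lock: the displaced header saved at lock time (old_hdr) is swapped back into the object header, but only if the header still contains the address of this frame's stack lock, which is what the cmpxchg_obj_header call and its comment express; any other header value means the lock has been inflated or is contended and control falls to slow_path_unlock. In the LM_LIGHTWEIGHT branch above, the tbnz on markWord::monitor_value likewise bails out when the object already carries a full monitor. A hedged C++ sketch of the legacy compare-and-exchange, using illustrative stand-in types rather than HotSpot's:

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-in, not a HotSpot type.
    struct StackLock { uintptr_t displaced_header; };

    // Returns true when the fast path succeeds; false mirrors the branch to
    // slow_path_unlock (the header no longer points at this stack lock).
    static bool legacy_fast_unlock(std::atomic<uintptr_t>& object_mark, StackLock* lock) {
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock); // header should still hold this stack lock's address
      uintptr_t restored = lock->displaced_header;            // the old displaced header loaded above
      return object_mark.compare_exchange_strong(expected, restored,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed);
    }

Either way, a successful fast unlock decrements JavaThread::held_monitor_count, mirroring the increment taken after a successful lock.
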
   1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any

1795       // Hmm should this move to the slow path code area???
1796 
1797       // Test if the oopMark is an obvious stack pointer, i.e.,
1798       //  1) (mark & 3) == 0, and
1799       //  2) sp <= mark < sp + os::pagesize()
1800       // These 3 tests can be done by evaluating the following
1801       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1802       // assuming both stack pointer and pagesize have their
1803       // least significant 2 bits clear.
1804       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1805 
1806       __ sub(swap_reg, sp, swap_reg);
1807       __ neg(swap_reg, swap_reg);
1808       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1809 
1810       // Save the test result; for the recursive case the result is zero
1811       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1812       __ br(Assembler::NE, slow_path_lock);
1813     } else {
1814       assert(LockingMode == LM_LIGHTWEIGHT, "must be");

1815       __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1816     }
1817     __ bind(count);
1818     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1819 
1820     // Slow path will re-enter here
1821     __ bind(lock_done);
1822   }
1823 
1824 
1825   // Finally just about ready to make the JNI call
1826 
1827   // get JNIEnv* which is first argument to native
1828   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1829 
1830   // Now set thread in native
1831   __ mov(rscratch1, _thread_in_native);
1832   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1833   __ stlrw(rscratch1, rscratch2);
1834 

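The transition to _thread_in_native just above is published with stlrw, a store-release, rather than a plain str: a thread that observes the new state (for example while coordinating a safepoint) should also observe every store this thread made before the transition. The std::atomic sketch below expresses the same release ordering; the constant's name and value are illustrative stand-ins, not HotSpot's declarations:

    #include <atomic>

    // Illustrative stand-in for the JavaThreadState value; not HotSpot's enum.
    constexpr int sketch_thread_in_native = 4;

    // Publish the state change with release semantics so that prior writes
    // (frame set-up, lock state) are visible to any thread that observes
    // the new state, matching the stlrw in the generated code.
    static void publish_thread_in_native(std::atomic<int>& thread_state) {
      thread_state.store(sketch_thread_in_native, std::memory_order_release);
    }
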
1934     // Must save r0 if it is live now because cmpxchg must use it
1935     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1936       save_native_result(masm, ret_type, stack_slots);
1937     }
1938 
1939     if (LockingMode == LM_MONITOR) {
1940       __ b(slow_path_unlock);
1941     } else if (LockingMode == LM_LEGACY) {
1942       // get address of the stack lock
1943       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1944       //  get old displaced header
1945       __ ldr(old_hdr, Address(r0, 0));
1946 
1947       // Atomic swap old header if oop still contains the stack lock
1948       Label count;
1949       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1950       __ bind(count);
1951       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1952     } else {
1953       assert(LockingMode == LM_LIGHTWEIGHT, "");


1954       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1955       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1956     }
1957 
1958     // slow path re-enters here
1959     __ bind(unlock_done);
1960     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1961       restore_native_result(masm, ret_type, stack_slots);
1962     }
1963 
1964     __ bind(done);
1965   }
1966 
1967   Label dtrace_method_exit, dtrace_method_exit_done;
1968   {
1969     uint64_t offset;
1970     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1971     __ ldrb(rscratch1, Address(rscratch1, offset));
1972     __ cbnzw(rscratch1, dtrace_method_exit);
1973     __ bind(dtrace_method_exit_done);
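
The DTrace check above is the usual AArch64 two-instruction load of a global byte flag: adrp materializes the 4 KiB page containing DTraceMethodProbes and (when the target is within adrp range) hands back the remaining low bits in offset, ldrb then loads the flag from that page plus offset, and cbnzw takes the probe path only when the flag is non-zero. A small C++ sketch of the address split only; the symbol address below is hypothetical:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical address for the DTraceMethodProbes byte; the point is the
      // split between the 4 KiB page base (what adrp materializes) and the low
      // 12 bits (the 'offset' out-parameter that the ldrb adds back).
      const uint64_t symbol    = 0x0000007f9a2c3e41ULL;
      const uint64_t page_base = symbol & ~0xfffULL;
      const uint64_t low12     = symbol &  0xfffULL;
      std::printf("page_base=%#llx offset=%#llx\n",
                  (unsigned long long)page_base, (unsigned long long)low12);
      return 0;
    }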