< prev index next >

src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

Print this page


   1 /*
   2  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


1822   const Register tmp = lr;
1823 
1824   Label slow_path_lock;
1825   Label lock_done;
1826 
1827   if (method->is_synchronized()) {
1828     assert(!is_critical_native, "unhandled");
1829 
1830     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1831 
1832     // Get the handle (the 2nd argument)
1833     __ mov(oop_handle_reg, c_rarg1);
1834 
1835     // Get address of the box
1836 
1837     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1838 
1839     // Load the oop from the handle
1840     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1841 


1842     if (UseBiasedLocking) {
1843       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1844     }
1845 
1846     // Load (object->mark() | 1) into swap_reg %r0
1847     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1848     __ orr(swap_reg, rscratch1, 1);
1849 
1850     // Save (object->mark() | 1) into BasicLock's displaced header
1851     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1852 
1853     // src -> dest iff dest == r0 else r0 <- dest
1854     { Label here;
1855       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1856     }
1857 
1858     // Hmm should this move to the slow path code area???
1859 
1860     // Test if the oopMark is an obvious stack pointer, i.e.,
1861     //  1) (mark & 3) == 0, and


1983   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1984   __ stlrw(rscratch1, rscratch2);
1985   __ bind(after_transition);
1986 
1987   Label reguard;
1988   Label reguard_done;
1989   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1990   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
1991   __ br(Assembler::EQ, reguard);
1992   __ bind(reguard_done);
1993 
1994   // native result if any is live
1995 
1996   // Unlock
1997   Label unlock_done;
1998   Label slow_path_unlock;
1999   if (method->is_synchronized()) {
2000 
2001     // Get locked oop from the handle we passed to jni
2002     __ ldr(obj_reg, Address(oop_handle_reg, 0));

2003 
2004     Label done;
2005 
2006     if (UseBiasedLocking) {
2007       __ biased_locking_exit(obj_reg, old_hdr, done);
2008     }
2009 
2010     // Simple recursive lock?
2011 
2012     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2013     __ cbz(rscratch1, done);
2014 
2015     // Must save r0 if it is live now because cmpxchg must use it
2016     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2017       save_native_result(masm, ret_type, stack_slots);
2018     }
2019 
2020 
2021     // get address of the stack lock
2022     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));


   1 /*
   2  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


1822   const Register tmp = lr;
1823 
1824   Label slow_path_lock;
1825   Label lock_done;
1826 
1827   if (method->is_synchronized()) {
1828     assert(!is_critical_native, "unhandled");
1829 
1830     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1831 
1832     // Get the handle (the 2nd argument)
1833     __ mov(oop_handle_reg, c_rarg1);
1834 
1835     // Get address of the box
1836 
1837     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1838 
1839     // Load the oop from the handle
1840     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1841 
1842     __ resolve_for_write(0, obj_reg);
1843 
1844     if (UseBiasedLocking) {
1845       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1846     }
1847 
1848     // Load (object->mark() | 1) into swap_reg %r0
1849     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1850     __ orr(swap_reg, rscratch1, 1);
1851 
1852     // Save (object->mark() | 1) into BasicLock's displaced header
1853     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1854 
1855     // src -> dest iff dest == r0 else r0 <- dest
1856     { Label here;
1857       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1858     }
1859 
1860     // Hmm should this move to the slow path code area???
1861 
1862     // Test if the oopMark is an obvious stack pointer, i.e.,
1863     //  1) (mark & 3) == 0, and


1985   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1986   __ stlrw(rscratch1, rscratch2);
1987   __ bind(after_transition);
1988 
1989   Label reguard;
1990   Label reguard_done;
1991   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1992   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
1993   __ br(Assembler::EQ, reguard);
1994   __ bind(reguard_done);
1995 
1996   // native result if any is live
1997 
1998   // Unlock
1999   Label unlock_done;
2000   Label slow_path_unlock;
2001   if (method->is_synchronized()) {
2002 
2003     // Get locked oop from the handle we passed to jni
2004     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2005     __ resolve_for_write(0, obj_reg);
2006 
2007     Label done;
2008 
2009     if (UseBiasedLocking) {
2010       __ biased_locking_exit(obj_reg, old_hdr, done);
2011     }
2012 
2013     // Simple recursive lock?
2014 
2015     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2016     __ cbz(rscratch1, done);
2017 
2018     // Must save r0 if it is live now because cmpxchg must use it
2019     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2020       save_native_result(masm, ret_type, stack_slots);
2021     }
2022 
2023 
2024     // get address of the stack lock
2025     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));


< prev index next >