
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp



   1 /*
   2  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


1821   const Register tmp = lr;
1822 
1823   Label slow_path_lock;
1824   Label lock_done;
1825 
1826   if (method->is_synchronized()) {
1827     assert(!is_critical_native, "unhandled");
1828 
1829     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1830 
1831     // Get the handle (the 2nd argument)
1832     __ mov(oop_handle_reg, c_rarg1);
1833 
1834     // Get address of the box
1835 
1836     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1837 
1838     // Load the oop from the handle
1839     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1840 


1841     if (UseBiasedLocking) {
1842       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1843     }
1844 
1845     // Load (object->mark() | 1) into swap_reg %r0
1846     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1847     __ orr(swap_reg, rscratch1, 1);
1848 
1849     // Save (object->mark() | 1) into BasicLock's displaced header
1850     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1851 
1852     // src -> dest iff dest == r0 else r0 <- dest
1853     { Label here;
1854       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1855     }
1856 
1857     // Hmm should this move to the slow path code area???
1858 
1859     // Test if the oopMark is an obvious stack pointer, i.e.,
1860     //  1) (mark & 3) == 0, and


1982   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1983   __ stlrw(rscratch1, rscratch2);
1984   __ bind(after_transition);
1985 
1986   Label reguard;
1987   Label reguard_done;
1988   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1989   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
1990   __ br(Assembler::EQ, reguard);
1991   __ bind(reguard_done);
1992 
1993   // native result if any is live
1994 
1995   // Unlock
1996   Label unlock_done;
1997   Label slow_path_unlock;
1998   if (method->is_synchronized()) {
1999 
2000     // Get locked oop from the handle we passed to jni
2001     __ ldr(obj_reg, Address(oop_handle_reg, 0));

2002 
2003     Label done;
2004 
2005     if (UseBiasedLocking) {
2006       __ biased_locking_exit(obj_reg, old_hdr, done);
2007     }
2008 
2009     // Simple recursive lock?
2010 
2011     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2012     __ cbz(rscratch1, done);
2013 
2014     // Must save r0 if it is live now because cmpxchg must use it
2015     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2016       save_native_result(masm, ret_type, stack_slots);
2017     }
2018 
2019 
2020     // get address of the stack lock
2021     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));


   1 /*
   2  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


1821   const Register tmp = lr;
1822 
1823   Label slow_path_lock;
1824   Label lock_done;
1825 
1826   if (method->is_synchronized()) {
1827     assert(!is_critical_native, "unhandled");
1828 
1829     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1830 
1831     // Get the handle (the 2nd argument)
1832     __ mov(oop_handle_reg, c_rarg1);
1833 
1834     // Get address of the box
1835 
1836     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1837 
1838     // Load the oop from the handle
1839     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1840 
1841     __ resolve_for_write(0, obj_reg);
1842 
1843     if (UseBiasedLocking) {
1844       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1845     }
1846 
1847     // Load (object->mark() | 1) into swap_reg %r0
1848     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1849     __ orr(swap_reg, rscratch1, 1);
1850 
1851     // Save (object->mark() | 1) into BasicLock's displaced header
1852     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1853 
1854     // src -> dest iff dest == r0 else r0 <- dest
1855     { Label here;
1856       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1857     }
1858 
1859     // Hmm should this move to the slow path code area???
1860 
1861     // Test if the oopMark is an obvious stack pointer, i.e.,
1862     //  1) (mark & 3) == 0, and


1984   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1985   __ stlrw(rscratch1, rscratch2);
1986   __ bind(after_transition);
1987 
1988   Label reguard;
1989   Label reguard_done;
1990   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1991   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
1992   __ br(Assembler::EQ, reguard);
1993   __ bind(reguard_done);
1994 
1995   // native result if any is live
1996 
1997   // Unlock
1998   Label unlock_done;
1999   Label slow_path_unlock;
2000   if (method->is_synchronized()) {
2001 
2002     // Get locked oop from the handle we passed to jni
2003     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2004     __ resolve_for_write(0, obj_reg);
2005 
2006     Label done;
2007 
2008     if (UseBiasedLocking) {
2009       __ biased_locking_exit(obj_reg, old_hdr, done);
2010     }
2011 
2012     // Simple recursive lock?
2013 
2014     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2015     __ cbz(rscratch1, done);
2016 
2017     // Must save r0 if it is live now because cmpxchg must use it
2018     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2019       save_native_result(masm, ret_type, stack_slots);
2020     }
2021 
2022 
2023     // get address of the stack lock
2024     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

