src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

   1 /*
   2  * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.

 750     const Register tmp3 = c_rarg5;
 751 
 752     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 753     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 754     const int mark_offset = lock_offset +
 755                             BasicLock::displaced_header_offset_in_bytes();
 756 
 757     Label slow_case;
 758 
 759     // Load object pointer into obj_reg %c_rarg3
 760     ldr(obj_reg, Address(lock_reg, obj_offset));
 761 
 762     if (DiagnoseSyncOnValueBasedClasses != 0) {
 763       load_klass(tmp, obj_reg);
 764       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 765       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 766       br(Assembler::NE, slow_case);
 767     }
 768 
 769     if (LockingMode == LM_LIGHTWEIGHT) {
 770       ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 771       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 772       b(count);
 773     } else if (LockingMode == LM_LEGACY) {
 774       // Load (object->mark() | 1) into swap_reg
 775       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 776       orr(swap_reg, rscratch1, 1);
 777 
 778       // Save (object->mark() | 1) into BasicLock's displaced header
 779       str(swap_reg, Address(lock_reg, mark_offset));
 780 
 781       assert(lock_offset == 0,
 782            "displaced header must be first word in BasicObjectLock");
 783 
 784       Label fail;
 785       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 786 
 787       // Fast check for recursive lock.
 788       //
 789       // Can apply the optimization only if this is a stack lock
 790       // allocated in this thread. For efficiency, we can focus on

 867     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 868     const Register obj_reg    = c_rarg3;  // Will contain the oop
 869     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 870 
 871     save_bcp(); // Save in case of exception
 872 
 873     if (LockingMode != LM_LIGHTWEIGHT) {
 874       // Convert from BasicObjectLock structure to object and BasicLock
 875       // structure. Store the BasicLock address into %r0
 876       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 877     }
 878 
 879     // Load oop into obj_reg(%c_rarg3)
 880     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 881 
 882     // Free entry
 883     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 884 
 885     if (LockingMode == LM_LIGHTWEIGHT) {
 886       Label slow_case;
 887 
 888       // Check for non-symmetric locking. This is allowed by the spec and the interpreter
 889       // must handle it.
 890       Register tmp = rscratch1;
 891       // First check for lock-stack underflow.
 892       ldrw(tmp, Address(rthread, JavaThread::lock_stack_top_offset()));
 893       cmpw(tmp, (unsigned)LockStack::start_offset());
 894       br(Assembler::LE, slow_case);
 895       // Then check if the top of the lock-stack matches the unlocked object.
 896       subw(tmp, tmp, oopSize);
 897       ldr(tmp, Address(rthread, tmp));
 898       cmpoop(tmp, obj_reg);
 899       br(Assembler::NE, slow_case);
 900 
 901       ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 902       tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case);
 903       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 904       b(count);
 905       bind(slow_case);
 906     } else if (LockingMode == LM_LEGACY) {
 907       // Load the old header from BasicLock structure
 908       ldr(header_reg, Address(swap_reg,
 909                               BasicLock::displaced_header_offset_in_bytes()));
 910 
 911       // Test for recursion
 912       cbz(header_reg, count);
 913 
 914       // Atomic swap back the old header
 915       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 916     }
 917     // Call the runtime routine for slow case.
 918     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 919     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 920     b(done);
 921 
 922     bind(count);
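
(Aside: the inline lock-stack check the old code above emits at lines 890-899, and which the new version no longer emits in the interpreter, boils down to the C++ sketch below. LockStackView and its field names are illustrative only, not HotSpot's LockStack API; offsets are kept in bytes to mirror the ldrw/cmpw/subw/ldr/cmpoop sequence.)

#include <cstddef>

// Hypothetical per-thread lock-stack view; not HotSpot's LockStack layout.
// Offsets are byte offsets from 'base', mirroring the emitted code, which
// addresses the stack relative to rthread.
struct LockStackView {
  void** base;          // base the byte offsets below are measured from
  size_t top_offset;    // offset of the next free slot (lock_stack_top)
  size_t start_offset;  // offset of the first slot (LockStack::start_offset)
};

// Mirrors the emitted check: bail out to the slow path on lock-stack
// underflow, or when the top entry is not the object being unlocked
// (non-symmetric locking).
inline bool fast_unlock_allowed(const LockStackView& ls, void* obj) {
  if (ls.top_offset <= ls.start_offset) {
    return false;                                      // underflow
  }
  size_t top_index = (ls.top_offset - sizeof(void*)) / sizeof(void*);
  return ls.base[top_index] == obj;                    // top-of-stack match?
}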

   1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.

 750     const Register tmp3 = c_rarg5;
 751 
 752     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
 753     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 754     const int mark_offset = lock_offset +
 755                             BasicLock::displaced_header_offset_in_bytes();
 756 
 757     Label slow_case;
 758 
 759     // Load object pointer into obj_reg %c_rarg3
 760     ldr(obj_reg, Address(lock_reg, obj_offset));
 761 
 762     if (DiagnoseSyncOnValueBasedClasses != 0) {
 763       load_klass(tmp, obj_reg);
 764       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 765       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 766       br(Assembler::NE, slow_case);
 767     }
 768 
 769     if (LockingMode == LM_LIGHTWEIGHT) {
 770       lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
 771       b(count);
 772     } else if (LockingMode == LM_LEGACY) {
 773       // Load (object->mark() | 1) into swap_reg
 774       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 775       orr(swap_reg, rscratch1, 1);
 776 
 777       // Save (object->mark() | 1) into BasicLock's displaced header
 778       str(swap_reg, Address(lock_reg, mark_offset));
 779 
 780       assert(lock_offset == 0,
 781            "displaced header must be first word in BasicObjectLock");
 782 
 783       Label fail;
 784       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 785 
 786       // Fast check for recursive lock.
 787       //
 788       // Can apply the optimization only if this is a stack lock
 789       // allocated in this thread. For efficiency, we can focus on

 866     const Register header_reg = c_rarg2;  // Will contain the old oopMark
 867     const Register obj_reg    = c_rarg3;  // Will contain the oop
 868     const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
 869 
 870     save_bcp(); // Save in case of exception
 871 
 872     if (LockingMode != LM_LIGHTWEIGHT) {
 873       // Convert from BasicObjectLock structure to object and BasicLock
 874       // structure. Store the BasicLock address into %r0
 875       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
 876     }
 877 
 878     // Load oop into obj_reg(%c_rarg3)
 879     ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 880 
 881     // Free entry
 882     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 883 
 884     if (LockingMode == LM_LIGHTWEIGHT) {
 885       Label slow_case;
 886       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
 887       b(count);
 888       bind(slow_case);
 889     } else if (LockingMode == LM_LEGACY) {
 890       // Load the old header from BasicLock structure
 891       ldr(header_reg, Address(swap_reg,
 892                               BasicLock::displaced_header_offset_in_bytes()));
 893 
 894       // Test for recursion
 895       cbz(header_reg, count);
 896 
 897       // Atomic swap back the old header
 898       cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 899     }
 900     // Call the runtime routine for slow case.
 901     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
 902     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 903     b(done);
 904 
 905     bind(count);
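
(Aside: the LM_LEGACY monitorexit fast path is unchanged between the two versions (old lines 907-915, new lines 890-898) and can be read as the C++ sketch below. ObjectView and BasicLockView are illustrative stand-ins, not HotSpot types; a zero displaced header marks a recursive stack lock, and a failed compare-and-swap falls through to the InterpreterRuntime::monitorexit call.)

#include <atomic>
#include <cstdint>

// Illustrative stand-ins for the object header and the on-stack BasicLock.
struct BasicLockView {
  uintptr_t displaced_header;   // saved mark word; 0 for a recursive lock
};

struct ObjectView {
  std::atomic<uintptr_t> mark;  // object mark word
};

// Returns true when the unlock completes on the fast path; false means the
// runtime slow path has to finish the job, matching the cmpxchg fallthrough
// in the code above.
inline bool legacy_fast_unlock(ObjectView* obj, BasicLockView* lock) {
  uintptr_t header = lock->displaced_header;
  if (header == 0) {
    return true;                // recursive stack lock: nothing to restore
  }
  // The mark word should still point at this BasicLock; atomically swap the
  // saved header back in. Failure means the lock is contended or the monitor
  // has been inflated, so the runtime must handle the exit.
  uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
  return obj->mark.compare_exchange_strong(expected, header);
}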