
src/hotspot/cpu/arm/interp_masm_arm.cpp

@@ -861,158 +861,40 @@
  // Argument: R1 : Points to BasicObjectLock to be used for locking.
  // Must be initialized with object to lock.
  // Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
  void InterpreterMacroAssembler::lock_object(Register Rlock) {
    assert(Rlock == R1, "the second argument");
+   const Register Robj = R2;
+   assert_different_registers(Robj, Rlock);
+   const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  
-   if (UseHeavyMonitors) {
-     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
-   } else {
-     Label done;
- 
-     const Register Robj = R2;
-     const Register Rmark = R3;
-     assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
- 
-     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
-     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
-     const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
- 
-     Label already_locked, slow_case;
- 
-     // Load object pointer
-     ldr(Robj, Address(Rlock, obj_offset));
- 
-     if (DiagnoseSyncOnValueBasedClasses != 0) {
-       load_klass(R0, Robj);
-       ldr_u32(R0, Address(R0, Klass::access_flags_offset()));
-       tst(R0, JVM_ACC_IS_VALUE_BASED_CLASS);
-       b(slow_case, ne);
-     }
+   // Load object pointer
+   ldr(Robj, Address(Rlock, obj_offset));
  
-     // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
-     // That would be acceptable as either the CAS or slow case path is taken in that case.
-     // Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
-     // loads are satisfied from a store queue if performed on the same processor).
- 
-     assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
-     ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
- 
-     // Test if object is already locked
-     tst(Rmark, markWord::unlocked_value);
-     b(already_locked, eq);
- 
-     // Save old object->mark() into BasicLock's displaced header
-     str(Rmark, Address(Rlock, mark_offset));
- 
-     cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
- 
-     b(done);
- 
-     // If we got here that means the object is locked by either the calling thread or another thread.
-     bind(already_locked);
-     // Handling of locked objects: recursive locks and slow case.
- 
-     // Fast check for recursive lock.
-     //
-     // Can apply the optimization only if this is a stack lock
-     // allocated in this thread. For efficiency, we can focus on
-     // recently allocated stack locks (instead of reading the stack
-     // base and checking whether 'mark' points inside the current
-     // thread stack):
-     //  1) (mark & 3) == 0
-     //  2) SP <= mark < SP + os::pagesize()
-     //
-     // Warning: SP + os::pagesize can overflow the stack base. We must
-     // neither apply the optimization for an inflated lock allocated
-     // just above the thread stack (this is why condition 1 matters)
-     // nor apply the optimization if the stack lock is inside the stack
-     // of another thread. The latter is avoided even in case of overflow
-     // because we have guard pages at the end of all stacks. Hence, if
-     // we go over the stack base and hit the stack of another thread,
-     // this should not be in a writeable area that could contain a
-     // stack lock allocated by that thread. As a consequence, a stack
-     // lock less than page size away from SP is guaranteed to be
-     // owned by the current thread.
-     //
-     // Note: assuming SP is aligned, we can check the low bits of
-     // (mark-SP) instead of the low bits of mark. In that case,
-     // assuming page size is a power of 2, we can merge the two
-     // conditions into a single test:
-     // => ((mark - SP) & (3 - os::pagesize())) == 0
- 
-     // (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
-     // Check independently the low bits and the distance to SP.
-     // -1- test low 2 bits
-     movs(R0, AsmOperand(Rmark, lsl, 30));
-     // -2- test (mark - SP) if the low two bits are 0
-     sub(R0, Rmark, SP, eq);
-     movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
-     // If still 'eq' then recursive locking OK: store 0 into lock record
-     str(R0, Address(Rlock, mark_offset), eq);
- 
-     b(done, eq);
- 
-     bind(slow_case);
- 
-     // Call the runtime routine for slow case
-     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
- 
-     bind(done);
-   }
+   // TODO: Implement fast-locking.
+   mov(R0, Robj);
+   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), R0);
  }
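
  // --- Editorial note ---
  For readers unfamiliar with the stack-locking scheme being removed here, the recursive-lock
  fast check described in the deleted comment can be written out in plain C++. The sketch below
  is editorial, not HotSpot code; the helper name, the use of plain integers for the mark word
  and SP, and the assumed 4 KB page size are all illustrative.

  #include <cstdint>
  #include <cstdio>

  // Editorial sketch of the removed recursive-lock fast check (not HotSpot code).
  // A displaced mark word with its low two bits clear that points into the page
  // just above SP identifies a stack lock recently allocated by the current
  // thread, i.e. a recursive lock.
  static bool is_recursive_stack_lock(uintptr_t mark, uintptr_t sp,
                                      uintptr_t page_size /* e.g. 4096 */) {
    // The two conditions from the removed comment:
    //   1) (mark & 3) == 0
    //   2) SP <= mark < SP + page_size
    // With SP aligned and page_size a power of two they merge into:
    //   ((mark - SP) & (3 - page_size)) == 0
    return ((mark - sp) & (3 - page_size)) == 0;
  }

  int main() {
    const uintptr_t sp = 0x7fff0000, page = 4096;
    std::printf("%d\n", is_recursive_stack_lock(sp + 0x40, sp, page));     // 1: recursive stack lock
    std::printf("%d\n", is_recursive_stack_lock(sp + 0x41, sp, page));     // 0: low bits set
    std::printf("%d\n", is_recursive_stack_lock(sp + page + 8, sp, page)); // 0: too far from SP
    return 0;
  }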
  
  
  // Unlocks an object. Used in monitorexit bytecode and remove_activation.
  //
  // Argument: R0: Points to BasicObjectLock structure for lock
  // Throw an IllegalMonitorStateException if object is not locked by current thread
  // Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
  void InterpreterMacroAssembler::unlock_object(Register Rlock) {
    assert(Rlock == R0, "the first argument");
+   const Register Robj = R2;
+   assert_different_registers(Robj, Rlock);
+   const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  
-   if (UseHeavyMonitors) {
-     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
-   } else {
-     Label done, slow_case;
- 
-     const Register Robj = R2;
-     const Register Rmark = R3;
-     assert_different_registers(Robj, Rmark, Rlock, Rtemp);
- 
-     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
-     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
-     const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
- 
-     const Register Rzero = zero_register(Rtemp);
- 
-     // Load oop into Robj
-     ldr(Robj, Address(Rlock, obj_offset));
- 
-     // Free entry
-     str(Rzero, Address(Rlock, obj_offset));
- 
-     // Load the old header from BasicLock structure
-     ldr(Rmark, Address(Rlock, mark_offset));
- 
-     // Test for recursion (zero mark in BasicLock)
-     cbz(Rmark, done);
- 
-     bool allow_fallthrough_on_failure = true;
- 
-     cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);
- 
-     b(done, eq);
- 
-     bind(slow_case);
- 
-     // Call the runtime routine for slow case.
-     str(Robj, Address(Rlock, obj_offset)); // restore obj
-     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
+   // Load oop into Robj
+   ldr(Robj, Address(Rlock, obj_offset));
  
-     bind(done);
-   }
+   // TODO: Implement fast-locking.
+   mov(R0, Robj);
+   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), R0);
  }
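
  // --- Editorial note ---
  Similarly, the unlock fast path that the deleted block implemented (recursive-lock detection
  via a zero displaced header, otherwise a CAS restoring the saved header into the object's mark
  word) can be sketched schematically. The types and names below, and the use of std::atomic in
  place of cas_for_lock_release, are illustrative only, not HotSpot code.

  #include <atomic>
  #include <cstdint>

  // Illustrative stand-ins for the lock record and the object header
  // (not the real HotSpot types).
  struct BasicLockSketch { uintptr_t displaced_header; };
  struct ObjectSketch    { std::atomic<uintptr_t> mark; };

  // Schematic version of the removed unlock fast path. Returns true if the
  // lock was released inline, false if the runtime slow path
  // (InterpreterRuntime::monitorexit) would have to be taken.
  static bool fast_unlock(ObjectSketch* obj, BasicLockSketch* lock) {
    uintptr_t displaced = lock->displaced_header;

    // A zero displaced header marks a recursive stack lock: nothing to restore.
    if (displaced == 0) {
      return true;
    }

    // Otherwise the object's mark word should still point at this lock record;
    // CAS the saved header back in to release the lock. Failure means the mark
    // no longer points here (e.g. the lock was inflated) and the slow path
    // must handle it.
    uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
    return obj->mark.compare_exchange_strong(expected, displaced);
  }

  int main() {
    ObjectSketch obj;
    BasicLockSketch lock{0x12345601};                     // hypothetical saved mark word
    obj.mark.store(reinterpret_cast<uintptr_t>(&lock));   // object stack-locked by this record
    return fast_unlock(&obj, &lock) ? 0 : 1;              // inline release succeeds
  }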
  
  
  // Test ImethodDataPtr.  If it is null, continue at the specified label
  void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {