
src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp

*** 222,38 ***
    decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
  
    bind(no_count);
  }
  
! void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,
                                                Register t2, Register t3) {
    assert(LockingMode == LM_LIGHTWEIGHT, "must be");
!   assert_different_registers(obj, t1, t2, t3);
  
    // Handle inflated monitor.
    Label inflated;
    // Finish fast lock successfully. MUST branch to this label with flag == EQ
    Label locked;
    // Finish fast lock unsuccessfully. MUST branch to this label with flag == NE
    Label slow_path;
  
    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(t1, obj);
      ldrw(t1, Address(t1, Klass::access_flags_offset()));
      tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
      br(Assembler::NE, slow_path);
    }
  
    const Register t1_mark = t1;
  
    { // Lightweight locking
  
      // Push lock to the lock stack and finish successfully. MUST branch to this label with flag == EQ
      Label push;
  
      const Register t2_top = t2;
-     const Register t3_t = t3;
  
      // Check if lock-stack is full.
      ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
      cmpw(t2_top, (unsigned)LockStack::end_offset() - 1);
      br(Assembler::GT, slow_path);
--- 222,43 ---
    decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
  
    bind(no_count);
  }
  
! void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
                                                Register t2, Register t3) {
    assert(LockingMode == LM_LIGHTWEIGHT, "must be");
!   assert_different_registers(obj, box, t1, t2, t3);
  
    // Handle inflated monitor.
    Label inflated;
    // Finish fast lock successfully. MUST branch to this label with flag == EQ
    Label locked;
    // Finish fast lock unsuccessfully. MUST branch to this label with flag == NE
    Label slow_path;
  
+   if (UseObjectMonitorTable) {
+     // Clear cache in case fast locking succeeds.
+     str(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
+   }
+ 
    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(t1, obj);
      ldrw(t1, Address(t1, Klass::access_flags_offset()));
      tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
      br(Assembler::NE, slow_path);
    }
  
    const Register t1_mark = t1;
+   const Register t3_t = t3;
  
    { // Lightweight locking
  
      // Push lock to the lock stack and finish successfully. MUST branch to this label with flag == EQ
      Label push;
  
      const Register t2_top = t2;
  
      // Check if lock-stack is full.
      ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
      cmpw(t2_top, (unsigned)LockStack::end_offset() - 1);
      br(Assembler::GT, slow_path);

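Note on the new box argument: with UseObjectMonitorTable the on-stack BasicLock doubles as a one-entry ObjectMonitor* cache, and the str(zr, ...) added above clears that slot before the fast path runs. A minimal C++ sketch of the idea; the BasicLockSketch struct and the helper names are illustrative, only the object_monitor_cache_offset_in_bytes() accessor comes from the code above:

    class ObjectMonitor;

    // Illustrative layout: one cached monitor pointer per on-stack BasicLock.
    struct BasicLockSketch {
      ObjectMonitor* _object_monitor_cache;  // slot behind object_monitor_cache_offset_in_bytes()
    };

    // Cleared before fast locking (matches the str(zr, ...) above) ...
    void clear_cache(BasicLockSketch* box)                  { box->_object_monitor_cache = nullptr; }
    // ... and refilled once an inflated monitor has been located, so that
    // fast_unlock_lightweight can reuse it without another table lookup.
    void fill_cache(BasicLockSketch* box, ObjectMonitor* m) { box->_object_monitor_cache = m; }
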
*** 287,30 ***
    }
  
    { // Handle inflated monitor.
      bind(inflated);
  
!     // mark contains the tagged ObjectMonitor*.
!     const Register t1_tagged_monitor = t1_mark;
!     const uintptr_t monitor_tag = markWord::monitor_value;
      const Register t2_owner_addr = t2;
      const Register t3_owner = t3;
  
      // Compute owner address.
!     lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
  
      // CAS owner (null => current thread).
      cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
              /*release*/ false, /*weak*/ false, t3_owner);
!     br(Assembler::EQ, locked);
  
      // Check if recursive.
      cmp(t3_owner, rthread);
      br(Assembler::NE, slow_path);
  
      // Recursive.
!     increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
    }
  
    bind(locked);
    increment(Address(rthread, JavaThread::held_monitor_count_offset()));
  
--- 292,80 ---
    }
  
    { // Handle inflated monitor.
      bind(inflated);
  
!     const Register t1_monitor = t1;
! 
!     if (!UseObjectMonitorTable) {
+       assert(t1_monitor == t1_mark, "should be the same here");
+     } else {
+       Label monitor_found;
+ 
+       // Load cache address
+       lea(t3_t, Address(rthread, JavaThread::om_cache_oops_offset()));
+ 
+       const int num_unrolled = 2;
+       for (int i = 0; i < num_unrolled; i++) {
+         ldr(t1, Address(t3_t));
+         cmp(obj, t1);
+         br(Assembler::EQ, monitor_found);
+         if (i + 1 != num_unrolled) {
+           increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
+         }
+       }
+ 
+       // Loop after unrolling; advance the iterator.
+       increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
+ 
+       Label loop;
+ 
+       // Search for obj in cache.
+       bind(loop);
+ 
+       // Check for match.
+       ldr(t1, Address(t3_t));
+       cmp(obj, t1);
+       br(Assembler::EQ, monitor_found);
+ 
+       // Search until a null is encountered; a _null_sentinel is guaranteed at the end.
+       increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
+       cbnz(t1, loop);
+       // Cache miss. NE is still set from the cmp above; cbnz does not set flags.
+       b(slow_path);
+ 
+       bind(monitor_found);
+       ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
+     }
+ 
      const Register t2_owner_addr = t2;
      const Register t3_owner = t3;
+     const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
+     const Address owner_address{t1_monitor, ObjectMonitor::owner_offset() - monitor_tag};
+     const Address recursions_address{t1_monitor, ObjectMonitor::recursions_offset() - monitor_tag};
+ 
+     Label monitor_locked;
  
      // Compute owner address.
!     lea(t2_owner_addr, owner_address);
  
      // CAS owner (null => current thread).
      cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
              /*release*/ false, /*weak*/ false, t3_owner);
!     br(Assembler::EQ, monitor_locked);
  
      // Check if recursive.
      cmp(t3_owner, rthread);
      br(Assembler::NE, slow_path);
  
      // Recursive.
!     increment(recursions_address, 1);
+ 
+     bind(monitor_locked);
+     if (UseObjectMonitorTable) {
+       str(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
+     }
    }
  
    bind(locked);
    increment(Address(rthread, JavaThread::held_monitor_count_offset()));
  

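The new lookup above probes num_unrolled (2) cache slots inline and then loops until the guaranteed null sentinel. A minimal C++ sketch of the equivalent search, assuming the cache is a run of (oop, ObjectMonitor*) pairs spaced oop_to_oop_difference() bytes apart and terminated by a null oop; the Entry struct is illustrative:

    class ObjectMonitor;
    struct Entry { oop _oop; ObjectMonitor* _monitor; };  // illustrative (oop, monitor) pair

    // Returns the cached monitor for obj, or nullptr on a miss (the generated
    // code branches to monitor_found / slow_path instead of returning).
    ObjectMonitor* lookup(Entry* cache, oop obj) {
      for (Entry* e = cache; ; e++) {
        oop o = e->_oop;               // ldr(t1, Address(t3_t))
        if (o == obj) {
          return e->_monitor;          // hit: load via oop_to_monitor_difference()
        }
        if (o == nullptr) {
          return nullptr;              // _null_sentinel reached: cache miss
        }
      }
    }
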
*** 329,17 ***
    bind(flag_correct);
  #endif
    // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
  }
  
! void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2,
!                                                 Register t3) {
    assert(LockingMode == LM_LIGHTWEIGHT, "must be");
!   assert_different_registers(obj, t1, t2, t3);
  
    // Handle inflated monitor.
!   Label inflated, inflated_load_monitor;
    // Finish fast unlock successfully. MUST branch to this label with flag == EQ
    Label unlocked;
    // Finish fast unlock unsuccessfully. MUST branch to this label with flag == NE
    Label slow_path;
  
--- 384,17 ---
    bind(flag_correct);
  #endif
    // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
  }
  
! void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
!                                                 Register t2, Register t3) {
    assert(LockingMode == LM_LIGHTWEIGHT, "must be");
!   assert_different_registers(obj, box, t1, t2, t3);
  
    // Handle inflated monitor.
!   Label inflated, inflated_load_mark;
    // Finish fast unlock successfully. MUST branch to this label with flag == EQ
    Label unlocked;
    // Finish fast unlock unsuccessfully. MUST branch to this label with flag == NE
    Label slow_path;
  

*** 347,17 ***
    const Register t2_top = t2;
    const Register t3_t = t3;
  
    { // Lightweight unlock
  
      // Check if obj is top of lock-stack.
      ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
      subw(t2_top, t2_top, oopSize);
      ldr(t3_t, Address(rthread, t2_top));
      cmp(obj, t3_t);
      // Top of lock stack was not obj. Must be monitor.
!     br(Assembler::NE, inflated_load_monitor);
  
      // Pop lock-stack.
      DEBUG_ONLY(str(zr, Address(rthread, t2_top));)
      strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
  
--- 402,19 ---
    const Register t2_top = t2;
    const Register t3_t = t3;
  
    { // Lightweight unlock
  
+     Label push_and_slow_path;
+ 
      // Check if obj is top of lock-stack.
      ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
      subw(t2_top, t2_top, oopSize);
      ldr(t3_t, Address(rthread, t2_top));
      cmp(obj, t3_t);
      // Top of lock stack was not obj. Must be monitor.
!     br(Assembler::NE, inflated_load_mark);
  
      // Pop lock-stack.
      DEBUG_ONLY(str(zr, Address(rthread, t2_top));)
      strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
  

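For context, the unlock fast path first checks whether obj is the most recently pushed lock-stack entry: the top is kept as a byte offset from the JavaThread base, so the last slot lives at rthread + top - oopSize. A sketch under that assumption (the thread_base/top parameter names are illustrative):

    // Mirrors the ldrw/subw/ldr/cmp sequence above.
    bool obj_is_top_of_lock_stack(char* thread_base, uint32_t top, oop obj) {
      uint32_t last = top - oopSize;                              // subw(t2_top, t2_top, oopSize)
      return *reinterpret_cast<oop*>(thread_base + last) == obj;  // cmp(obj, t3_t)
    }
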
*** 370,30 ***
      // Not recursive.
      // Load Mark.
      ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  
      // Check header for monitor (0b10).
!     tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
  
      // Try to unlock. Transition lock bits 0b00 => 0b01
      assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
      orr(t3_t, t1_mark, markWord::unlocked_value);
      cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
              /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
      br(Assembler::EQ, unlocked);
  
      // Compare and exchange failed.
      // Restore lock-stack and handle the unlock in runtime.
      DEBUG_ONLY(str(obj, Address(rthread, t2_top));)
      addw(t2_top, t2_top, oopSize);
      str(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
      b(slow_path);
    }
  
  
    { // Handle inflated monitor.
!     bind(inflated_load_monitor);
      ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  #ifdef ASSERT
      tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
      stop("Fast Unlock not monitor");
  #endif
--- 427,34 ---
      // Not recursive.
      // Load Mark.
      ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  
      // Check header for monitor (0b10).
!     // Because we got here by popping (meaning we pushed while locking),
+     // there will be no monitor in the box. So we need to push the obj back
+     // so that the runtime can fix any potential anonymous owner.
+     tbnz(t1_mark, exact_log2(markWord::monitor_value), UseObjectMonitorTable ? push_and_slow_path : inflated);
  
      // Try to unlock. Transition lock bits 0b00 => 0b01
      assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
      orr(t3_t, t1_mark, markWord::unlocked_value);
      cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
              /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
      br(Assembler::EQ, unlocked);
  
+     bind(push_and_slow_path);
      // Compare and exchange failed.
      // Restore lock-stack and handle the unlock in runtime.
      DEBUG_ONLY(str(obj, Address(rthread, t2_top));)
      addw(t2_top, t2_top, oopSize);
      str(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
      b(slow_path);
    }
  
  
    { // Handle inflated monitor.
!     bind(inflated_load_mark);
      ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  #ifdef ASSERT
      tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
      stop("Fast Unlock not monitor");
  #endif

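The transition attempted above flips the lock bits from 0b00 (fast-locked) back to 0b01 (unlocked) with a CAS on the mark word; if the header has meanwhile become a monitor (0b10) and UseObjectMonitorTable is set, obj is first pushed back so the runtime can fix a potential anonymous owner. A sketch of the header transition using HotSpot's Atomic::cmpxchg; the helper name is illustrative:

    // Succeeds only if the mark is still the fast-locked value we loaded; any
    // other header, e.g. an inflated 0b10 mark, fails the CAS and we go slow.
    bool try_fast_unlock(volatile uintptr_t* mark_addr, uintptr_t observed_mark) {
      uintptr_t unlocked = observed_mark | markWord::unlocked_value;  // 0b00 -> 0b01
      return Atomic::cmpxchg(mark_addr, observed_mark, unlocked) == observed_mark;
    }
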
*** 410,16 ***
      br(Assembler::NE, inflated);
      stop("Fast Unlock lock on stack");
      bind(check_done);
  #endif
  
!     // mark contains the tagged ObjectMonitor*.
-     const Register t1_monitor = t1_mark;
-     const uintptr_t monitor_tag = markWord::monitor_value;
  
!     // Untag the monitor.
!     sub(t1_monitor, t1_mark, monitor_tag);
  
      const Register t2_recursions = t2;
      Label not_recursive;
  
      // Check if recursive.
--- 471,23 ---
      br(Assembler::NE, inflated);
      stop("Fast Unlock lock on stack");
      bind(check_done);
  #endif
  
!     const Register t1_monitor = t1;
  
!     if (!UseObjectMonitorTable) {
!       assert(t1_monitor == t1_mark, "should be the same here");
+ 
+       // Untag the monitor.
+       add(t1_monitor, t1_mark, -(int)markWord::monitor_value);
+     } else {
+       ldr(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes()));
+       // Null check with Flags == NE; no valid monitor pointer lies below alignof(ObjectMonitor*).
+       cmp(t1_monitor, checked_cast<uint8_t>(alignof(ObjectMonitor*)));
+       br(Assembler::LO, slow_path);
+     }
  
      const Register t2_recursions = t2;
      Label not_recursive;
  
      // Check if recursive.

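Two ways t1_monitor is materialized above: without the table, the mark word itself is the tagged ObjectMonitor* (low bits 0b10), so adding -monitor_value untags it; with the table, the BasicLock cache slot may hold null, which the unsigned compare against alignof(ObjectMonitor*) rejects, since no valid monitor can sit below that address. A sketch under those assumptions (helper names illustrative):

    ObjectMonitor* monitor_from_mark(uintptr_t mark) {
      return reinterpret_cast<ObjectMonitor*>(mark - markWord::monitor_value);  // drop tag 0b10
    }

    bool cache_slot_usable(ObjectMonitor* cached) {
      // nullptr (and anything else below alignof(ObjectMonitor*)) means: take slow_path.
      return reinterpret_cast<uintptr_t>(cached) >= alignof(ObjectMonitor*);
    }
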
*** 2492,5 ***
--- 2560,24 ---
        return true;
      }
    }
    return MacroAssembler::in_scratch_emit_size();
  }
+ 
+ void C2_MacroAssembler::load_nklass_compact(Register dst, Register obj, Register index, int scale, int disp) {
+   // Note: Don't clobber obj anywhere in this method!
+ 
+   // The incoming address points to obj-start + klass_offset_in_bytes. We need to extract
+   // obj-start, so that we can load from the object's mark-word instead. Usually the address
+   // comes as obj-start in obj and klass_offset_in_bytes in disp. However, sometimes C2
+   // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and
+   // then passes that register as obj and 0 in disp. The following code extracts the base
+   // and offset to load the mark-word.
+   int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes();
+   if (index == noreg) {
+     ldr(dst, Address(obj, offset));
+   } else {
+     lea(dst, Address(obj, index, Address::lsl(scale)));
+     ldr(dst, Address(dst, offset));
+   }
+   lsr(dst, dst, markWord::klass_shift);
+ }
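
A worked example of the offset folding above, taking mark_offset_in_bytes() == 0 and assuming klass_offset_in_bytes() == 4 for illustration: a caller passing (obj, disp == 4) yields offset == 0 + 4 - 4 == 0 and loads the mark at obj directly, while a caller that pre-computed obj + 4 passes disp == 0 and gets offset == -4, backing up to the mark word. The final lsr then extracts the narrow klass from the upper mark bits, as sketched here:

    // Mirrors the last instruction of load_nklass_compact: with compact object
    // headers the narrow Klass* occupies the upper bits of the 64-bit mark word.
    uint32_t narrow_klass_from_mark(uintptr_t mark) {
      return static_cast<uint32_t>(mark >> markWord::klass_shift);
    }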