
src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp

*** 50,33 ***
                                    Register tmp2Reg, Register tmp3Reg) {
    Register oop = objectReg;
    Register box = boxReg;
    Register disp_hdr = tmpReg;
    Register tmp = tmp2Reg;
-   Label cont;
    Label object_has_monitor;
    Label count, no_count;
  
    assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
!   assert_different_registers(oop, box, tmp, disp_hdr);
  
    // Load markWord from object into displaced_header.
    ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
  
    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, oop);
      ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
!     br(Assembler::NE, cont);
    }
  
    // Check for existing monitor
    tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
  
    if (LockingMode == LM_MONITOR) {
      tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
!     b(cont);
    } else {
      assert(LockingMode == LM_LEGACY, "must be");
      // Set tmp to be (markWord of object | UNLOCK_VALUE).
      orr(tmp, disp_hdr, markWord::unlocked_value);
  
--- 50,32 ---
                                    Register tmp2Reg, Register tmp3Reg) {
    Register oop = objectReg;
    Register box = boxReg;
    Register disp_hdr = tmpReg;
    Register tmp = tmp2Reg;
    Label object_has_monitor;
    Label count, no_count;
  
    assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
!   assert_different_registers(oop, box, tmp, disp_hdr, rscratch1);
  
    // Load markWord from object into displaced_header.
    ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
  
    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, oop);
      ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
!     br(Assembler::NE, no_count);
    }
  
    // Check for existing monitor
    tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
  
    if (LockingMode == LM_MONITOR) {
      tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
!     b(no_count);
    } else {
      assert(LockingMode == LM_LEGACY, "must be");
      // Set tmp to be (markWord of object | UNLOCK_VALUE).
      orr(tmp, disp_hdr, markWord::unlocked_value);
  

*** 86,11 ***
      // Compare object markWord with an unlocked value (tmp) and if
      // equal exchange the stack address of our box with object markWord.
      // On failure disp_hdr contains the possibly locked markWord.
      cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
              /*release*/ true, /*weak*/ false, disp_hdr);
!     br(Assembler::EQ, cont);
  
      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  
    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont
--- 85,11 ---
      // Compare object markWord with an unlocked value (tmp) and if
      // equal exchange the stack address of our box with object markWord.
      // On failure disp_hdr contains the possibly locked markWord.
      cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
              /*release*/ true, /*weak*/ false, disp_hdr);
!     br(Assembler::EQ, count);
  
      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  
    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont
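
The cmpxchg in this hunk (acquire and release both set) is the LM_LEGACY stack-lock attempt; the only change is that success now branches to count instead of cont. A standalone sketch of the same compare-and-exchange idea with std::atomic, using illustrative names rather than the HotSpot types:

#include <atomic>
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kUnlockedValue = 0x1; // illustrative stand-in

struct Object    { std::atomic<std::uintptr_t> mark; };
struct BasicLock { std::uintptr_t displaced_header; };

// Attempt the stack lock: succeed only if the markWord still carries the
// unlocked pattern; on success the markWord now holds the box address.
bool fast_lock_legacy(Object* obj, BasicLock* box) {
  std::uintptr_t expected =
      obj->mark.load(std::memory_order_relaxed) | kUnlockedValue;
  box->displaced_header = expected;  // remember the displaced markWord
  return obj->mark.compare_exchange_strong(
      expected, reinterpret_cast<std::uintptr_t>(box),
      std::memory_order_acq_rel, std::memory_order_relaxed);
}

int main() {
  Object obj{{kUnlockedValue}};
  BasicLock box{};
  std::printf("locked: %d\n", fast_lock_legacy(&obj, &box)); // locked: 1
  std::printf("locked: %d\n", fast_lock_legacy(&obj, &box)); // locked: 0 (already locked)
}
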

*** 102,80 ***
      mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If the condition is true we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
      ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
      str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
!     b(cont);
    }
  
    // Handle existing monitor.
    bind(object_has_monitor);
  
    // The object's monitor m is unlocked iff m->owner == nullptr,
!   // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from null to current thread.
    add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
!   cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
            /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
  
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    mov(tmp, (address)markWord::unused_mark().value());
    str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  
!   br(Assembler::EQ, cont); // CAS success means locking succeeded
  
!   cmp(tmp3Reg, rthread);
!   br(Assembler::NE, cont); // Check for recursive locking
  
    // Recursive lock case
    increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
    // flag == EQ still from the cmp above, checking if this is a reentrant lock
! 
-   bind(cont);
-   // flag == EQ indicates success
-   // flag == NE indicates failure
-   br(Assembler::NE, no_count);
  
    bind(count);
!   increment(Address(rthread, JavaThread::held_monitor_count_offset()));
  
    bind(no_count);
  }
  
  void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
                                      Register tmp2Reg) {
    Register oop = objectReg;
    Register box = boxReg;
    Register disp_hdr = tmpReg;
    Register tmp = tmp2Reg;
    Label cont;
    Label object_has_monitor;
!   Label count, no_count;
  
    assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
    assert_different_registers(oop, box, tmp, disp_hdr);
  
    if (LockingMode == LM_LEGACY) {
      // Find the lock address and load the displaced header from the stack.
      ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  
      // If the displaced header is 0, we have a recursive unlock.
      cmp(disp_hdr, zr);
!     br(Assembler::EQ, cont);
    }
  
    // Handle existing monitor.
    ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
  
    if (LockingMode == LM_MONITOR) {
      tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
!     b(cont);
    } else {
      assert(LockingMode == LM_LEGACY, "must be");
    // Check if it is still a lightweight lock; this is true if we
      // see the stack address of the basicLock in the markWord of the
      // object.
--- 101,80 ---
      mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If the condition is true we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
      ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
      str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
!     b(no_count);
    }
  
    // Handle existing monitor.
    bind(object_has_monitor);
  
    // The object's monitor m is unlocked iff m->owner == nullptr,
!   // otherwise m->owner may contain a thread id, a stack address for LM_LEGACY,
+   // or the ANONYMOUS_OWNER constant for LM_LIGHTWEIGHT.
    //
    // Try to CAS m->owner from null to current thread.
+   ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
    add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
!   cmpxchg(tmp, zr, rscratch2, Assembler::xword, /*acquire*/ true,
            /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
  
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    mov(tmp, (address)markWord::unused_mark().value());
    str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  
!   br(Assembler::EQ, no_count); // CAS success means locking succeeded
  
!   cmp(tmp3Reg, rscratch2);
!   br(Assembler::NE, no_count); // Check for recursive locking
  
    // Recursive lock case
    increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
    // flag == EQ still from the cmp above, checking if this is a reentrant lock
!   b(no_count);
  
    bind(count);
!   inc_held_monitor_count();
  
    bind(no_count);
+   // flag == EQ indicates success
+   // flag == NE indicates failure
  }
  
  void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
                                      Register tmp2Reg) {
    Register oop = objectReg;
    Register box = boxReg;
    Register disp_hdr = tmpReg;
    Register tmp = tmp2Reg;
    Label cont;
    Label object_has_monitor;
!   Label no_count;
  
    assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
    assert_different_registers(oop, box, tmp, disp_hdr);
  
    if (LockingMode == LM_LEGACY) {
      // Find the lock address and load the displaced header from the stack.
      ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
  
      // If the displaced header is 0, we have a recursive unlock.
      cmp(disp_hdr, zr);
!     br(Assembler::EQ, no_count);
    }
  
    // Handle existing monitor.
    ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
  
    if (LockingMode == LM_MONITOR) {
      tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
!     b(no_count);
    } else {
      assert(LockingMode == LM_LEGACY, "must be");
    // Check if it is still a lightweight lock; this is true if we
      // see the stack address of the basicLock in the markWord of the
      // object.
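
The main change in the hunk above is the owner value: the monitor's owner field is now CASed from null to a per-thread lock id loaded from JavaThread::lock_id_offset(), and the recursion check compares against that id instead of rthread. A standalone model of that enter logic (field and type names are illustrative, not HotSpot's):

#include <atomic>
#include <cstdint>
#include <cstdio>

struct Monitor {
  std::atomic<std::uint64_t> owner{0}; // 0 == unowned
  std::uint64_t recursions = 0;
};

enum class LockResult { Acquired, Reentered, Contended };

LockResult monitor_enter(Monitor& m, std::uint64_t my_lock_id) {
  std::uint64_t expected = 0;
  if (m.owner.compare_exchange_strong(expected, my_lock_id,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire)) {
    return LockResult::Acquired;        // CAS success: we own the monitor now
  }
  if (expected == my_lock_id) {         // observed owner is our own id: reenter
    ++m.recursions;
    return LockResult::Reentered;
  }
  return LockResult::Contended;         // someone else owns it: take the slow path
}

int main() {
  Monitor m;
  std::printf("%d\n", static_cast<int>(monitor_enter(m, 42))); // 0 (Acquired)
  std::printf("%d\n", static_cast<int>(monitor_enter(m, 42))); // 1 (Reentered)
  std::printf("%d\n", static_cast<int>(monitor_enter(m, 7)));  // 2 (Contended)
}
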

*** 190,38 ***
    // Handle existing monitor.
    bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    add(tmp, tmp, -(int)markWord::monitor_value); // monitor
  
    ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
  
    Label notRecursive;
    cbz(disp_hdr, notRecursive);
  
    // Recursive lock
    sub(disp_hdr, disp_hdr, 1u);
    str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
    cmp(disp_hdr, disp_hdr); // Sets flags for result
!   b(cont);
  
    bind(notRecursive);
    ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
    ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
    orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    cmp(rscratch1, zr); // Sets flags for result
!   cbnz(rscratch1, cont);
    // need a release store here
    lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
    stlr(zr, tmp); // set unowned
  
    bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
    br(Assembler::NE, no_count);
  
!   bind(count);
-   decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
  
    bind(no_count);
  }
  
  void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,
--- 189,50 ---
    // Handle existing monitor.
    bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    add(tmp, tmp, -(int)markWord::monitor_value); // monitor
  
+   // If the owner is anonymous, we need to fix it -- in an outline stub.
+   Register tmp2 = disp_hdr;
+   ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
+   // We cannot use tbnz here; the target might be too far away and cannot
+   // be encoded.
+   mov(rscratch1, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
+   cmp(tmp2, rscratch1);
+   C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
+   Compile::current()->output()->add_stub(stub);
+   br(Assembler::EQ, stub->entry());
+   bind(stub->continuation());
+ 
    ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
  
    Label notRecursive;
    cbz(disp_hdr, notRecursive);
  
    // Recursive lock
    sub(disp_hdr, disp_hdr, 1u);
    str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
    cmp(disp_hdr, disp_hdr); // Sets flags for result
!   b(no_count);
  
    bind(notRecursive);
    ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
    ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
    orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    cmp(rscratch1, zr); // Sets flags for result
!   cbnz(rscratch1, no_count);
    // need a release store here
    lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
    stlr(zr, tmp); // set unowned
+   b(no_count);
  
    bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
    br(Assembler::NE, no_count);
  
!   dec_held_monitor_count();
  
    bind(no_count);
  }
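
New in the hunk above is the out-of-line C2HandleAnonOMOwnerStub: before the recursion and waiter checks, the owner field is compared against ObjectMonitor::ANONYMOUS_OWNER and, on a match, fixed up in the stub. A standalone sketch of what that fix-up amounts to conceptually; the constant value and names are illustrative, and the real stub does more bookkeeping than this:

#include <atomic>
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t kAnonymousOwner = 1; // stand-in for ObjectMonitor::ANONYMOUS_OWNER

struct Monitor { std::atomic<std::uint64_t> owner{0}; };

// Make an anonymously owned monitor explicitly owned by this thread's lock id.
// Only the locking thread reaches this on its own unlock path, so a plain
// store suffices in this model.
void fix_anonymous_owner(Monitor& m, std::uint64_t my_lock_id) {
  if (m.owner.load(std::memory_order_relaxed) == kAnonymousOwner) {
    m.owner.store(my_lock_id, std::memory_order_relaxed);
  }
}

int main() {
  Monitor m;
  m.owner.store(kAnonymousOwner);
  fix_anonymous_owner(m, 42);
  std::printf("owner: %llu\n", (unsigned long long)m.owner.load()); // owner: 42
}
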
  
  void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,

*** 296,25 ***
      const Register t3_owner = t3;
  
      // Compute owner address.
      lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
  
!     // CAS owner (null => current thread).
!     cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
              /*release*/ false, /*weak*/ false, t3_owner);
      br(Assembler::EQ, locked);
  
      // Check if recursive.
!     cmp(t3_owner, rthread);
      br(Assembler::NE, slow_path);
  
      // Recursive.
      increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
    }
  
    bind(locked);
-   increment(Address(rthread, JavaThread::held_monitor_count_offset()));
  
  #ifdef ASSERT
    // Check that locked label is reached with Flags == EQ.
    Label flag_correct;
    br(Assembler::EQ, flag_correct);
--- 307,25 ---
      const Register t3_owner = t3;
  
      // Compute owner address.
      lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
  
!     // CAS owner (null => current thread id).
!     ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
+     cmpxchg(t2_owner_addr, zr, rscratch2, Assembler::xword, /*acquire*/ true,
              /*release*/ false, /*weak*/ false, t3_owner);
      br(Assembler::EQ, locked);
  
      // Check if recursive.
!     cmp(t3_owner, rscratch2);
      br(Assembler::NE, slow_path);
  
      // Recursive.
      increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
    }
  
    bind(locked);
  
  #ifdef ASSERT
    // Check that locked label is reached with Flags == EQ.
    Label flag_correct;
    br(Assembler::EQ, flag_correct);
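
The other visible change in this hunk is that the fast path no longer bumps JavaThread's held-monitor counter directly at the locked label; in the earlier hunks the direct increment and decrement were replaced by the inc_held_monitor_count()/dec_held_monitor_count() helpers. A standalone model of what such per-thread bookkeeping boils down to (the counter here is an illustrative stand-in for the JavaThread field):

#include <cstdio>

// Per-thread counter; no atomics needed because only the owning thread updates it.
thread_local long held_monitor_count = 0;

void on_monitor_locked()   { ++held_monitor_count; }
void on_monitor_unlocked() { --held_monitor_count; }

int main() {
  on_monitor_locked();
  on_monitor_locked();
  on_monitor_unlocked();
  std::printf("held: %ld\n", held_monitor_count); // held: 1
}
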

*** 448,22 ***
      cmp(rscratch1, zr);
      br(Assembler::EQ, release);
  
      // The owner may be anonymous and we removed the last obj entry in
      // the lock-stack. This loses the information about the owner.
!     // Write the thread to the owner field so the runtime knows the owner.
!     str(rthread, Address(t2_owner_addr));
      b(slow_path);
  
      bind(release);
      // Set owner to null.
      // Release to satisfy the JMM
      stlr(zr, t2_owner_addr);
    }
  
    bind(unlocked);
-   decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
  
  #ifdef ASSERT
    // Check that unlocked label is reached with Flags == EQ.
    Label flag_correct;
    br(Assembler::EQ, flag_correct);
--- 459,22 ---
      cmp(rscratch1, zr);
      br(Assembler::EQ, release);
  
      // The owner may be anonymous and we removed the last obj entry in
      // the lock-stack. This loses the information about the owner.
!     // Write the thread id to the owner field so the runtime knows the owner.
!     ldr(t3_t, Address(rthread, JavaThread::lock_id_offset()));
+     str(t3_t, Address(t2_owner_addr));
      b(slow_path);
  
      bind(release);
      // Set owner to null.
      // Release to satisfy the JMM
      stlr(zr, t2_owner_addr);
    }
  
    bind(unlocked);
  
  #ifdef ASSERT
    // Check that unlocked label is reached with Flags == EQ.
    Label flag_correct;
    br(Assembler::EQ, flag_correct);
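
In this last hunk the unlock path writes the current thread's lock id, loaded from JavaThread::lock_id_offset(), instead of rthread into the owner field when the last lock-stack entry for the object has been removed and the monitor cannot simply be released. A standalone sketch of that release decision; the field names are illustrative and the queued-waiters check is simplified here:

#include <atomic>
#include <cstdint>
#include <cstdio>

struct Monitor {
  std::atomic<std::uint64_t> owner{0};
  std::atomic<void*>         entry_list{nullptr}; // stand-in for EntryList/cxq
};

// Release step: with no queued threads, store a null owner with release
// semantics; otherwise make the (possibly anonymous) owner explicit with this
// thread's lock id and let the slow path hand the monitor over.
bool try_fast_release(Monitor& m, std::uint64_t my_lock_id) {
  if (m.entry_list.load(std::memory_order_relaxed) != nullptr) {
    m.owner.store(my_lock_id, std::memory_order_relaxed); // record the real owner
    return false;                                         // slow path finishes
  }
  m.owner.store(0, std::memory_order_release);            // set unowned
  return true;
}

int main() {
  Monitor m;
  std::printf("fast release: %d\n", try_fast_release(m, 42)); // fast release: 1
}
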