
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

@@ -48,10 +48,11 @@
  #include "oops/klass.inline.hpp"
  #include "runtime/biasedLocking.hpp"
  #include "runtime/icache.hpp"
  #include "runtime/interfaceSupport.inline.hpp"
  #include "runtime/jniHandles.inline.hpp"
+ #include "runtime/objectMonitor.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.hpp"
  #include "utilities/powerOfTwo.hpp"
  #ifdef COMPILER1

@@ -2487,10 +2488,14 @@
    BLOCK_COMMENT("cmpxchg {");
    if (UseLSE) {
      mov(result, expected);
      lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
      compare_eq(result, expected, size);
+ #ifdef ASSERT
+     // Poison rscratch1, which the !UseLSE branch clobbers, to catch code that relies on it
+     mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
+ #endif
    } else {
      Label retry_load, done;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        prfm(Address(addr), PSTL1STRM);
      bind(retry_load);

@@ -3803,13 +3808,50 @@
    ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
    ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
    ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
  }
  
- void MacroAssembler::load_klass(Register dst, Register src) {
-   if (UseCompressedClassPointers) {
+ // Loads the obj's narrow Klass (compressed class pointer) into dst.
+ // src and dst must be distinct registers.
+ // Preserves all registers (including src, rscratch1 and rscratch2), but clobbers condition flags.
+ void MacroAssembler::load_nklass(Register dst, Register src) {
+   assert(UseCompressedClassPointers, "expects UseCompressedClassPointers");
+ 
+   if (!UseCompactObjectHeaders) {
      ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+     return;
+   }
+ 
+   Label fast;
+ 
+   // Check if we can take the (common) fast path: unless the object is monitor-locked,
+   // the mark word (and with it the narrow Klass in its upper bits) is still in the object.
+   ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
+   tbz(dst, exact_log2(markWord::monitor_value), fast);
+ 
+   // Fetch displaced header
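+   // The ObjectMonitor's header field holds the object's displaced mark word, which
+   // still carries the narrow Klass in its upper bits.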
+   ldr(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
+ 
+   // Both paths converge here: shift to extract the narrow Klass (the caller decodes it).
+   bind(fast);
+   lsr(dst, dst, markWord::klass_shift);
+ }
+ 
+ void MacroAssembler::load_klass(Register dst, Register src, bool null_check_src) {
+   if (null_check_src) {
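+     // With compact headers the Klass is encoded in the mark word, so fault on the mark
+     // offset; otherwise fault on the separate klass field that is loaded below.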
+     if (UseCompactObjectHeaders) {
+       null_check(src, oopDesc::mark_offset_in_bytes());
+     } else {
+       null_check(src, oopDesc::klass_offset_in_bytes());
+     }
+   }
+ 
+   if (UseCompressedClassPointers) {
+     if (UseCompactObjectHeaders) {
+       load_nklass(dst, src);
+     } else {
+       ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+     }
      decode_klass_not_null(dst);
    } else {
      ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    }
  }

@@ -3844,12 +3886,17 @@
    ldr(dst, Address(dst, mirror_offset));
    resolve_oop_handle(dst, tmp);
  }
  
  void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
+   assert_different_registers(oop, trial_klass, tmp);
    if (UseCompressedClassPointers) {
-     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
+     if (UseCompactObjectHeaders) {
+       load_nklass(tmp, oop);
+     } else {
+       ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
+     }
      if (CompressedKlassPointers::base() == NULL) {
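+       // With a zero encoding base, the decoded trial_klass can be compared directly
+       // against the narrow klass shifted left, avoiding a decode of tmp.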
        cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
        return;
      } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
                 && CompressedKlassPointers::shift() == 0) {

@@ -3862,15 +3909,10 @@
      ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    }
    cmp(trial_klass, tmp);
  }
  
- void MacroAssembler::load_prototype_header(Register dst, Register src) {
-   load_klass(dst, src);
-   ldr(dst, Address(dst, Klass::prototype_header_offset()));
- }
- 
  void MacroAssembler::store_klass(Register dst, Register src) {
    // FIXME: Should this be a store release?  concurrent gcs assumes
    // klass length is valid if klass field is not null.
    if (UseCompressedClassPointers) {
      encode_klass_not_null(src);

@@ -3885,10 +3927,15 @@
      // Store to klass gap in destination
      strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
    }
  }
  
+ void MacroAssembler::load_prototype_header(Register dst, Register src) {
+   load_klass(dst, src);
+   ldr(dst, Address(dst, Klass::prototype_header_offset()));
+ }
+ 
  // Algorithm must match CompressedOops::encode.
  void MacroAssembler::encode_heap_oop(Register d, Register s) {
  #ifdef ASSERT
    verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
  #endif

@@ -5377,5 +5424,101 @@
        default:
          ShouldNotReachHere();
      }
    }
  }
+ 
+ // Implements lightweight-locking.
+ // Branches to slow upon failure to lock the object, with ZF cleared.
+ // Falls through upon success with ZF set.
+ //
+ //  - obj: the object to be locked
+ //  - hdr: the header, already loaded from obj, will be destroyed
+ //  - t1, t2: temporary registers, will be destroyed
+ void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
+   assert_different_registers(obj, hdr, t1, t2, rscratch1);
+ 
+   // Check if we would have space on lock-stack for the object.
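+   // The lock-stack top is kept as a byte offset from rthread; once it reaches
+   // end_offset() the stack is full and we must take the slow path.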
+   ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+   cmpw(t1, (unsigned)LockStack::end_offset() - 1);
+   br(Assembler::GT, slow);
+ 
+   // Set the unlocked bit in hdr: this is the mark word we expect to find (object->mark() | 1)
+   orr(hdr, hdr, markWord::unlocked_value);
+   // Clear the lock bits into t2: this is the locked mark word we want to install
+   eor(t2, hdr, markWord::unlocked_value);
+   // Try to swing header from unlocked to locked
+   // Clobbers rscratch1 when UseLSE is false
+   cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
+           /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
+   br(Assembler::NE, slow);
+ 
+   // After successful lock, push object on lock-stack
+   ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+   str(obj, Address(rthread, t1));
+   addw(t1, t1, oopSize);
+   strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+ }
+ 
+ // Implements lightweight-unlocking.
+ // Branches to slow upon failure, with ZF cleared.
+ // Falls through upon success, with ZF set.
+ //
+ // - obj: the object to be unlocked
+ // - hdr: the (pre-loaded) header of the object
+ // - t1, t2: temporary registers
+ void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
+   assert_different_registers(obj, hdr, t1, t2, rscratch1);
+ 
+ #ifdef ASSERT
+   {
+     // The following checks rely on the fact that the LockStack is only ever modified by
+     // its owning thread, even if the lock got inflated concurrently; removal of LockStack
+     // entries after inflation is deferred in that case.
+ 
+     // Check for lock-stack underflow.
+     Label stack_ok;
+     ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+     cmpw(t1, (unsigned)LockStack::start_offset());
+     br(Assembler::GT, stack_ok);
+     STOP("Lock-stack underflow");
+     bind(stack_ok);
+   }
+   {
+     // Check if the top of the lock-stack matches the unlocked object.
+     Label tos_ok;
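+     // t1 still holds the lock-stack top loaded by the underflow check above;
+     // step back one slot to address the most recently pushed entry.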
+     subw(t1, t1, oopSize);
+     ldr(t1, Address(rthread, t1));
+     cmpoop(t1, obj);
+     br(Assembler::EQ, tos_ok);
+     STOP("Top of lock-stack does not match the unlocked object");
+     bind(tos_ok);
+   }
+   {
+     // Check that hdr is fast-locked.
+     Label hdr_ok;
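+     // Fast-locked means both lock bits are clear, so the tst below must set ZF.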
+     tst(hdr, markWord::lock_mask_in_place);
+     br(Assembler::EQ, hdr_ok);
+     STOP("Header is not fast-locked");
+     bind(hdr_ok);
+   }
+ #endif
+ 
+   // Build the new (unlocked) header in t1
+   orr(t1, hdr, markWord::unlocked_value);
+ 
+   // Try to swing header from locked to unlocked
+   // Clobbers rscratch1 when UseLSE is false
+   cmpxchg(obj, hdr, t1, Assembler::xword,
+           /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
+   br(Assembler::NE, slow);
+ 
+   // After successful unlock, pop object from lock-stack
+   ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+   subw(t1, t1, oopSize);
+ #ifdef ASSERT
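+   // Zero the just-popped slot so debug code never observes a stale oop above the top.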
+   str(zr, Address(rthread, t1));
+ #endif
+   strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+ }