src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp

@@ -71,36 +71,63 @@
    __ bind(entry());
    __ call(RuntimeAddress(StubRoutines::method_entry_barrier()));
    __ jmp(continuation(), false /* maybe_short */);
  }
  
+ #ifdef _LP64
+ int C2HandleAnonOMOwnerStub::max_size() const {
+   // The max size of the stub was determined empirically: return 0 here and
+   // C2CodeStubList::emit() fails its size assert, reporting the actual
+   // number of bytes needed.
+   return DEBUG_ONLY(40) NOT_DEBUG(25);
+ }
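
A note on the sizing trick in the comment above: the hard-coded constants come from deliberately under-reporting the size and letting the debug-build assert print the real value. A minimal sketch of the kind of check involved; Emitter, Stub, and offset() are illustrative stand-ins, not HotSpot's API:

    #include <cassert>

    // Illustrative sketch of the sizing check performed when stubs are emitted.
    struct Emitter { int pos = 0; int offset() const { return pos; } };
    struct Stub {
      int max_size;
      void emit(Emitter& masm) { masm.pos += 25; }  // pretend the stub emits 25 bytes
    };

    void emit_checked(Emitter& masm, Stub& stub) {
      int start = masm.offset();
      stub.emit(masm);
      int actual = masm.offset() - start;
      // With max_size set to 0 this assert fires, and the reported 'actual'
      // is then hard-coded, e.g. DEBUG_ONLY(40) NOT_DEBUG(25) above.
      assert(actual <= stub.max_size && "stub exceeded its declared max size");
    }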
+ 
+ void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
+   __ bind(entry());
+   Register mon = monitor();
+   Register t = tmp();
+   __ movptr(t, Address(r15_thread, JavaThread::lock_id_offset()));
+   __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), t);
+   if (LockingMode == LM_LIGHTWEIGHT) {
+     __ subl(Address(r15_thread, JavaThread::lock_stack_top_offset()), oopSize);
+ #ifdef ASSERT
+     __ movl(t, Address(r15_thread, JavaThread::lock_stack_top_offset()));
+     __ movptr(Address(r15_thread, t), 0);
+ #endif
+   } else {
+     __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(stack_locker)), NULL_WORD);
+     __ decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
+   }
+   __ jmp(continuation());
+ }
+ #endif
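
In plain C++ terms, the new stub above transfers an anonymously-owned monitor to the current thread's lock id and repairs the thread-local lock state. A rough, illustrative-only restatement; the types below are stand-ins, not HotSpot's ObjectMonitor/JavaThread:

    #include <cstdint>

    enum LockingModeKind { LM_MONITOR, LM_LEGACY, LM_LIGHTWEIGHT };

    struct Monitor { std::intptr_t owner; std::intptr_t stack_locker; };
    struct Thread  {
      std::intptr_t lock_id;
      int           lock_stack_top;     // byte offset of the lock-stack top
      long          held_monitor_count;
    };

    void handle_anon_owner(Monitor* mon, Thread* self, LockingModeKind mode) {
      mon->owner = self->lock_id;                       // claim the anonymous monitor
      if (mode == LM_LIGHTWEIGHT) {
        self->lock_stack_top -= sizeof(std::intptr_t);  // pop the lock-stack slot
      } else {
        mon->stack_locker = 0;                          // no BasicLock owns it anymore
        self->held_monitor_count--;
      }
    }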
+ 
  int C2FastUnlockLightweightStub::max_size() const {
    return 128;
  }
  
  void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
-   assert(_t == rax, "must be");
+   assert(_t1 == rax, "must be");
  
-   Label restore_held_monitor_count_and_slow_path;
+   Label slow_path;
  
    { // Restore lock-stack and handle the unlock in runtime.
  
      __ bind(_push_and_slow_path);
  #ifdef ASSERT
      // The obj was cleared only in debug builds.
-     __ movl(_t, Address(_thread, JavaThread::lock_stack_top_offset()));
-     __ movptr(Address(_thread, _t), _obj);
+     __ movl(_t1, Address(_thread, JavaThread::lock_stack_top_offset()));
+     __ movptr(Address(_thread, _t1), _obj);
  #endif
      __ addl(Address(_thread, JavaThread::lock_stack_top_offset()), oopSize);
    }
  
-   { // Restore held monitor count and slow path.
+   { // Handle the unlock in runtime.
  
-     __ bind(restore_held_monitor_count_and_slow_path);
-     // Restore held monitor count.
-     __ increment(Address(_thread, JavaThread::held_monitor_count_offset()));
-     // increment will always result in ZF = 0 (no overflows).
+     __ bind(slow_path);
+     // Set ZF = 0 to indicate failure.
+     __ orl(_t1, 1);
      __ jmp(slow_path_continuation());
    }
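
The or-with-1 above is the cheapest way to guarantee ZF = 0: OR sets the flags from its result, and a value with bit 0 forced on can never be zero, so the caller's branch on ZF sees "failure" and takes the runtime call. A trivial self-check of that invariant:

    #include <cassert>

    // Sanity sketch: (x | 1) is nonzero for every x, so orl(_t1, 1)
    // always leaves ZF clear, signalling "take the slow path".
    int main() {
      for (long x = -1000; x <= 1000; x++) {
        assert((x | 1) != 0);
      }
      return 0;
    }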
  
    { // Handle monitor medium path.
  

@@ -108,15 +135,15 @@
  
      Label fix_zf_and_unlocked;
      const Register monitor = _mark;
  
  #ifndef _LP64
-     __ jmpb(restore_held_monitor_count_and_slow_path);
+     __ jmpb(slow_path);
  #else // _LP64
      // Successor null check.
      __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
-     __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path);
+     __ jccb(Assembler::equal, slow_path);
  
      // Release lock.
      __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
  
      // Fence.
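
The release-then-fence pair above follows the usual monitor-exit protocol: the owner word is cleared with a plain store, and the full fence orders that store before the successor re-check that follows in the elided code. A compact std::atomic rendering of the ordering, with a hypothetical field layout for illustration:

    #include <atomic>
    #include <cstdint>

    // Hypothetical layout, for illustration only.
    struct Monitor {
      std::atomic<std::intptr_t> owner;
      std::atomic<std::intptr_t> succ;
    };

    bool release_and_recheck(Monitor& m) {
      m.owner.store(0, std::memory_order_relaxed);          // Release lock.
      std::atomic_thread_fence(std::memory_order_seq_cst);  // Fence.
      // Only after the fence may succ be consulted to decide whether a
      // successor took over or the relock path must run.
      return m.succ.load(std::memory_order_relaxed) != 0;
    }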

@@ -131,12 +158,13 @@
      // Try to relock; if it fails, the monitor has been handed over.
      // TODO: Caveat, this may fail due to deflation, which does
      //       not handle the monitor handoff. It currently only works
      //       because of the responsible thread.
      __ xorptr(rax, rax);
-     __ lock(); __ cmpxchgptr(_thread, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
-     __ jccb  (Assembler::equal, restore_held_monitor_count_and_slow_path);
+     __ movptr(_t2, Address(_thread, JavaThread::lock_id_offset()));
+     __ lock(); __ cmpxchgptr(_t2, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+     __ jccb  (Assembler::equal, slow_path);
  #endif
  
      __ bind(fix_zf_and_unlocked);
      __ xorl(rax, rax);
      __ jmp(unlocked_continuation());
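
The relock above is a compare-and-swap on the owner word: rax is zeroed first, so the lock-prefixed cmpxchg succeeds (ZF = 1) only if the monitor is still unowned, and the value installed is the thread's lock id loaded into _t2 rather than the JavaThread* itself. A portable analogue, modelling the owner as a plain atomic word (an assumption, not HotSpot's type):

    #include <atomic>
    #include <cstdint>

    // Analogue of: xorptr(rax, rax); lock cmpxchgptr(_t2, owner).
    // On x86, lock cmpxchg sets ZF exactly when the exchange happened.
    bool try_relock(std::atomic<std::intptr_t>& owner, std::intptr_t lock_id) {
      std::intptr_t expected = 0;  // xorptr(rax, rax): expect an unowned monitor
      return owner.compare_exchange_strong(expected, lock_id);
    }

A successful exchange corresponds to the jccb(Assembler::equal, slow_path) above: the thread re-owns the monitor and finishes the unlock in the runtime.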