< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page
@@ -23,10 +23,11 @@
   */
  
  #include "precompiled.hpp"
  #include "classfile/vmSymbols.hpp"
  #include "jfr/jfrEvents.hpp"
+ #include "gc/shared/suspendibleThreadSet.hpp"
  #include "logging/log.hpp"
  #include "logging/logStream.hpp"
  #include "memory/allocation.inline.hpp"
  #include "memory/padded.hpp"
  #include "memory/resourceArea.hpp"

@@ -37,10 +38,11 @@
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/handshake.hpp"
  #include "runtime/interfaceSupport.inline.hpp"
  #include "runtime/javaThread.hpp"
+ #include "runtime/lockStack.inline.hpp"
  #include "runtime/mutexLocker.hpp"
  #include "runtime/objectMonitor.hpp"
  #include "runtime/objectMonitor.inline.hpp"
  #include "runtime/os.inline.hpp"
  #include "runtime/osThread.hpp"

@@ -309,11 +311,12 @@
    assert(current->thread_state() == _thread_in_Java, "invariant");
    NoSafepointVerifier nsv;
    if (obj == nullptr) return false;  // slow-path for invalid obj
    const markWord mark = obj->mark();
  
-   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
+   if ((mark.is_fast_locked() && current->lock_stack().contains(oop(obj))) ||
+       (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
      // Degenerate notify
      // stack-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  

@@ -392,11 +395,13 @@
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // stack-locking in the object's header, the second check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
-     lock->set_displaced_header(markWord::unused_mark());
+     if (!UseFastLocking) {
+       lock->set_displaced_header(markWord::unused_mark());
+     }
  
      if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
        assert(m->_recursions == 0, "invariant");
        current->inc_held_monitor_count();
        return true;

@@ -481,34 +486,58 @@
    }
  
    current->inc_held_monitor_count();
  
    if (!useHeavyMonitors()) {
-     markWord mark = obj->mark();
-     if (mark.is_neutral()) {
-       // Anticipate successful CAS -- the ST of the displaced mark must
-       // be visible <= the ST performed by the CAS.
-       lock->set_displaced_header(mark);
-       if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
+     if (UseFastLocking) {
+       LockStack& lock_stack = current->lock_stack();
+ 
+       markWord header = obj()->mark_acquire();
+       while (true) {
+         if (header.is_neutral()) {
+           assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
+           // Try to swing into 'fast-locked' state without inflating.
+           markWord locked_header = header.set_fast_locked();
+           markWord witness = obj()->cas_set_mark(locked_header, header);
+           if (witness == header) {
+             // Successfully fast-locked, push object to lock-stack and return.
+             lock_stack.push(obj());
+             return;
+           }
+           // Otherwise retry.
+           header = witness;
+         } else {
+           // Fall-through to inflate-enter.
+           break;
+         }
+       }
+     } else {
+       markWord mark = obj->mark();
+       if (mark.is_neutral()) {
+         // Anticipate successful CAS -- the ST of the displaced mark must
+         // be visible <= the ST performed by the CAS.
+         lock->set_displaced_header(mark);
+         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
+           return;
+         }
+         // Fall through to inflate() ...
+       } else if (mark.has_locker() &&
+                  current->is_lock_owned((address)mark.locker())) {
+         assert(lock != mark.locker(), "must not re-lock the same lock");
+         assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
+         lock->set_displaced_header(markWord::from_pointer(nullptr));
          return;
        }
-       // Fall through to inflate() ...
-     } else if (mark.has_locker() &&
-                current->is_lock_owned((address)mark.locker())) {
-       assert(lock != mark.locker(), "must not re-lock the same lock");
-       assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
-       lock->set_displaced_header(markWord::from_pointer(nullptr));
-       return;
-     }
  
-     // The object header will never be displaced to this lock,
-     // so it does not matter what the value is, except that it
-     // must be non-zero to avoid looking like a re-entrant lock,
-     // and must not look locked either.
-     lock->set_displaced_header(markWord::unused_mark());
+       // The object header will never be displaced to this lock,
+       // so it does not matter what the value is, except that it
+       // must be non-zero to avoid looking like a re-entrant lock,
+       // and must not look locked either.
+       lock->set_displaced_header(markWord::unused_mark());
+     }
    } else if (VerifyHeavyMonitors) {
-     guarantee(!obj->mark().has_locker(), "must not be stack-locked");
+     guarantee(!obj->mark().has_locker() && !obj->mark().is_fast_locked(), "must not be stack-locked");
    }
  
    // An async deflation can race after the inflate() call and before
    // enter() can make the ObjectMonitor busy. enter() returns false if
    // we have lost the race to async deflation and we simply try again.

@@ -523,57 +552,82 @@
// Monitor exit for 'object'. Undoes a fast-lock or stack-lock held by
// 'current' when possible; otherwise falls through to inflating the lock
// and exiting the resulting ObjectMonitor.
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  current->dec_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (UseFastLocking) {
      if (mark.is_fast_locked()) {
        // Try to swing the mark from fast-locked back to unlocked in one CAS.
        markWord unlocked_header = mark.set_unlocked();
        markWord witness = object->cas_set_mark(unlocked_header, mark);
        if (witness != mark) {
          // Another thread beat us, it can only have installed an anonymously locked monitor at this point.
          // Fetch that monitor, set owner correctly to this thread, and exit it (allowing waiting threads to enter).
          assert(witness.has_monitor(), "must have monitor");
          ObjectMonitor* monitor = witness.monitor();
          assert(monitor->is_owner_anonymous(), "must be anonymous owner");
          monitor->set_owner_from_anonymous(current);
          monitor->exit(current);
        }
        // Whether the CAS succeeded or we exited the inflated monitor, the
        // object no longer belongs on this thread's lock-stack.
        LockStack& lock_stack = current->lock_stack();
        lock_stack.remove(object);
        return;
      }
    } else {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_neutral(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // Java Monitor can be asynchronously inflated by a thread that
            // does not own the Java Monitor.
            ObjectMonitor* m = mark.monitor();
            assert(m->object()->mark() == mark, "invariant");
            assert(m->is_entered(current), "invariant");
          }
        }
#endif
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee(!object->mark().has_locker(), "must not be stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  if (UseFastLocking && monitor->is_owner_anonymous()) {
    // It must be us: we held the fast-lock and another thread inflated it
    // with an anonymous owner. Pop the lock object from the lock stack and
    // claim ownership before exiting the monitor.
    LockStack& lock_stack = current->lock_stack();
    oop popped = lock_stack.pop();
    assert(popped == object, "must be owned by this thread");
    monitor->set_owner_from_anonymous(current);
  }
  monitor->exit(current);
}
  
  // -----------------------------------------------------------------------------
  // Class Loader  support to workaround deadlocks on the class loader lock objects

@@ -696,11 +750,12 @@
  
  void ObjectSynchronizer::notify(Handle obj, TRAPS) {
    JavaThread* current = THREAD;
  
    markWord mark = obj->mark();
-   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
+   if ((mark.is_fast_locked() && current->lock_stack().contains(obj())) ||
+       (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped by the calling thread.

@@ -711,11 +766,12 @@
  // NOTE: see comment of notify()
  void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
    JavaThread* current = THREAD;
  
    markWord mark = obj->mark();
-   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
+   if ((mark.is_fast_locked() && current->lock_stack().contains(obj())) ||
+       (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped by the calling thread.

@@ -739,11 +795,11 @@
  
  static SharedGlobals GVars;
  
  static markWord read_stable_mark(oop obj) {
    markWord mark = obj->mark_acquire();
-   if (!mark.is_being_inflated()) {
+   if (!mark.is_being_inflated() || UseFastLocking) {
      return mark;       // normal fast-path return
    }
  
    int its = 0;
    for (;;) {

@@ -854,10 +910,15 @@
    if (value == 0) value = 0xBAD;
    assert(value != markWord::no_hash, "invariant");
    return value;
  }
  
+ static bool is_lock_owned(Thread* thread, oop obj) {
+   assert(UseFastLocking, "only call this with fast-locking enabled");
+   return thread->is_Java_thread() ? reinterpret_cast<JavaThread*>(thread)->lock_stack().contains(obj) : false;
+ }
+ 
  intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  
    while (true) {
      ObjectMonitor* monitor = nullptr;
      markWord temp, test;

@@ -908,11 +969,18 @@
          }
          return hash;
        }
        // Fall thru so we only have one place that installs the hash in
        // the ObjectMonitor.
-     } else if (current->is_lock_owned((address)mark.locker())) {
+     } else if (mark.is_fast_locked() && is_lock_owned(current, obj)) {
+       // This is a fast lock owned by the calling thread so use the
+       // markWord from the object.
+       hash = mark.hash();
+       if (hash != 0) {                  // if it has a hash, just return it
+         return hash;
+       }
+     } else if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
        // This is a stack lock owned by the calling thread so fetch the
        // displaced markWord from the BasicLock on the stack.
        temp = mark.displaced_mark_helper();
        assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
        hash = temp.hash();

@@ -977,10 +1045,16 @@
  
    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      return current->is_lock_owned((address)mark.locker());
    }
+ 
+   // Fast-locking case.
+   if (mark.is_fast_locked()) {
+     return current->lock_stack().contains(h_obj());
+   }
+ 
    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark.has_monitor()) {
      // The first stage of async deflation does not affect any field
      // used by this comparison so the ObjectMonitor* is usable here.
      ObjectMonitor* monitor = mark.monitor();

@@ -991,38 +1065,30 @@
    return false;
  }
  
// Best-effort query for the JavaThread that currently owns the lock on
// h_obj; returns nullptr if the object is unlocked or no owner can be
// resolved. The answer may be stale by the time the caller uses it, since
// the lock state can change immediately after the mark is read.
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack (stack-locking): the locker
  // address identifies the owning thread's stack.
  if (mark.has_locker()) {
    return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
  }

  // Fast-locked case: the mark carries no owner pointer, so the owner is
  // presumably resolved by searching the threads' lock-stacks for the
  // object (owning_thread_from_object) — verify against its definition.
  if (mark.is_fast_locked()) {
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != nullptr, "monitor should be non-null");
    return Threads::owning_thread_from_monitor(t_list, monitor);
  }

  // Unlocked (neutral) header: no owner. Cannot assert neutrality here
  // because another thread may have locked the object since the read.
  return nullptr;
}
  
  // Visitors ...
  

@@ -1216,20 +1282,28 @@
      // CASE: inflated
      if (mark.has_monitor()) {
        ObjectMonitor* inf = mark.monitor();
        markWord dmw = inf->header();
        assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
+       if (UseFastLocking && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
+         inf->set_owner_from_anonymous(current);
+         assert(current->is_Java_thread(), "must be Java thread");
+         reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
+       }
        return inf;
      }
  
      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
-     if (mark == markWord::INFLATING()) {
+     // NOTE: We need to check UseFastLocking here, because with fast-locking, the header
+     // may legitimately be zero: cleared lock-bits and all upper header bits zero.
+     // With fast-locking, the INFLATING protocol is not used.
+     if (mark == markWord::INFLATING() && !UseFastLocking) {
        read_stable_mark(object);
        continue;
      }
  
      // CASE: stack-locked

@@ -1241,12 +1315,55 @@
      // ObjectMonitor into the mark.  This was correct, but artificially lengthened
      // the interval in which INFLATING appeared in the mark, thus increasing
      // the odds of inflation contention.
  
      LogStreamHandle(Trace, monitorinflation) lsh;
+     if (mark.is_fast_locked()) {
+       assert(UseFastLocking, "can only happen with fast-locking");
+       ObjectMonitor* monitor = new ObjectMonitor(object);
+       monitor->set_header(mark.set_unlocked());
+       bool own = is_lock_owned(current, object);
+       if (own) {
+         // Owned by us.
+         monitor->set_owner_from(nullptr, current);
+       } else {
+         // Owned by somebody else.
+         monitor->set_owner_anonymous();
+       }
+       markWord monitor_mark = markWord::encode(monitor);
+       markWord witness = object->cas_set_mark(monitor_mark, mark);
+       if (witness == mark) {
+         // Success! Return inflated monitor.
+         if (own) {
+           assert(current->is_Java_thread(), "must be: checked in is_lock_owned()");
+           reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
+         }
+         // Once the ObjectMonitor is configured and object is associated
+         // with the ObjectMonitor, it is safe to allow async deflation:
+         _in_use_list.add(monitor);
+ 
+         // Hopefully the performance counters are allocated on distinct
+         // cache lines to avoid false sharing on MP systems ...
+         OM_PERFDATA_OP(Inflations, inc());
+         if (log_is_enabled(Trace, monitorinflation)) {
+           ResourceMark rm(current);
+           lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
+                        INTPTR_FORMAT ", type='%s'", p2i(object),
+                        object->mark().value(), object->klass()->external_name());
+         }
+         if (event.should_commit()) {
+           post_monitor_inflate_event(&event, object, cause);
+         }
+         return monitor;
+       } else {
+         delete monitor;
+         continue;
+       }
+     }
  
      if (mark.has_locker()) {
+       assert(!UseFastLocking, "can not happen with fast-locking");
        ObjectMonitor* m = new ObjectMonitor(object);
        // Optimistically prepare the ObjectMonitor - anticipate successful CAS
        // We do this before the CAS in order to minimize the length of time
        // in which INFLATING appears in the mark.
  

@@ -1456,10 +1573,20 @@
      log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                  INTPTR_FORMAT, p2i(thread));
    }
  };
  
+ class VM_RendezvousGCThreads : public VM_Operation {
+ public:
+   bool evaluate_at_safepoint() const override { return false; }
+   VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
+   void doit() override {
+     SuspendibleThreadSet::synchronize();
+     SuspendibleThreadSet::desynchronize();
+   };
+ };
+ 
  // This function is called by the MonitorDeflationThread to deflate
  // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
  // and VM_ThreadDump::doit() by the VMThread.
  size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) {
    Thread* current = Thread::current();

@@ -1509,12 +1636,17 @@
                       _in_use_list.count(), _in_use_list.max());
        }
  
        // A JavaThread needs to handshake in order to safely free the
        // ObjectMonitors that were deflated in this cycle.
+       // Also, we sync and desync GC threads around the handshake, so that they can
+       // safely read the mark-word and look-through to the object-monitor, without
+       // being afraid that the object-monitor is going away.
        HandshakeForDeflation hfd_hc;
        Handshake::execute(&hfd_hc);
+       VM_RendezvousGCThreads sync_gc;
+       VMThread::execute(&sync_gc);
  
        if (ls != nullptr) {
          ls->print_cr("after handshaking: in_use_list stats: ceiling="
                       SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                       in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
< prev index next >