
src/hotspot/share/runtime/objectMonitor.cpp

@@ -50,10 +50,11 @@
  #include "runtime/osThread.hpp"
  #include "runtime/perfData.hpp"
  #include "runtime/safefetch.hpp"
  #include "runtime/safepointMechanism.inline.hpp"
  #include "runtime/sharedRuntime.hpp"
+ #include "runtime/threads.hpp"
  #include "services/threadService.hpp"
  #include "utilities/dtrace.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/macros.hpp"
  #include "utilities/preserveException.hpp"

@@ -110,10 +111,22 @@
  
  DEBUG_ONLY(static volatile bool InitDone = false;)
  
  OopStorage* ObjectMonitor::_oop_storage = nullptr;
  
+ OopHandle ObjectMonitor::_vthread_cxq_head;
+ ParkEvent* ObjectMonitor::_vthread_unparker_ParkEvent = nullptr;
+ 
+ static void post_virtual_thread_pinned_event(JavaThread* current, const char* reason) {
+   EventVirtualThreadPinned e;
+   if (e.should_commit()) {
+     e.set_pinnedReason(reason);
+     e.set_carrierThread(JFR_JVM_THREAD_ID(current));
+     e.commit();
+   }
+ }
+ 
  // -----------------------------------------------------------------------------
  // Theory of operations -- Monitors lists, thread residency, etc:
  //
  // * A thread acquires ownership of a monitor by successfully
  //   CAS()ing the _owner field from null to non-null.

@@ -247,10 +260,11 @@
  
  ObjectMonitor::ObjectMonitor(oop object) :
    _header(markWord::zero()),
    _object(_oop_storage, object),
    _owner(nullptr),
+   _stack_locker(nullptr),
    _previous_owner_tid(0),
    _next_om(nullptr),
    _recursions(0),
    _EntryList(nullptr),
    _cxq(nullptr),

@@ -314,11 +328,11 @@
      void* prev_owner = try_set_owner_from(nullptr, locking_thread);
  
      if (prev_owner == nullptr) {
        assert(_recursions == 0, "invariant");
        success = true;
-     } else if (prev_owner == locking_thread) {
+     } else if (prev_owner == owner_for(locking_thread)) {
        _recursions++;
        success = true;
      } else if (prev_owner == DEFLATER_MARKER) {
        // Racing with deflation.
        prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);

@@ -332,15 +346,10 @@
          // success will only be false if this races with something other than
          // deflation.
          prev_owner = try_set_owner_from(nullptr, locking_thread);
          success = prev_owner == nullptr;
        }
-     } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) {
-       assert(_recursions == 0, "must be");
-       _recursions = 1;
-       set_owner_from_BasicLock(prev_owner, locking_thread);
-       success = true;
      }
      assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
             ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
             p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
    } else {

@@ -355,11 +364,11 @@
      }
    }
  
    add_to_contentions(-1);
  
-   assert(!success || owner_raw() == locking_thread, "must be");
+   assert(!success || is_owner(locking_thread), "must be");
  
    return success;
  }
  
  bool ObjectMonitor::enter(JavaThread* current) {

@@ -371,41 +380,34 @@
    if (cur == nullptr) {
      assert(_recursions == 0, "invariant");
      return true;
    }
  
-   if (cur == current) {
+   if (cur == owner_for(current)) {
      // TODO-FIXME: check for integer overflow!  BUGID 6557169.
      _recursions++;
      return true;
    }
  
-   if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
-     assert(_recursions == 0, "internal state error");
-     _recursions = 1;
-     set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
-     return true;
-   }
- 
    // We've encountered genuine contention.
  
    // Try one round of spinning *before* enqueueing current
    // and before going through the awkward and expensive state
    // transitions.  The following spin is strictly optional ...
    // Note that if we acquire the monitor from an initial spin
    // we forgo posting JVMTI events and firing DTRACE probes.
    if (TrySpin(current)) {
-     assert(owner_raw() == current, "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
+     assert(owner_raw() == owner_for(current), "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
      assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
      assert(object()->mark() == markWord::encode(this),
             "object mark must match encoded this: mark=" INTPTR_FORMAT
             ", encoded this=" INTPTR_FORMAT, object()->mark().value(),
             markWord::encode(this).value());
      return true;
    }
  
-   assert(owner_raw() != current, "invariant");
+   assert(owner_raw() != owner_for(current), "invariant");
    assert(_succ != current, "invariant");
    assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
    assert(current->thread_state() != _thread_blocked, "invariant");
  
    // Keep track of contention for JVM/TI and M&M queries.

@@ -449,10 +451,27 @@
        // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
        // handler cannot accidentally consume an unpark() meant for the
        // ParkEvent associated with this ObjectMonitor.
      }
  
+ #ifdef LOOM_MONITOR_SUPPORT
+     ContinuationEntry* ce = current->last_continuation();
+     if (ce != nullptr && ce->is_virtual_thread() && current->is_on_monitorenter()) {
+       int result = Continuation::try_preempt(current, ce->cont_oop(current));
+       if (result == freeze_ok) {
+         bool acquired = HandlePreemptedVThread(current);
+         DEBUG_ONLY(int state = java_lang_VirtualThread::state(current->vthread()));
+         assert((acquired && current->preemption_cancelled() && state == java_lang_VirtualThread::RUNNING) ||
+                (!acquired && !current->preemption_cancelled() && state == java_lang_VirtualThread::BLOCKING), "invariant");
+         return true;
+       }
+       if (result == freeze_pinned_native) {
+         post_virtual_thread_pinned_event(current, "Native frame or <clinit> on stack");
+       }
+     }
+ #endif
+ 
      OSThreadContendState osts(current->osthread());
  
      assert(current->thread_state() == _thread_in_vm, "invariant");
  
      for (;;) {

@@ -469,11 +488,11 @@
          // If there is a suspend request, ExitOnSuspend will exit the OM
          // and set the OM as pending.
        }
        if (!eos.exited()) {
          // ExitOnSuspend did not exit the OM
-         assert(owner_raw() == current, "invariant");
+         assert(owner_raw() == owner_for(current), "invariant");
          break;
        }
      }
  
      // We've just gotten past the enter-check-for-suspend dance and we now own

@@ -483,11 +502,11 @@
    add_to_contentions(-1);
    assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
  
    // Must either set _recursions = 0 or ASSERT _recursions == 0.
    assert(_recursions == 0, "invariant");
-   assert(owner_raw() == current, "invariant");
+   assert(owner_raw() == owner_for(current), "invariant");
    assert(_succ != current, "invariant");
    assert(object()->mark() == markWord::encode(this), "invariant");
  
    // The thread -- now the owner -- is back in vm mode.
    // Report the glorious news via TI,DTrace and jvmstat.

@@ -558,31 +577,31 @@
    const oop obj = object_peek();
  
    if (obj == nullptr) {
      // If the object died, we can recycle the monitor without racing with
      // Java threads. The GC already broke the association with the object.
-     set_owner_from(nullptr, DEFLATER_MARKER);
+     set_owner_from_raw(nullptr, DEFLATER_MARKER);
      assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions());
      _contentions = INT_MIN; // minimum negative int
    } else {
      // Attempt async deflation protocol.
  
      // Set a null owner to DEFLATER_MARKER to force any contending thread
      // through the slow path. This is just the first part of the async
      // deflation dance.
-     if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) {
+     if (try_set_owner_from_raw(nullptr, DEFLATER_MARKER) != nullptr) {
        // The owner field is no longer null so we lost the race since the
        // ObjectMonitor is now busy.
        return false;
      }
  
      if (contentions() > 0 || _waiters != 0) {
        // Another thread has raced to enter the ObjectMonitor after
        // is_busy() above or has already entered and waited on
        // it which makes it busy so no deflation. Restore owner to
        // null if it is still DEFLATER_MARKER.
-       if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
+       if (try_set_owner_from_raw(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
          // Deferred decrement for the JT EnterI() that cancelled the async deflation.
          add_to_contentions(-1);
        }
        return false;
      }

@@ -591,11 +610,11 @@
      // to retry. This is the second part of the async deflation dance.
      if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
        // Contentions was no longer 0 so we lost the race since the
        // ObjectMonitor is now busy. Restore owner to null if it is
        // still DEFLATER_MARKER:
-       if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
+       if (try_set_owner_from_raw(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
          // Deferred decrement for the JT EnterI() that cancelled the async deflation.
          add_to_contentions(-1);
        }
        return false;
      }
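
An illustrative standalone model (not part of this change) of the two-part async deflation handshake shown in the two hunks above: claim a null _owner with DEFLATER_MARKER, back out if the monitor turns out to be busy, then flip _contentions from 0 to INT_MIN. std::atomic fields stand in for the real monitor fields; only DEFLATER_MARKER and the INT_MIN sentinel mirror the actual code.

#include <atomic>
#include <climits>
#include <cstdint>

struct MiniMonitor {
  static constexpr intptr_t DEFLATER_MARKER = 1;  // any non-null, non-thread value

  std::atomic<intptr_t> owner{0};        // 0 == no owner
  std::atomic<int>      contentions{0};
  int                   waiters{0};

  bool try_deflate() {
    // Part 1: claim a null owner so contending threads take the slow path.
    intptr_t expected = 0;
    if (!owner.compare_exchange_strong(expected, DEFLATER_MARKER)) {
      return false;                      // monitor is busy, no deflation
    }
    if (contentions.load() > 0 || waiters != 0) {
      // Became busy after all: restore a null owner unless an entering thread
      // already replaced the marker, in which case it owes us a decrement.
      expected = DEFLATER_MARKER;
      if (!owner.compare_exchange_strong(expected, 0)) {
        contentions.fetch_add(-1);       // deferred decrement for the canceller
      }
      return false;
    }
    // Part 2: make contentions negative so any late arrival backs off and retries.
    int zero = 0;
    if (!contentions.compare_exchange_strong(zero, INT_MIN)) {
      expected = DEFLATER_MARKER;
      if (!owner.compare_exchange_strong(expected, 0)) {
        contentions.fetch_add(-1);
      }
      return false;
    }
    return true;                         // both parts won: safe to deflate
  }
};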

@@ -687,11 +706,11 @@
  // Convert the fields used by is_busy() to a string that can be
  // used for diagnostic output.
  const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
    ss->print("is_busy: waiters=%d"
              ", contentions=%d"
-             ", owner=" PTR_FORMAT
+             ", owner=" INTPTR_FORMAT
              ", cxq=" PTR_FORMAT
              ", EntryList=" PTR_FORMAT,
              _waiters,
              (contentions() > 0 ? contentions() : 0),
              owner_is_DEFLATER_MARKER()

@@ -710,11 +729,11 @@
    assert(current->thread_state() == _thread_blocked, "invariant");
  
    // Try the lock - TATAS
    if (TryLock(current) == TryLockResult::Success) {
      assert(_succ != current, "invariant");
-     assert(owner_raw() == current, "invariant");
+     assert(owner_raw() == owner_for(current), "invariant");
      assert(_Responsible != current, "invariant");
      return;
    }
  
    if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {

@@ -743,19 +762,19 @@
    // operation to donate the remainder of this thread's quantum
    // to the owner.  This has subtle but beneficial affinity
    // effects.
  
    if (TrySpin(current)) {
-     assert(owner_raw() == current, "invariant");
+     assert(owner_raw() == owner_for(current), "invariant");
      assert(_succ != current, "invariant");
      assert(_Responsible != current, "invariant");
      return;
    }
  
    // The Spin failed -- Enqueue and park the thread ...
    assert(_succ != current, "invariant");
-   assert(owner_raw() != current, "invariant");
+   assert(owner_raw() != owner_for(current), "invariant");
    assert(_Responsible != current, "invariant");
  
    // Enqueue "current" on ObjectMonitor's _cxq.
    //
    // Node acts as a proxy for current.

@@ -781,11 +800,11 @@
  
      // Interference - the CAS failed because _cxq changed.  Just retry.
      // As an optional optimization we retry the lock.
      if (TryLock(current) == TryLockResult::Success) {
        assert(_succ != current, "invariant");
-       assert(owner_raw() == current, "invariant");
+       assert(owner_raw() == owner_for(current), "invariant");
        assert(_Responsible != current, "invariant");
        return;
      }
    }
  

@@ -829,20 +848,26 @@
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...
  
    int nWakeups = 0;
    int recheckInterval = 1;
+   bool do_timed_parked = false;
+ 
+   ContinuationEntry* ce = current->last_continuation();
+   if (ce != nullptr && ce->is_virtual_thread()) {
+     do_timed_parked = true;
+   }
  
    for (;;) {
  
      if (TryLock(current) == TryLockResult::Success) {
        break;
      }
-     assert(owner_raw() != current, "invariant");
+     assert(owner_raw() != owner_for(current), "invariant");
  
      // park self
-     if (_Responsible == current) {
+     if (_Responsible == current || do_timed_parked) {
        current->_ParkEvent->park((jlong) recheckInterval);
        // Increase the recheckInterval, but clamp the value.
        recheckInterval *= 8;
        if (recheckInterval > MAX_RECHECK_INTERVAL) {
          recheckInterval = MAX_RECHECK_INTERVAL;
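
For reference, a small standalone sketch (not part of this change) of the timed-park backoff that the _Responsible thread and, with this change, any carrier of a virtual thread now use; MAX_RECHECK_INTERVAL is 1000 ms in this file, taken here as an assumption.

#include <cstdio>

int main() {
  const long MAX_RECHECK_INTERVAL = 1000;      // clamp, in milliseconds
  long recheckInterval = 1;
  for (int wakeup = 0; wakeup < 6; wakeup++) {
    std::printf("park timeout %d: %ld ms\n", wakeup, recheckInterval);
    recheckInterval *= 8;                      // grow geometrically after each wakeup
    if (recheckInterval > MAX_RECHECK_INTERVAL) {
      recheckInterval = MAX_RECHECK_INTERVAL;  // never wait longer than the clamp
    }
  }
  return 0;  // prints 1, 8, 64, 512, 1000, 1000: a stranded thread rechecks within 1 s
}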

@@ -908,11 +933,11 @@
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, current.TState is stable.
  
-   assert(owner_raw() == current, "invariant");
+   assert(owner_raw() == owner_for(current), "invariant");
  
    UnlinkAfterAcquire(current, &node);
    if (_succ == current) _succ = nullptr;
  
    assert(_succ != current, "invariant");

@@ -964,10 +989,95 @@
    // execute a serializing instruction.
  
    return;
  }
  
+ bool ObjectMonitor::HandlePreemptedVThread(JavaThread* current) {
+   // Either because we acquire the lock below or because we will preempt the
+   // virtual thread, clear the _current_pending_monitor field of the current JavaThread.
+   current->set_current_pending_monitor(nullptr);
+ 
+   // Try once more after freezing the continuation.
+   if (TryLock(current) == TryLockResult::Success) {
+     assert(owner_raw() == owner_for(current), "invariant");
+     assert(_succ != current, "invariant");
+     assert(_Responsible != current, "invariant");
+     current->set_preemption_cancelled(true);
+     add_to_contentions(-1);
+     return true;
+   }
+ 
+   if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
+     // Cancelled the in-progress async deflation by changing owner from
+     // DEFLATER_MARKER to current. As part of the contended enter protocol,
+     // contentions was incremented to a positive value before this call to
+     // HandlePreemptedVThread(). We avoid decrementing contentions to
+     // prevent the deflater thread from winning the last part of the
+     // 2-part async deflation protocol. The deflater thread will decrement
+     // contentions after it recognizes that the async deflation was cancelled.
+     assert(_succ != current, "invariant");
+     assert(_Responsible != current, "invariant");
+     current->set_preemption_cancelled(true);
+     return true;
+   }
+ 
+   oop vthread = current->vthread();
+   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
+   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
+ 
+   ObjectWaiter* node = new ObjectWaiter(vthread);
+   node->_prev   = (ObjectWaiter*) 0xBAD;
+   node->TState  = ObjectWaiter::TS_CXQ;
+ 
+   // Push node associated with vthread onto the front of the _cxq.
+   ObjectWaiter* nxt;
+   for (;;) {
+     node->_next = nxt = _cxq;
+     if (Atomic::cmpxchg(&_cxq, nxt, node) == nxt) break;
+ 
+     // Interference - the CAS failed because _cxq changed.  Just retry.
+     // As an optional optimization we retry the lock.
+     if (TryLock(current) == TryLockResult::Success) {
+       assert(owner_raw() == owner_for(current), "invariant");
+       assert(_succ != current, "invariant");
+       assert(_Responsible != current, "invariant");
+       current->set_preemption_cancelled(true);
+       java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::RUNNING);
+       add_to_contentions(-1);
+       delete node;
+       return true;
+     }
+   }
+ 
+   // We have to try once more since the owner could have exited the monitor and
+   // checked _cxq before we added the node to the queue.
+   if (TryLock(current) == TryLockResult::Success) {
+     assert(owner_raw() == owner_for(current), "invariant");
+     assert(_Responsible != current, "invariant");
+     current->set_preemption_cancelled(true);
+     java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::RUNNING);
+     UnlinkAfterAcquire(current, node, vthread);
+     delete node;
+     if (_succ == (JavaThread*)java_lang_Thread::thread_id(vthread)) _succ = nullptr;
+     add_to_contentions(-1);
+     return true;
+   }
+ 
+   if (nxt == nullptr && _EntryList == nullptr) {
+     // The C2 unlock() fast path first checks if _cxq and _EntryList are empty and,
+     // if they are, it just clears the _owner field. Since that check can happen
+     // before we added the node to _cxq, and the release of the monitor can happen
+     // after our last TryLock attempt, we need to do something to avoid stranding.
+     // We set the _Responsible field, which results in a timed wait.
+     if (Atomic::replace_if_null(&_Responsible, (JavaThread*)java_lang_Thread::thread_id(vthread))) {
+       java_lang_VirtualThread::set_recheckInterval(vthread, 1);
+     }
+   }
+ 
+   return false;
+ }
+ 
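
A standalone sketch (not part of this change) of the publish-then-retry pattern HandlePreemptedVThread() follows above: prepend the node to a cxq-like lock-free stack, optionally retrying the lock on CAS interference, then try the lock one final time because the owner may have exited after observing an empty queue but before our node became visible. Node and try_lock() are simplified stand-ins, not the real ObjectWaiter or TryLock().

#include <atomic>

struct Node {
  Node* next = nullptr;
};

struct MiniQueue {
  std::atomic<Node*> cxq{nullptr};
  std::atomic<bool>  locked{false};

  bool try_lock() {
    bool expected = false;
    return locked.compare_exchange_strong(expected, true);
  }

  // Returns true if the lock was acquired; false if the node was left queued.
  bool push_or_acquire(Node* node) {
    for (;;) {
      Node* head = cxq.load();
      node->next = head;                       // link before publishing
      if (cxq.compare_exchange_strong(head, node)) {
        break;                                 // node is now visible to the owner
      }
      // Interference: cxq changed under us; as an optimization, retry the lock.
      if (try_lock()) {
        return true;                           // acquired without publishing
      }
    }
    // The owner may have seen an empty cxq just before our publish and released
    // the lock without waking anyone, so try once more after publishing.
    // (On success the real code unlinks the node again via UnlinkAfterAcquire.)
    return try_lock();
  }
};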
  // ReenterI() is a specialized inline form of the latter half of the
  // contended slow-path from EnterI().  We use ReenterI() only for
  // monitor reentry in wait().
  //
  // In the future we should reconcile EnterI() and ReenterI().

@@ -983,11 +1093,11 @@
  
    int nWakeups = 0;
    for (;;) {
      ObjectWaiter::TStates v = currentNode->TState;
      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-     assert(owner_raw() != current, "invariant");
+     assert(owner_raw() != owner_for(current), "invariant");
  
      if (TrySpin(current)) {
          break;
      }
  

@@ -1036,26 +1146,85 @@
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, current.TState is stable.
  
-   assert(owner_raw() == current, "invariant");
+   assert(owner_raw() == owner_for(current), "invariant");
    assert(object()->mark() == markWord::encode(this), "invariant");
    UnlinkAfterAcquire(current, currentNode);
    if (_succ == current) _succ = nullptr;
    assert(_succ != current, "invariant");
    currentNode->TState = ObjectWaiter::TS_RUN;
    OrderAccess::fence();      // see comments at the end of EnterI()
  }
  
+ void ObjectMonitor::redo_enter(JavaThread* current) {
+   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
+   assert(current->is_in_VTMS_transition(), "must be");
+ 
+   if (TryLock(current) == TryLockResult::Success) {
+     VThreadEpilog(current);
+     return;
+   }
+ 
+   oop vthread = current->vthread();
+   if (_succ == (JavaThread*)java_lang_Thread::thread_id(vthread)) _succ = nullptr;
+ 
+   // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+   OrderAccess::fence();
+ 
+   if (TryLock(current) == TryLockResult::Success) {
+     assert(owner_raw() == owner_for(current), "invariant");
+     VThreadEpilog(current);
+     return;
+   }
+ 
+   // Fast preemption. The JT will read this variable on return to the
+   // monitorenter_redo stub and will just remove the enterSpecial frame
+   // from the stack and return to Continuation.run().
+   current->set_preempting(true);
+ 
+   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
+   if (_Responsible == (JavaThread*)java_lang_Thread::thread_id(vthread)) {
+     int recheckInterval = java_lang_VirtualThread::recheckInterval(vthread);
+     assert(recheckInterval >= 1 && recheckInterval <= 6, "invariant");
+     if (recheckInterval < 6) {
+       recheckInterval++;
+       java_lang_VirtualThread::set_recheckInterval(vthread, recheckInterval);
+     }
+   } else if (java_lang_VirtualThread::recheckInterval(vthread) > 0) {
+     // No need to do a timed park anymore
+     java_lang_VirtualThread::set_recheckInterval(vthread, 0);
+   }
+ }
+ 
+ void ObjectMonitor::VThreadEpilog(JavaThread* current) {
+   assert(owner_raw() == owner_for(current), "invariant");
+   add_to_contentions(-1);
+ 
+   oop vthread = current->vthread();
+   if (java_lang_VirtualThread::recheckInterval(vthread) > 0) {
+     java_lang_VirtualThread::set_recheckInterval(vthread, 0);
+   }
+   int64_t threadid = java_lang_Thread::thread_id(vthread);
+   if (_succ == (JavaThread*)threadid) _succ = nullptr;
+   if (_Responsible == (JavaThread*)threadid) {
+     _Responsible = nullptr;
+     OrderAccess::fence(); // Dekker pivot-point
+   }
+   ObjectWaiter* node = LookupWaiter(threadid);
+   UnlinkAfterAcquire(current, node, vthread);
+   delete node;
+ }
+ 
  // By convention we unlink a contending thread from EntryList|cxq immediately
  // after the thread acquires the lock in ::enter().  Equally, we could defer
  // unlinking the thread until ::exit()-time.
  
- void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* currentNode) {
-   assert(owner_raw() == current, "invariant");
-   assert(currentNode->_thread == current, "invariant");
+ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* currentNode, oop vthread) {
+   assert(owner_raw() == owner_for(current), "invariant");
+   assert((currentNode->_thread == current) || (currentNode->_thread == nullptr && currentNode->vthread() == vthread), "invariant");
  
    if (currentNode->TState == ObjectWaiter::TS_ENTER) {
      // Normal case: remove current from the DLL EntryList .
      // This is a constant-time operation.
      ObjectWaiter* nxt = currentNode->_next;

@@ -1111,10 +1280,20 @@
    currentNode->_next  = (ObjectWaiter*) 0xBAD;
    currentNode->TState = ObjectWaiter::TS_RUN;
  #endif
  }
  
+ // TODO: Fix this. Save the ObjectWaiter* when freezing, or use a hashtable.
+ ObjectWaiter* ObjectMonitor::LookupWaiter(int64_t threadid) {
+   ObjectWaiter* p;
+   for (p = _EntryList; p != nullptr && (!p->is_vthread() || java_lang_Thread::thread_id(p->vthread()) != threadid); p = p->_next) {}
+   if (p != nullptr) return p;
+   for (p = _cxq; p != nullptr && (!p->is_vthread() || java_lang_Thread::thread_id(p->vthread()) != threadid); p = p->_next) {}
+   assert(p != nullptr, "should be on either _cxq or _EntryList");
+   return p;
+ }
+ 
  // -----------------------------------------------------------------------------
  // Exit support
  //
  // exit()
  // ~~~~~~

@@ -1170,35 +1349,29 @@
  // structured the code so the windows are short and the frequency
  // of such futile wakups is low.
  
  void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
    void* cur = owner_raw();
-   if (current != cur) {
-     if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
-       assert(_recursions == 0, "invariant");
-       set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
-       _recursions = 0;
-     } else {
-       // Apparent unbalanced locking ...
-       // Naively we'd like to throw IllegalMonitorStateException.
-       // As a practical matter we can neither allocate nor throw an
-       // exception as ::exit() can be called from leaf routines.
-       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
-       // Upon deeper reflection, however, in a properly run JVM the only
-       // way we should encounter this situation is in the presence of
-       // unbalanced JNI locking. TODO: CheckJNICalls.
-       // See also: CR4414101
+   if (owner_for(current) != cur) {
+     // Apparent unbalanced locking ...
+     // Naively we'd like to throw IllegalMonitorStateException.
+     // As a practical matter we can neither allocate nor throw an
+     // exception as ::exit() can be called from leaf routines.
+     // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
+     // Upon deeper reflection, however, in a properly run JVM the only
+     // way we should encounter this situation is in the presence of
+     // unbalanced JNI locking. TODO: CheckJNICalls.
+     // See also: CR4414101
  #ifdef ASSERT
-       LogStreamHandle(Error, monitorinflation) lsh;
-       lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
-                     " is exiting an ObjectMonitor it does not own.", p2i(current));
-       lsh.print_cr("The imbalance is possibly caused by JNI locking.");
-       print_debug_style_on(&lsh);
-       assert(false, "Non-balanced monitor enter/exit!");
+     LogStreamHandle(Error, monitorinflation) lsh;
+     lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
+                   " is exiting an ObjectMonitor it does not own.", p2i(current));
+     lsh.print_cr("The imbalance is possibly caused by JNI locking.");
+     print_debug_style_on(&lsh);
+     assert(false, "Non-balanced monitor enter/exit!");
  #endif
-       return;
-     }
+     return;
    }
  
    if (_recursions != 0) {
      _recursions--;        // this is simple recursive enter
      return;

@@ -1215,17 +1388,17 @@
      _previous_owner_tid = JFR_THREAD_ID(current);
    }
  #endif
  
    for (;;) {
-     assert(current == owner_raw(), "invariant");
+     assert(owner_for(current) == owner_raw(), "invariant");
  
      // Drop the lock.
      // release semantics: prior loads and stores from within the critical section
      // must not float (reorder) past the following store that drops the lock.
      // Uses a storeload to separate release_store(owner) from the
-     // successor check. The try_set_owner() below uses cmpxchg() so
+     // successor check. The try_set_owner_from() below uses cmpxchg() so
      // we get the fence down there.
      release_clear_owner(current);
      OrderAccess::storeload();
  
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) {
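
A standalone sketch (not part of this change) of the drop-then-recheck step in this loop: release-store a null owner, issue a storeload barrier, and only then inspect _EntryList, _cxq and _succ to decide whether a wakeup is needed. std::atomic and a seq_cst fence stand in for release_clear_owner() and OrderAccess::storeload().

#include <atomic>

struct MiniExit {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> entry_list{nullptr};
  std::atomic<void*> cxq{nullptr};
  std::atomic<void*> succ{nullptr};

  // Returns true if the exiting thread may leave without waking a successor.
  bool drop_and_check() {
    owner.store(nullptr, std::memory_order_release);      // drop the lock
    std::atomic_thread_fence(std::memory_order_seq_cst);  // storeload barrier

    // Nobody queued, or a successor already designated: no wakeup needed.
    if ((entry_list.load() == nullptr && cxq.load() == nullptr) ||
        succ.load() != nullptr) {
      return true;
    }
    // Otherwise the real code re-acquires the lock (try_set_owner_from) and
    // wakes a thread from _EntryList or _cxq.
    return false;
  }
};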

@@ -1271,11 +1444,11 @@
      //
      if (try_set_owner_from(nullptr, current) != nullptr) {
        return;
      }
  
-     guarantee(owner_raw() == current, "invariant");
+     guarantee(owner_raw() == owner_for(current), "invariant");
  
      ObjectWaiter* w = nullptr;
  
      w = _EntryList;
      if (w != nullptr) {

@@ -1348,19 +1521,27 @@
      }
    }
  }
  
  void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) {
-   assert(owner_raw() == current, "invariant");
+   assert(owner_raw() == owner_for(current), "invariant");
  
    // Exit protocol:
    // 1. ST _succ = wakee
    // 2. membar #loadstore|#storestore;
    // 2. ST _owner = nullptr
    // 3. unpark(wakee)
  
-   _succ = Wakee->_thread;
+   oop vthread = nullptr;
+   if (Wakee->_thread != nullptr) {
+     // Platform thread case
+     _succ = Wakee->_thread;
+   } else {
+     assert(Wakee->vthread() != nullptr, "invariant");
+     vthread = Wakee->vthread();
+     _succ = (JavaThread*)java_lang_Thread::thread_id(vthread);
+   }
    ParkEvent * Trigger = Wakee->_event;
  
    // Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again.
    // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
    // out-of-scope (non-extant).

@@ -1370,11 +1551,17 @@
    // Uses a fence to separate release_store(owner) from the LD in unpark().
    release_clear_owner(current);
    OrderAccess::fence();
  
    DTRACE_MONITOR_PROBE(contended__exit, this, object(), current);
-   Trigger->unpark();
+ 
+   if (vthread == nullptr) {
+     // Platform thread case
+     Trigger->unpark();
+   } else if (java_lang_VirtualThread::set_onWaitingList(vthread, _vthread_cxq_head)) {
+     Trigger->unpark();
+   }
  
    // Maintain stats and report events to JVMTI
    OM_PERFDATA_OP(Parks, inc());
  }
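
One plausible reading, sketched standalone (not part of this change), of the virtual-thread wakeup above: the blocked vthread is pushed onto the global list headed by _vthread_cxq_head at most once, and only the exit that actually adds it unparks the single unparker thread. The flag, node and list below are illustrative stand-ins, not the real java_lang_VirtualThread fields.

#include <atomic>

struct VThreadNode {
  VThreadNode* next = nullptr;
  std::atomic<bool> on_list{false};   // assumed per-vthread "already queued" flag
};

struct VThreadWakeList {
  std::atomic<VThreadNode*> head{nullptr};

  // Returns true only for the call that actually adds the node to the list.
  bool add(VThreadNode* n) {
    bool expected = false;
    if (!n->on_list.compare_exchange_strong(expected, true)) {
      return false;                   // some other exit already queued this vthread
    }
    VThreadNode* h = head.load();
    do {
      n->next = h;                    // prepend, cxq-style
    } while (!head.compare_exchange_weak(h, n));
    return true;
  }
};

// In ExitEpilog() terms: if (wake_list.add(node)) { /* unpark the unparker */ }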
  

@@ -1385,23 +1572,23 @@
  // thread due to contention.
  intx ObjectMonitor::complete_exit(JavaThread* current) {
    assert(InitDone, "Unexpectedly not initialized");
  
    void* cur = owner_raw();
-   if (current != cur) {
-     if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
+   if (owner_for(current) != cur) {
+     if (LockingMode == LM_LEGACY && is_stack_locker(current)) {
        assert(_recursions == 0, "internal state error");
-       set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
+       set_owner_from_BasicLock(current);  // Convert from BasicLock* to Thread*.
        _recursions = 0;
      }
    }
  
-   guarantee(current == owner_raw(), "complete_exit not owner");
+   guarantee(owner_for(current) == owner_raw(), "complete_exit not owner");
    intx save = _recursions; // record the old recursion count
    _recursions = 0;         // set the recursion level to be 0
    exit(current);           // exit the monitor
-   guarantee(owner_raw() != current, "invariant");
+   guarantee(owner_raw() != owner_for(current), "invariant");
    return save;
  }
  
  // Checks that the current THREAD owns this monitor and causes an
  // immediate return if it doesn't. We don't use the CHECK macro

@@ -1421,17 +1608,11 @@
  // (IMSE). If there is a pending exception and the specified thread
  // is not the owner, that exception will be replaced by the IMSE.
  bool ObjectMonitor::check_owner(TRAPS) {
    JavaThread* current = THREAD;
    void* cur = owner_raw();
-   assert(cur != anon_owner_ptr(), "no anon owner here");
-   if (cur == current) {
-     return true;
-   }
-   if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
-     set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
-     _recursions = 0;
+   if (cur == owner_for(current)) {
      return true;
    }
    THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
               "current thread is not owner", false);
  }

@@ -1499,10 +1680,21 @@
      }
      THROW(vmSymbols::java_lang_InterruptedException());
      return;
    }
  
+   ContinuationEntry* ce = current->last_continuation();
+   if (ce != nullptr && ce->is_virtual_thread()) {
+     const Klass* monitor_klass = object()->klass();
+     if (!is_excluded(monitor_klass)) {
+       ResourceMark rm;
+       char reason[256];
+       jio_snprintf(reason, sizeof reason, "Object.wait on object of klass %s", monitor_klass->external_name());
+       post_virtual_thread_pinned_event(current, reason);
+     }
+   }
+ 
    current->set_current_waiting_monitor(this);
  
    // create a node to be put into the queue
    // Critically, after we reset() the event but prior to park(), we must check
    // for a pending interrupt.

@@ -1526,11 +1718,11 @@
  
    intx save = _recursions;     // record the old recursion count
    _waiters++;                  // increment the number of waiters
    _recursions = 0;             // set the recursion level to be 1
    exit(current);               // exit the monitor
-   guarantee(owner_raw() != current, "invariant");
+   guarantee(owner_raw() != owner_for(current), "invariant");
  
    // The thread is on the WaitSet list - now park() it.
    // On MP systems it's conceivable that a brief spin before we park
    // could be profitable.
    //

@@ -1632,11 +1824,11 @@
        post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
      }
  
      OrderAccess::fence();
  
-     assert(owner_raw() != current, "invariant");
+     assert(owner_raw() != owner_for(current), "invariant");
      ObjectWaiter::TStates v = node.TState;
      if (v == ObjectWaiter::TS_RUN) {
        enter(current);
      } else {
        guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");

@@ -1647,25 +1839,25 @@
      // current has reacquired the lock.
      // Lifecycle - the node representing current must not appear on any queues.
      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
      // want residual elements associated with this thread left on any lists.
      guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
-     assert(owner_raw() == current, "invariant");
+     assert(owner_raw() == owner_for(current), "invariant");
      assert(_succ != current, "invariant");
    } // OSThreadWaitState()
  
    current->set_current_waiting_monitor(nullptr);
  
    guarantee(_recursions == 0, "invariant");
    int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current);
    _recursions =   save          // restore the old recursion count
                  + relock_count; //  increased by the deferred relock count
-   current->inc_held_monitor_count(relock_count); // Deopt never entered these counts.
+   NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count(relock_count);) // Deopt never entered these counts.
    _waiters--;             // decrement the number of waiters
  
    // Verify a few postconditions
-   assert(owner_raw() == current, "invariant");
+   assert(owner_raw() == owner_for(current), "invariant");
    assert(_succ != current, "invariant");
    assert(object()->mark() == markWord::encode(this), "invariant");
  
    // check if the notification happened
    if (!WasNotified) {

@@ -1938,11 +2130,11 @@
    // when preparing to LD...CAS _owner, etc and the CAS is likely
    // to succeed.
    if (_succ == nullptr) {
      _succ = current;
    }
-   Thread* prv = nullptr;
+   void* prv = nullptr;
  
    // There are three ways to exit the following loop:
    // 1.  A successful spin where this thread has acquired the lock.
    // 2.  Spin failure with prejudice
    // 3.  Spin failure without prejudice

@@ -1976,13 +2168,13 @@
      // or if we observe _owner change from one non-null value to
      // another non-null value.   In such cases we might abort
      // the spin without prejudice or apply a "penalty" to the
      // spin count-down variable "ctr", reducing it by 100, say.
  
-     JavaThread* ox = static_cast<JavaThread*>(owner_raw());
+     void* ox = owner_raw();
      if (ox == nullptr) {
-       ox = static_cast<JavaThread*>(try_set_owner_from(nullptr, current));
+       ox = try_set_owner_from(nullptr, current);
        if (ox == nullptr) {
          // The CAS succeeded -- this thread acquired ownership
          // Take care of some bookkeeping to exit spin state.
          if (_succ == current) {
            _succ = nullptr;

@@ -2047,15 +2239,19 @@
    _prev     = nullptr;
    _notified = 0;
    _notifier_tid = 0;
    TState    = TS_RUN;
    _thread   = current;
-   _event    = _thread->_ParkEvent;
+   _event    = _thread != nullptr ? _thread->_ParkEvent : ObjectMonitor::vthread_unparker_ParkEvent();
    _active   = false;
    assert(_event != nullptr, "invariant");
  }
  
+ ObjectWaiter::ObjectWaiter(oop vthread) : ObjectWaiter((JavaThread*)nullptr) {
+   _vthread = OopHandle(JavaThread::thread_oop_storage(), vthread);
+ }
+ 
  void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(_thread, mon);
  }
  
  void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {

@@ -2167,10 +2363,15 @@
    _oop_storage = OopStorageSet::create_weak("ObjectSynchronizer Weak", mtSynchronizer);
  
    DEBUG_ONLY(InitDone = true;)
  }
  
+ void ObjectMonitor::Initialize2() {
+   _vthread_cxq_head = OopHandle(JavaThread::thread_oop_storage(), nullptr);
+   _vthread_unparker_ParkEvent = ParkEvent::Allocate(nullptr);
+ }
+ 
  void ObjectMonitor::print_on(outputStream* st) const {
    // The minimal things to print for markWord printing, more can be added for debugging and logging.
    st->print("{contentions=0x%08x,waiters=0x%08x"
              ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
              contentions(), waiters(), recursions(),