< prev index next > src/hotspot/share/runtime/objectMonitor.cpp
Print this page
#include "runtime/osThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safefetch.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
+ #include "runtime/threads.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
DEBUG_ONLY(static volatile bool InitDone = false;)
OopStorage* ObjectMonitor::_oop_storage = nullptr;
+ OopHandle ObjectMonitor::_vthread_cxq_head;
+ ParkEvent* ObjectMonitor::_vthread_unparker_ParkEvent = nullptr;
+
+ // Posts a JFR VirtualThreadPinned event for 'current' (the carrier thread)
+ // with the given human-readable 'reason'. No-op when JFR has the event
+ // disabled (should_commit() is false), so this is cheap on the common path.
+ static void post_virtual_thread_pinned_event(JavaThread* current, const char* reason) {
+ EventVirtualThreadPinned e;
+ if (e.should_commit()) {
+ e.set_pinnedReason(reason);
+ e.set_carrierThread(JFR_JVM_THREAD_ID(current));
+ e.commit();
+ }
+ }
+
// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
// CAS()ing the _owner field from null to non-null.
ObjectMonitor::ObjectMonitor(oop object) :
_header(markWord::zero()),
_object(_oop_storage, object),
_owner(nullptr),
+ _stack_locker(nullptr),
_previous_owner_tid(0),
_next_om(nullptr),
_recursions(0),
_EntryList(nullptr),
_cxq(nullptr),
void* prev_owner = try_set_owner_from(nullptr, locking_thread);
if (prev_owner == nullptr) {
assert(_recursions == 0, "invariant");
success = true;
! } else if (prev_owner == locking_thread) {
_recursions++;
success = true;
} else if (prev_owner == DEFLATER_MARKER) {
// Racing with deflation.
prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
void* prev_owner = try_set_owner_from(nullptr, locking_thread);
if (prev_owner == nullptr) {
assert(_recursions == 0, "invariant");
success = true;
! } else if (prev_owner == owner_for(locking_thread)) {
_recursions++;
success = true;
} else if (prev_owner == DEFLATER_MARKER) {
// Racing with deflation.
prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
// success will only be false if this races with something other than
// deflation.
prev_owner = try_set_owner_from(nullptr, locking_thread);
success = prev_owner == nullptr;
}
- } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) {
- assert(_recursions == 0, "must be");
- _recursions = 1;
- set_owner_from_BasicLock(prev_owner, locking_thread);
- success = true;
}
assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
} else {
}
}
add_to_contentions(-1);
! assert(!success || owner_raw() == locking_thread, "must be");
return success;
}
bool ObjectMonitor::enter(JavaThread* current) {
}
}
add_to_contentions(-1);
! assert(!success || is_owner(locking_thread), "must be");
return success;
}
bool ObjectMonitor::enter(JavaThread* current) {
if (cur == nullptr) {
assert(_recursions == 0, "invariant");
return true;
}
! if (cur == current) {
// TODO-FIXME: check for integer overflow! BUGID 6557169.
_recursions++;
return true;
}
- if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
- assert(_recursions == 0, "internal state error");
- _recursions = 1;
- set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*.
- return true;
- }
-
// We've encountered genuine contention.
// Try one round of spinning *before* enqueueing current
// and before going through the awkward and expensive state
// transitions. The following spin is strictly optional ...
// Note that if we acquire the monitor from an initial spin
// we forgo posting JVMTI events and firing DTRACE probes.
if (TrySpin(current)) {
! assert(owner_raw() == current, "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
assert(object()->mark() == markWord::encode(this),
"object mark must match encoded this: mark=" INTPTR_FORMAT
", encoded this=" INTPTR_FORMAT, object()->mark().value(),
markWord::encode(this).value());
return true;
}
! assert(owner_raw() != current, "invariant");
assert(_succ != current, "invariant");
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
// Keep track of contention for JVM/TI and M&M queries.
if (cur == nullptr) {
assert(_recursions == 0, "invariant");
return true;
}
! if (cur == owner_for(current)) {
// TODO-FIXME: check for integer overflow! BUGID 6557169.
_recursions++;
return true;
}
// We've encountered genuine contention.
// Try one round of spinning *before* enqueueing current
// and before going through the awkward and expensive state
// transitions. The following spin is strictly optional ...
// Note that if we acquire the monitor from an initial spin
// we forgo posting JVMTI events and firing DTRACE probes.
if (TrySpin(current)) {
! assert(owner_raw() == owner_for(current), "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
assert(object()->mark() == markWord::encode(this),
"object mark must match encoded this: mark=" INTPTR_FORMAT
", encoded this=" INTPTR_FORMAT, object()->mark().value(),
markWord::encode(this).value());
return true;
}
! assert(owner_raw() != owner_for(current), "invariant");
assert(_succ != current, "invariant");
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
// Keep track of contention for JVM/TI and M&M queries.
// This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
// handler cannot accidentally consume an unpark() meant for the
// ParkEvent associated with this ObjectMonitor.
}
+ #ifdef LOOM_MONITOR_SUPPORT
+ ContinuationEntry* ce = current->last_continuation();
+ if (ce != nullptr && ce->is_virtual_thread() && current->is_on_monitorenter()) {
+ int result = Continuation::try_preempt(current, ce->cont_oop(current), freeze_on_monitorenter);
+ if (result == freeze_ok) {
+ bool acquired = VThreadMonitorEnter(current);
+ if (acquired) {
+ current->set_preemption_cancelled(true);
+ if (JvmtiExport::should_post_monitor_contended_entered()) {
+ // We are going to call thaw again after this and finish the VMTS
+ // transition so no need to do it here. We will post the event there.
+ current->set_contended_entered_monitor(this);
+ }
+ }
+ current->set_current_pending_monitor(nullptr);
+ DEBUG_ONLY(int state = java_lang_VirtualThread::state(current->vthread()));
+ assert((acquired && current->preemption_cancelled() && state == java_lang_VirtualThread::RUNNING) ||
+ (!acquired && !current->preemption_cancelled() && state == java_lang_VirtualThread::BLOCKING), "invariant");
+ return true;
+ }
+ if (result == freeze_pinned_native) {
+ post_virtual_thread_pinned_event(current, "Native frame or <clinit> on stack");
+ }
+ }
+ #endif
+
OSThreadContendState osts(current->osthread());
assert(current->thread_state() == _thread_in_vm, "invariant");
for (;;) {
// If there is a suspend request, ExitOnSuspend will exit the OM
// and set the OM as pending.
}
if (!eos.exited()) {
// ExitOnSuspend did not exit the OM
! assert(owner_raw() == current, "invariant");
break;
}
}
// We've just gotten past the enter-check-for-suspend dance and we now own
// If there is a suspend request, ExitOnSuspend will exit the OM
// and set the OM as pending.
}
if (!eos.exited()) {
// ExitOnSuspend did not exit the OM
! assert(owner_raw() == owner_for(current), "invariant");
break;
}
}
// We've just gotten past the enter-check-for-suspend dance and we now own
add_to_contentions(-1);
assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
// Must either set _recursions = 0 or ASSERT _recursions == 0.
assert(_recursions == 0, "invariant");
! assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
// The thread -- now the owner -- is back in vm mode.
// Report the glorious news via TI,DTrace and jvmstat.
add_to_contentions(-1);
assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
// Must either set _recursions = 0 or ASSERT _recursions == 0.
assert(_recursions == 0, "invariant");
! assert(owner_raw() == owner_for(current), "invariant");
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
// The thread -- now the owner -- is back in vm mode.
// Report the glorious news via TI,DTrace and jvmstat.
const oop obj = object_peek();
if (obj == nullptr) {
// If the object died, we can recycle the monitor without racing with
// Java threads. The GC already broke the association with the object.
! set_owner_from(nullptr, DEFLATER_MARKER);
assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions());
_contentions = INT_MIN; // minimum negative int
} else {
// Attempt async deflation protocol.
// Set a null owner to DEFLATER_MARKER to force any contending thread
// through the slow path. This is just the first part of the async
// deflation dance.
! if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) {
// The owner field is no longer null so we lost the race since the
// ObjectMonitor is now busy.
return false;
}
if (contentions() > 0 || _waiters != 0) {
// Another thread has raced to enter the ObjectMonitor after
// is_busy() above or has already entered and waited on
// it which makes it busy so no deflation. Restore owner to
// null if it is still DEFLATER_MARKER.
! if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1);
}
return false;
}
const oop obj = object_peek();
if (obj == nullptr) {
// If the object died, we can recycle the monitor without racing with
// Java threads. The GC already broke the association with the object.
! set_owner_from_raw(nullptr, DEFLATER_MARKER);
assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions());
_contentions = INT_MIN; // minimum negative int
} else {
// Attempt async deflation protocol.
// Set a null owner to DEFLATER_MARKER to force any contending thread
// through the slow path. This is just the first part of the async
// deflation dance.
! if (try_set_owner_from_raw(nullptr, DEFLATER_MARKER) != nullptr) {
// The owner field is no longer null so we lost the race since the
// ObjectMonitor is now busy.
return false;
}
if (contentions() > 0 || _waiters != 0) {
// Another thread has raced to enter the ObjectMonitor after
// is_busy() above or has already entered and waited on
// it which makes it busy so no deflation. Restore owner to
// null if it is still DEFLATER_MARKER.
! if (try_set_owner_from_raw(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1);
}
return false;
}
// to retry. This is the second part of the async deflation dance.
if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
// Contentions was no longer 0 so we lost the race since the
// ObjectMonitor is now busy. Restore owner to null if it is
// still DEFLATER_MARKER:
! if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1);
}
return false;
}
// to retry. This is the second part of the async deflation dance.
if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
// Contentions was no longer 0 so we lost the race since the
// ObjectMonitor is now busy. Restore owner to null if it is
// still DEFLATER_MARKER:
! if (try_set_owner_from_raw(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1);
}
return false;
}
// Convert the fields used by is_busy() to a string that can be
// used for diagnostic output.
const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
ss->print("is_busy: waiters=%d"
", contentions=%d"
! ", owner=" PTR_FORMAT
", cxq=" PTR_FORMAT
", EntryList=" PTR_FORMAT,
_waiters,
(contentions() > 0 ? contentions() : 0),
owner_is_DEFLATER_MARKER()
// Convert the fields used by is_busy() to a string that can be
// used for diagnostic output.
const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
ss->print("is_busy: waiters=%d"
", contentions=%d"
! ", owner=" INTPTR_FORMAT
", cxq=" PTR_FORMAT
", EntryList=" PTR_FORMAT,
_waiters,
(contentions() > 0 ? contentions() : 0),
owner_is_DEFLATER_MARKER()
assert(current->thread_state() == _thread_blocked, "invariant");
// Try the lock - TATAS
if (TryLock(current) == TryLockResult::Success) {
assert(_succ != current, "invariant");
! assert(owner_raw() == current, "invariant");
assert(_Responsible != current, "invariant");
return;
}
if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
assert(current->thread_state() == _thread_blocked, "invariant");
// Try the lock - TATAS
if (TryLock(current) == TryLockResult::Success) {
assert(_succ != current, "invariant");
! assert(owner_raw() == owner_for(current), "invariant");
assert(_Responsible != current, "invariant");
return;
}
if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
// operation to donate the remainder of this thread's quantum
// to the owner. This has subtle but beneficial affinity
// effects.
if (TrySpin(current)) {
! assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
assert(_Responsible != current, "invariant");
return;
}
// The Spin failed -- Enqueue and park the thread ...
assert(_succ != current, "invariant");
! assert(owner_raw() != current, "invariant");
assert(_Responsible != current, "invariant");
// Enqueue "current" on ObjectMonitor's _cxq.
//
// Node acts as a proxy for current.
// operation to donate the remainder of this thread's quantum
// to the owner. This has subtle but beneficial affinity
// effects.
if (TrySpin(current)) {
! assert(owner_raw() == owner_for(current), "invariant");
assert(_succ != current, "invariant");
assert(_Responsible != current, "invariant");
return;
}
// The Spin failed -- Enqueue and park the thread ...
assert(_succ != current, "invariant");
! assert(owner_raw() != owner_for(current), "invariant");
assert(_Responsible != current, "invariant");
// Enqueue "current" on ObjectMonitor's _cxq.
//
// Node acts as a proxy for current.
// Interference - the CAS failed because _cxq changed. Just retry.
// As an optional optimization we retry the lock.
if (TryLock(current) == TryLockResult::Success) {
assert(_succ != current, "invariant");
! assert(owner_raw() == current, "invariant");
assert(_Responsible != current, "invariant");
return;
}
}
// Interference - the CAS failed because _cxq changed. Just retry.
// As an optional optimization we retry the lock.
if (TryLock(current) == TryLockResult::Success) {
assert(_succ != current, "invariant");
! assert(owner_raw() == owner_for(current), "invariant");
assert(_Responsible != current, "invariant");
return;
}
}
// Since state transitions are heavy and inefficient we'd like
// to defer the state transitions until absolutely necessary,
// and in doing so avoid some transitions ...
int recheckInterval = 1;
for (;;) {
if (TryLock(current) == TryLockResult::Success) {
break;
}
! assert(owner_raw() != current, "invariant");
// park self
! if (_Responsible == current) {
current->_ParkEvent->park((jlong) recheckInterval);
// Increase the recheckInterval, but clamp the value.
recheckInterval *= 8;
if (recheckInterval > MAX_RECHECK_INTERVAL) {
recheckInterval = MAX_RECHECK_INTERVAL;
// Since state transitions are heavy and inefficient we'd like
// to defer the state transitions until absolutely necessary,
// and in doing so avoid some transitions ...
int recheckInterval = 1;
+ bool do_timed_parked = false;
+
+ ContinuationEntry* ce = current->last_continuation();
+ if (ce != nullptr && ce->is_virtual_thread()) {
+ do_timed_parked = true;
+ }
for (;;) {
if (TryLock(current) == TryLockResult::Success) {
break;
}
! assert(owner_raw() != owner_for(current), "invariant");
// park self
! if (_Responsible == current || do_timed_parked) {
current->_ParkEvent->park((jlong) recheckInterval);
// Increase the recheckInterval, but clamp the value.
recheckInterval *= 8;
if (recheckInterval > MAX_RECHECK_INTERVAL) {
recheckInterval = MAX_RECHECK_INTERVAL;
// From the perspective of the lock owner (this thread), the
// EntryList is stable and cxq is prepend-only.
// The head of cxq is volatile but the interior is stable.
// In addition, current.TState is stable.
! assert(owner_raw() == current, "invariant");
UnlinkAfterAcquire(current, &node);
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
// From the perspective of the lock owner (this thread), the
// EntryList is stable and cxq is prepend-only.
// The head of cxq is volatile but the interior is stable.
// In addition, current.TState is stable.
! assert(owner_raw() == owner_for(current), "invariant");
UnlinkAfterAcquire(current, &node);
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
for (;;) {
! ObjectWaiter::TStates v = currentNode->TState;
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
! assert(owner_raw() != current, "invariant");
// This thread has been notified so try to reacquire the lock.
if (TryLock(current) == TryLockResult::Success) {
break;
}
assert(object()->mark() == markWord::encode(this), "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
for (;;) {
! uint8_t v = currentNode->TState;
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
! assert(owner_raw() != owner_for(current), "invariant");
// This thread has been notified so try to reacquire the lock.
if (TryLock(current) == TryLockResult::Success) {
break;
}
// From the perspective of the lock owner (this thread), the
// EntryList is stable and cxq is prepend-only.
// The head of cxq is volatile but the interior is stable.
// In addition, current.TState is stable.
! assert(owner_raw() == current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
UnlinkAfterAcquire(current, currentNode);
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
currentNode->TState = ObjectWaiter::TS_RUN;
OrderAccess::fence(); // see comments at the end of EnterI()
}
// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter(). Equally, we could defer
// unlinking the thread until ::exit()-time.
void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* currentNode) {
! assert(owner_raw() == current, "invariant");
! assert(currentNode->_thread == current, "invariant");
if (currentNode->TState == ObjectWaiter::TS_ENTER) {
// Normal case: remove current from the DLL EntryList .
// This is a constant-time operation.
ObjectWaiter* nxt = currentNode->_next;
// From the perspective of the lock owner (this thread), the
// EntryList is stable and cxq is prepend-only.
// The head of cxq is volatile but the interior is stable.
// In addition, current.TState is stable.
! assert(owner_raw() == owner_for(current), "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
UnlinkAfterAcquire(current, currentNode);
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
currentNode->TState = ObjectWaiter::TS_RUN;
OrderAccess::fence(); // see comments at the end of EnterI()
}
+ // This method is called from two places:
+ // - On monitorenter contention with a null waiter.
+ // - After Object.wait() times out or the target is interrupted to reenter the
+ // monitor, with the existing waiter.
+ // For the Object.wait() case we do not delete the ObjectWaiter in case we
+ // successfully acquire the monitor since we are going to need it on return.
+ //
+ // Returns true if the monitor was acquired (including the case where we
+ // cancelled an in-progress async deflation, which makes 'current' the owner).
+ // Returns false if the monitor could not be acquired; in that case the
+ // ObjectWaiter node has been pushed onto _cxq, the vthread's state is set to
+ // BLOCKING, and the node is stashed in the continuation's stack chunk so it
+ // can be recovered in resume_operation() when the vthread is resumed.
+ bool ObjectMonitor::VThreadMonitorEnter(JavaThread* current, ObjectWaiter* waiter) {
+ if (TryLock(current) == TryLockResult::Success) {
+ assert(owner_raw() == owner_for(current), "invariant");
+ assert(_succ != current, "invariant");
+ assert(_Responsible != current, "invariant");
+ add_to_contentions(-1);
+ return true;
+ }
+
+ if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
+ // Cancelled the in-progress async deflation by changing owner from
+ // DEFLATER_MARKER to current. As part of the contended enter protocol,
+ // contentions was incremented to a positive value before this call to
+ // VThreadMonitorEnter(). We avoid decrementing contentions to prevent
+ // the deflater thread from winning the last part of the 2-part async
+ // deflation protocol. The deflater thread will decrement contentions
+ // after it recognizes that the async deflation was cancelled.
+ assert(_succ != current, "invariant");
+ assert(_Responsible != current, "invariant");
+ assert(waiter != nullptr, "monitor currently in used marked for deflation??");
+ return true;
+ }
+
+ // Build (or reuse, in the Object.wait() reenter case) the queue node for
+ // this virtual thread.
+ oop vthread = current->vthread();
+ ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);
+ // 0xBAD is a poison value: _prev is not meaningful while the node sits on
+ // the singly-linked _cxq push stack.
+ node->_prev = (ObjectWaiter*) 0xBAD;
+ node->TState = ObjectWaiter::TS_CXQ;
+
+ // Push node associated with vthread onto the front of the _cxq.
+ ObjectWaiter* nxt;
+ for (;;) {
+ node->_next = nxt = _cxq;
+ if (Atomic::cmpxchg(&_cxq, nxt, node) == nxt) break;
+
+ // Interference - the CAS failed because _cxq changed. Just retry.
+ // As an optional optimization we retry the lock.
+ if (TryLock(current) == TryLockResult::Success) {
+ assert(owner_raw() == owner_for(current), "invariant");
+ assert(_succ != current, "invariant");
+ assert(_Responsible != current, "invariant");
+ add_to_contentions(-1);
+ if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
+ return true;
+ }
+ }
+
+ // We have to try once more since owner could have exited monitor and checked
+ // _cxq before we added the node to the queue.
+ if (TryLock(current) == TryLockResult::Success) {
+ assert(owner_raw() == owner_for(current), "invariant");
+ assert(_Responsible != current, "invariant");
+ UnlinkAfterAcquire(current, node);
+ // _succ stores a vthread as its thread_id cast to JavaThread* — clear it
+ // if it names us.
+ if (_succ == (JavaThread*)java_lang_Thread::thread_id(vthread)) _succ = nullptr;
+ add_to_contentions(-1);
+ if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
+ return true;
+ }
+
+ if (nxt == nullptr && _EntryList == nullptr) {
+ // The C2 unlock() fast path first checks if _cxq and _EntryList are empty and
+ // if they are it just clears the _owner field. Since we always run the risk of
+ // having that check happening before we added the node to _cxq and the release
+ // of the monitor happening after the last TryLock attempt we need to do something
+ // to avoid stranding. We set the _Responsible field which results in a timed-wait.
+ if (Atomic::replace_if_null(&_Responsible, (JavaThread*)java_lang_Thread::thread_id(vthread))) {
+ java_lang_VirtualThread::set_recheckInterval(vthread, 1);
+ }
+ }
+
+ assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
+ java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
+
+ // We didn't succeed in acquiring the monitor so save ObjectWaiter*
+ // in the chunk since we will need it when resuming execution.
+ oop cont = java_lang_VirtualThread::continuation(vthread);
+ stackChunkOop chunk = jdk_internal_vm_Continuation::tail(cont);
+ chunk->set_object_waiter(node);
+ return false;
+ }
+
+ // Called while resuming a virtual thread that was previously preempted on a
+ // monitor operation (a contended enter, or reentry after Object.wait()).
+ // Retries acquisition of the monitor; on success VThreadEpilog() completes
+ // the acquisition. If the monitor is still contended, sets the preempting
+ // flag and BLOCKING state so the vthread unmounts again on return to the
+ // resume_monitor_operation stub (see comment near the end).
+ void ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node) {
+ assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
+ assert(current->is_in_VTMS_transition(), "must be");
+
+ if (node->is_wait() && !node->at_reenter()) {
+ bool notified = VThreadWaitReenter(current, node);
+ if (!notified) return;
+ // Notified case. We were already added to CXQ or TS_ENTER
+ // by the notifier so just try to reenter the monitor.
+ }
+
+ // Retry acquiring monitor...
+
+ int state = node->TState;
+ guarantee(state == ObjectWaiter::TS_ENTER || state == ObjectWaiter::TS_CXQ, "invariant");
+
+ if (TryLock(current) == TryLockResult::Success) {
+ VThreadEpilog(current, node);
+ return;
+ }
+
+ // _succ may name this vthread (stored as its thread_id cast to
+ // JavaThread*); clear it before parking again.
+ oop vthread = current->vthread();
+ if (_succ == (JavaThread*)java_lang_Thread::thread_id(vthread)) _succ = nullptr;
+
+ // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+ OrderAccess::fence();
+
+ if (TryLock(current) == TryLockResult::Success) {
+ VThreadEpilog(current, node);
+ return;
+ }
+
+ // Update recheck interval in case we are the _Responsible.
+ if (_Responsible == (JavaThread*)java_lang_Thread::thread_id(vthread)) {
+ int recheckInterval = java_lang_VirtualThread::recheckInterval(vthread);
+ assert(recheckInterval >= 1 && recheckInterval <= 6, "invariant");
+ if (recheckInterval < 6) {
+ // Exponential-style backoff bounded at 6 (the interpretation of the
+ // interval value is on the Java side — see java_lang_VirtualThread).
+ recheckInterval++;
+ java_lang_VirtualThread::set_recheckInterval(vthread, recheckInterval);
+ }
+ } else if (java_lang_VirtualThread::recheckInterval(vthread) > 0) {
+ // No need to do timed park anymore
+ java_lang_VirtualThread::set_recheckInterval(vthread, 0);
+ }
+
+ // The JT will read this variable on return to the resume_monitor_operation stub
+ // and will unmount (enterSpecial frame removed and return to Continuation.run()).
+ current->set_preempting(true);
+ java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
+ }
+
+ // Completes a successful monitor acquisition by a resuming virtual thread:
+ // clears _succ/_Responsible if they name this vthread, restores the recursion
+ // count and waiter bookkeeping for the Object.wait() case, unlinks and frees
+ // the ObjectWaiter, clears its reference from the continuation's stack chunk,
+ // and defers JVMTI contended-entered posting to the end of the mount
+ // transition.
+ void ObjectMonitor::VThreadEpilog(JavaThread* current, ObjectWaiter* node) {
+ assert(owner_raw() == owner_for(current), "invariant");
+ add_to_contentions(-1);
+
+ oop vthread = current->vthread();
+ if (java_lang_VirtualThread::recheckInterval(vthread) > 0) {
+ java_lang_VirtualThread::set_recheckInterval(vthread, 0);
+ }
+ // _succ/_Responsible store a vthread as its thread_id cast to JavaThread*.
+ int64_t threadid = java_lang_Thread::thread_id(vthread);
+ if (_succ == (JavaThread*)threadid) _succ = nullptr;
+ if (_Responsible == (JavaThread*)threadid) {
+ _Responsible = nullptr;
+ OrderAccess::fence(); // Dekker pivot-point
+ }
+
+ guarantee(_recursions == 0, "invariant");
+
+ if (node->is_wait()) {
+ _recursions = node->_recursions; // restore the old recursion count
+ _waiters--; // decrement the number of waiters
+
+ if (node->_interrupted) {
+ // We will throw at thaw end after finishing the mount transition.
+ current->set_pending_interrupted_exception(true);
+ }
+ }
+
+ assert(node->TState == ObjectWaiter::TS_ENTER || node->TState == ObjectWaiter::TS_CXQ, "");
+ UnlinkAfterAcquire(current, node);
+ delete node;
+
+ oop cont = java_lang_VirtualThread::continuation(vthread);
+ stackChunkOop chunk = jdk_internal_vm_Continuation::tail(cont);
+ chunk->set_object_waiter(nullptr);
+
+ if (JvmtiExport::should_post_monitor_contended_entered()) {
+ // We are going to call thaw again after this and finish the VMTS
+ // transition so no need to do it here. We will post the event there.
+ current->set_contended_entered_monitor(this);
+ }
+ }
+
// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter(). Equally, we could defer
// unlinking the thread until ::exit()-time.
void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* currentNode) {
! assert(owner_raw() == owner_for(current), "invariant");
! assert((!currentNode->is_vthread() && currentNode->thread() == current) ||
+ (currentNode->is_vthread() && currentNode->vthread() == current->vthread()), "invariant");
if (currentNode->TState == ObjectWaiter::TS_ENTER) {
// Normal case: remove current from the DLL EntryList .
// This is a constant-time operation.
ObjectWaiter* nxt = currentNode->_next;
// structured the code so the windows are short and the frequency
// of such futile wakeups is low.
void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
void* cur = owner_raw();
! if (current != cur) {
! if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
! assert(_recursions == 0, "invariant");
! set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*.
! _recursions = 0;
! } else {
! // Apparent unbalanced locking ...
! // Naively we'd like to throw IllegalMonitorStateException.
! // As a practical matter we can neither allocate nor throw an
! // exception as ::exit() can be called from leaf routines.
- // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
- // Upon deeper reflection, however, in a properly run JVM the only
- // way we should encounter this situation is in the presence of
- // unbalanced JNI locking. TODO: CheckJNICalls.
- // See also: CR4414101
#ifdef ASSERT
! LogStreamHandle(Error, monitorinflation) lsh;
! lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
! " is exiting an ObjectMonitor it does not own.", p2i(current));
! lsh.print_cr("The imbalance is possibly caused by JNI locking.");
! print_debug_style_on(&lsh);
! assert(false, "Non-balanced monitor enter/exit!");
#endif
! return;
- }
}
if (_recursions != 0) {
_recursions--; // this is simple recursive enter
return;
// structured the code so the windows are short and the frequency
// of such futile wakeups is low.
void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
void* cur = owner_raw();
! if (owner_for(current) != cur) {
! // Apparent unbalanced locking ...
! // Naively we'd like to throw IllegalMonitorStateException.
! // As a practical matter we can neither allocate nor throw an
! // exception as ::exit() can be called from leaf routines.
! // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
! // Upon deeper reflection, however, in a properly run JVM the only
! // way we should encounter this situation is in the presence of
! // unbalanced JNI locking. TODO: CheckJNICalls.
! // See also: CR4414101
#ifdef ASSERT
! LogStreamHandle(Error, monitorinflation) lsh;
! lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
! " is exiting an ObjectMonitor it does not own.", p2i(current));
! lsh.print_cr("The imbalance is possibly caused by JNI locking.");
! print_debug_style_on(&lsh);
! assert(false, "Non-balanced monitor enter/exit!");
#endif
! return;
}
if (_recursions != 0) {
_recursions--; // this is simple recursive enter
return;
_previous_owner_tid = JFR_THREAD_ID(current);
}
#endif
for (;;) {
! assert(current == owner_raw(), "invariant");
// Drop the lock.
// release semantics: prior loads and stores from within the critical section
// must not float (reorder) past the following store that drops the lock.
// Uses a storeload to separate release_store(owner) from the
! // successor check. The try_set_owner() below uses cmpxchg() so
// we get the fence down there.
release_clear_owner(current);
OrderAccess::storeload();
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) {
_previous_owner_tid = JFR_THREAD_ID(current);
}
#endif
for (;;) {
! assert(owner_for(current) == owner_raw(), "invariant");
// Drop the lock.
// release semantics: prior loads and stores from within the critical section
// must not float (reorder) past the following store that drops the lock.
// Uses a storeload to separate release_store(owner) from the
! // successor check. The try_set_owner_from() below uses cmpxchg() so
// we get the fence down there.
release_clear_owner(current);
OrderAccess::storeload();
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) {
//
if (try_set_owner_from(nullptr, current) != nullptr) {
return;
}
! guarantee(owner_raw() == current, "invariant");
ObjectWaiter* w = nullptr;
w = _EntryList;
if (w != nullptr) {
//
if (try_set_owner_from(nullptr, current) != nullptr) {
return;
}
! guarantee(owner_raw() == owner_for(current), "invariant");
ObjectWaiter* w = nullptr;
w = _EntryList;
if (w != nullptr) {
}
}
}
void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) {
! assert(owner_raw() == current, "invariant");
// Exit protocol:
// 1. ST _succ = wakee
// 2. membar #loadstore|#storestore;
// 2. ST _owner = nullptr
// 3. unpark(wakee)
! _succ = Wakee->_thread;
! ParkEvent * Trigger = Wakee->_event;
// Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again.
// The thread associated with Wakee may have grabbed the lock and "Wakee" may be
// out-of-scope (non-extant).
Wakee = nullptr;
}
}
}
void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) {
! assert(owner_raw() == owner_for(current), "invariant");
// Exit protocol:
// 1. ST _succ = wakee
// 2. membar #loadstore|#storestore;
// 2. ST _owner = nullptr
// 3. unpark(wakee)
! oop vthread = nullptr;
! ParkEvent * Trigger;
+ if (!Wakee->is_vthread()) {
+ JavaThread* t = Wakee->thread();
+ assert(t != nullptr, "");
+ Trigger = t->_ParkEvent;
+ _succ = t;
+ } else {
+ vthread = Wakee->vthread();
+ assert(vthread != nullptr, "");
+ Trigger = ObjectMonitor::vthread_unparker_ParkEvent();
+ _succ = (JavaThread*)java_lang_Thread::thread_id(vthread);
+ }
// Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again.
// The thread associated with Wakee may have grabbed the lock and "Wakee" may be
// out-of-scope (non-extant).
Wakee = nullptr;
// Uses a fence to separate release_store(owner) from the LD in unpark().
release_clear_owner(current);
OrderAccess::fence();
DTRACE_MONITOR_PROBE(contended__exit, this, object(), current);
! Trigger->unpark();
// Maintain stats and report events to JVMTI
OM_PERFDATA_OP(Parks, inc());
}
// Uses a fence to separate release_store(owner) from the LD in unpark().
release_clear_owner(current);
OrderAccess::fence();
DTRACE_MONITOR_PROBE(contended__exit, this, object(), current);
!
+ if (vthread == nullptr) {
+ // Platform thread case
+ Trigger->unpark();
+ } else if (java_lang_VirtualThread::set_onWaitingList(vthread, _vthread_cxq_head)) {
+ Trigger->unpark();
+ }
// Maintain stats and report events to JVMTI
OM_PERFDATA_OP(Parks, inc());
}
// thread due to contention.
// Exit the monitor completely, ignoring the recursion count: records the
// current recursion count, drops it to zero, exits the monitor, and returns
// the saved count to the caller. NOTE(review): presumably paired with a
// reenter that restores the saved count -- confirm against the caller.
intx ObjectMonitor::complete_exit(JavaThread* current) {
assert(InitDone, "Unexpectedly not initialized");
void* cur = owner_raw();
! if (current != cur) {
// Under legacy stack-locking the owner field may hold a BasicLock* on
// current's stack; convert it so exit() below sees current as owner.
! if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
assert(_recursions == 0, "internal state error");
! set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*.
_recursions = 0;
}
}
! guarantee(current == owner_raw(), "complete_exit not owner");
intx save = _recursions; // record the old recursion count
_recursions = 0; // set the recursion level to be 0
exit(current); // exit the monitor
! guarantee(owner_raw() != current, "invariant");
return save;
}
// Checks that the current THREAD owns this monitor and causes an
// immediate return if it doesn't. We don't use the CHECK macro
// thread due to contention.
// Exit the monitor completely, ignoring the recursion count: records the
// current recursion count, drops it to zero, exits the monitor, and returns
// the saved count to the caller. This version compares against
// owner_for(current) rather than the JavaThread* itself.
intx ObjectMonitor::complete_exit(JavaThread* current) {
assert(InitDone, "Unexpectedly not initialized");
void* cur = owner_raw();
! if (owner_for(current) != cur) {
// Under LM_LEGACY the owner field may still reference current's stack
// lock; convert it so exit() below sees current as owner.
! if (LockingMode == LM_LEGACY && is_stack_locker(current)) {
assert(_recursions == 0, "internal state error");
! set_owner_from_BasicLock(current); // Convert from BasicLock* to Thread*.
_recursions = 0;
}
}
! guarantee(owner_for(current) == owner_raw(), "complete_exit not owner");
intx save = _recursions; // record the old recursion count
_recursions = 0; // set the recursion level to be 0
exit(current); // exit the monitor
! guarantee(owner_raw() != owner_for(current), "invariant");
return save;
}
// Checks that the current THREAD owns this monitor and causes an
// immediate return if it doesn't. We don't use the CHECK macro
// (IMSE). If there is a pending exception and the specified thread
// is not the owner, that exception will be replaced by the IMSE.
// Returns true if the calling thread owns this monitor; otherwise throws
// IllegalMonitorStateException and returns false. Any pending exception is
// replaced by the IMSE (see comment above).
bool ObjectMonitor::check_owner(TRAPS) {
JavaThread* current = THREAD;
void* cur = owner_raw();
! assert(cur != anon_owner_ptr(), "no anon owner here");
- if (cur == current) {
- return true;
- }
// Legacy stack-locked case: current owns via a BasicLock on its stack.
- if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
- set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*.
- _recursions = 0;
return true;
}
THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
"current thread is not owner", false);
}
// (IMSE). If there is a pending exception and the specified thread
// is not the owner, that exception will be replaced by the IMSE.
// Returns true if the calling thread owns this monitor; otherwise throws
// IllegalMonitorStateException and returns false. Ownership is checked via
// owner_for(current), which covers all locking modes.
bool ObjectMonitor::check_owner(TRAPS) {
JavaThread* current = THREAD;
void* cur = owner_raw();
! if (cur == owner_for(current)) {
return true;
}
THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
"current thread is not owner", false);
}
event->set_notifier(notifier_tid);
event->set_timedOut(timedout);
event->commit();
}
+ // Posts the JFR JavaMonitorWait event and the JVMTI MONITOR_WAITED event on
+ // behalf of a virtual thread whose Object.wait() completed while unmounted.
+ // The carrier's frame anchor is temporarily set to the continuation entry
+ // frame so the stack can be walked if we safepoint inside JRT_BLOCK.
+ static void vthread_monitor_waited_event(JavaThread *current, ObjectWaiter* node, EventJavaMonitorWait* event, jboolean timed_out) {
+ // Since we might safepoint, set the anchor so that the stack can be walked.
+ assert(current->last_continuation() != nullptr, "");
+ JavaFrameAnchor* anchor = current->frame_anchor();
+ anchor->set_last_Java_sp(current->last_continuation()->entry_sp());
+ anchor->set_last_Java_pc(current->last_continuation()->entry_pc());
+
+ JRT_BLOCK
+ if (event->should_commit()) {
+ long timeout = java_lang_VirtualThread::waitTimeout(current->vthread());
+ post_monitor_wait_event(event, node->_monitor, node->_notifier_tid, timeout, timed_out);
+ }
+ if (JvmtiExport::should_post_monitor_waited()) {
+ JvmtiExport::vthread_post_monitor_waited(current, node->_monitor, timed_out);
+ }
+ JRT_BLOCK_END
+ // Clear the anchor again; we only needed it for the blocked region above.
+ current->frame_anchor()->clear();
+ }
+
// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
//
// Note: a subset of changes to ObjectMonitor::wait()
// will need to be replicated in complete_exit
return;
}
current->set_current_waiting_monitor(this);
+ #ifdef LOOM_MONITOR_SUPPORT
+ ContinuationEntry* ce = current->last_continuation();
+ if (interruptible && ce != nullptr && ce->is_virtual_thread()) {
+ int result = Continuation::try_preempt(current, ce->cont_oop(current), freeze_on_wait);
+ if (result == freeze_ok) {
+ VThreadWait(current, millis);
+ current->set_current_waiting_monitor(nullptr);
+ return;
+ }
+ if (result == freeze_pinned_native) {
+ const Klass* monitor_klass = object()->klass();
+ if (!is_excluded(monitor_klass)) {
+ post_virtual_thread_pinned_event(current, "Native frame or <clinit> on stack");
+ }
+ }
+ }
+ #endif
+
// create a node to be put into the queue
// Critically, after we reset() the event but prior to park(), we must check
// for a pending interrupt.
ObjectWaiter node(current);
node.TState = ObjectWaiter::TS_WAIT;
intx save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
_recursions = 0; // set the recursion level to be 1
exit(current); // exit the monitor
! guarantee(owner_raw() != current, "invariant");
// The thread is on the WaitSet list - now park() it.
// On MP systems it's conceivable that a brief spin before we park
// could be profitable.
//
intx save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
_recursions = 0; // set the recursion level to be 1
exit(current); // exit the monitor
! guarantee(owner_raw() != owner_for(current), "invariant");
// The thread is on the WaitSet list - now park() it.
// On MP systems it's conceivable that a brief spin before we park
// could be profitable.
//
{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
if (interrupted || HAS_PENDING_EXCEPTION) {
// Intentionally empty
! } else if (node._notified == 0) {
if (millis <= 0) {
current->_ParkEvent->park();
} else {
ret = current->_ParkEvent->park(millis);
}
{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
if (interrupted || HAS_PENDING_EXCEPTION) {
// Intentionally empty
! } else if (!node._notified) {
if (millis <= 0) {
current->_ParkEvent->park();
} else {
ret = current->_ParkEvent->park(millis);
}
if (node.TState == ObjectWaiter::TS_WAIT) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
if (node.TState == ObjectWaiter::TS_WAIT) {
DequeueSpecificWaiter(&node); // unlink from WaitSet
! assert(node._notified == 0, "invariant");
node.TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_WaitSetLock);
}
if (node.TState == ObjectWaiter::TS_WAIT) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
if (node.TState == ObjectWaiter::TS_WAIT) {
DequeueSpecificWaiter(&node); // unlink from WaitSet
! assert(!node._notified, "invariant");
node.TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_WaitSetLock);
}
// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
JvmtiExport::post_monitor_waited(current, this, ret == OS_TIMEOUT);
! if (node._notified != 0 && _succ == current) {
// In this part of the monitor wait-notify-reenter protocol it
// is possible (and normal) for another thread to do a fastpath
// monitor enter-exit while this thread is still trying to get
// to the reenter portion of the protocol.
//
// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
JvmtiExport::post_monitor_waited(current, this, ret == OS_TIMEOUT);
! if (node._notified && _succ == current) {
// In this part of the monitor wait-notify-reenter protocol it
// is possible (and normal) for another thread to do a fastpath
// monitor enter-exit while this thread is still trying to get
// to the reenter portion of the protocol.
//
// monitors and JVM/TI RawMonitors (for now).
//
// We redo the unpark() to ensure forward progress, i.e., we
// don't want all pending threads hanging (parked) with none
// entering the unlocked monitor.
! node._event->unpark();
}
}
if (event.should_commit()) {
post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
}
OrderAccess::fence();
! assert(owner_raw() != current, "invariant");
ObjectWaiter::TStates v = node.TState;
if (v == ObjectWaiter::TS_RUN) {
enter(current);
} else {
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
// monitors and JVM/TI RawMonitors (for now).
//
// We redo the unpark() to ensure forward progress, i.e., we
// don't want all pending threads hanging (parked) with none
// entering the unlocked monitor.
! current->_ParkEvent->unpark();
}
}
if (event.should_commit()) {
post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
}
OrderAccess::fence();
! assert(owner_raw() != owner_for(current), "invariant");
ObjectWaiter::TStates v = node.TState;
if (v == ObjectWaiter::TS_RUN) {
enter(current);
} else {
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
// current has reacquired the lock.
// Lifecycle - the node representing current must not appear on any queues.
// Node is about to go out-of-scope, but even if it were immortal we wouldn't
// want residual elements associated with this thread left on any lists.
guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
! assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
} // OSThreadWaitState()
current->set_current_waiting_monitor(nullptr);
guarantee(_recursions == 0, "invariant");
int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current);
_recursions = save // restore the old recursion count
+ relock_count; // increased by the deferred relock count
! current->inc_held_monitor_count(relock_count); // Deopt never entered these counts.
_waiters--; // decrement the number of waiters
// Verify a few postconditions
! assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
// check if the notification happened
if (!WasNotified) {
// current has reacquired the lock.
// Lifecycle - the node representing current must not appear on any queues.
// Node is about to go out-of-scope, but even if it were immortal we wouldn't
// want residual elements associated with this thread left on any lists.
guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
! assert(owner_raw() == owner_for(current), "invariant");
assert(_succ != current, "invariant");
} // OSThreadWaitState()
current->set_current_waiting_monitor(nullptr);
guarantee(_recursions == 0, "invariant");
int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current);
_recursions = save // restore the old recursion count
+ relock_count; // increased by the deferred relock count
! NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count(relock_count);) // Deopt never entered these counts.
_waiters--; // decrement the number of waiters
// Verify a few postconditions
! assert(owner_raw() == owner_for(current), "invariant");
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
// check if the notification happened
if (!WasNotified) {
// NOTE: A spurious wake-up will be considered a timeout.
// Monitor notify has precedence over thread interrupt.
}
-
// Consider:
// If the lock is cool (cxq == null && succ == null) and we're on an MP system
// then instead of transferring a thread from the WaitSet to the EntryList
// we might just dequeue a thread from the WaitSet and directly unpark() it.
void ObjectMonitor::INotify(JavaThread* current) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
ObjectWaiter* iterator = DequeueWaiter();
if (iterator != nullptr) {
guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
! guarantee(iterator->_notified == 0, "invariant");
// Disposition - what might we do with iterator ?
// a. add it directly to the EntryList - either tail (policy == 1)
// or head (policy == 0).
// b. push it onto the front of the _cxq (policy == 2).
// For now we use (b).
iterator->TState = ObjectWaiter::TS_ENTER;
! iterator->_notified = 1;
iterator->_notifier_tid = JFR_THREAD_ID(current);
ObjectWaiter* list = _EntryList;
if (list != nullptr) {
assert(list->_prev == nullptr, "invariant");
// NOTE: A spurious wake-up will be considered a timeout.
// Monitor notify has precedence over thread interrupt.
}
// Consider:
// If the lock is cool (cxq == null && succ == null) and we're on an MP system
// then instead of transferring a thread from the WaitSet to the EntryList
// we might just dequeue a thread from the WaitSet and directly unpark() it.
void ObjectMonitor::INotify(JavaThread* current) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
ObjectWaiter* iterator = DequeueWaiter();
if (iterator != nullptr) {
guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
! guarantee(!iterator->_notified, "invariant");
// Disposition - what might we do with iterator ?
// a. add it directly to the EntryList - either tail (policy == 1)
// or head (policy == 0).
// b. push it onto the front of the _cxq (policy == 2).
// For now we use (b).
+ if (iterator->is_vthread()) {
+ oop vthread = iterator->vthread();
+ java_lang_VirtualThread::set_notified(vthread, true);
+ int old_state = java_lang_VirtualThread::state(vthread);
+ // If state is not WAIT/TIMED_WAIT then target could still be on
+ // unmount transition, or wait could have already timed-out or target
+ // could have been interrupted. In the first case, the target itself
+ // will set the state to BLOCKED at the end of the unmount transition.
+ // In the other cases the target would have been already unblocked so
+ // there is nothing to do.
+ if (old_state == java_lang_VirtualThread::WAIT ||
+ old_state == java_lang_VirtualThread::TIMED_WAIT) {
+ java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
+ }
+ }
+
iterator->TState = ObjectWaiter::TS_ENTER;
! iterator->_notified = true;
iterator->_notifier_tid = JFR_THREAD_ID(current);
ObjectWaiter* list = _EntryList;
if (list != nullptr) {
assert(list->_prev == nullptr, "invariant");
// protected by _WaitSetLock. In practice that's not useful. With the
// exception of wait() timeouts and interrupts the monitor owner
// is the only thread that grabs _WaitSetLock. There's almost no contention
// on _WaitSetLock so it's not profitable to reduce the length of the
// critical section.
!
! iterator->wait_reenter_begin(this);
}
Thread::SpinRelease(&_WaitSetLock);
}
// Consider: a not-uncommon synchronization bug is to use notify() when
// protected by _WaitSetLock. In practice that's not useful. With the
// exception of wait() timeouts and interrupts the monitor owner
// is the only thread that grabs _WaitSetLock. There's almost no contention
// on _WaitSetLock so it's not profitable to reduce the length of the
// critical section.
! if (!iterator->is_vthread()) {
! iterator->wait_reenter_begin(this);
+ }
}
Thread::SpinRelease(&_WaitSetLock);
}
// Consider: a not-uncommon synchronization bug is to use notify() when
}
OM_PERFDATA_OP(Notifications, inc(tally));
}
+ // Object.wait() path for a virtual thread that was successfully preempted:
+ // enqueues a heap-allocated ObjectWaiter on the WaitSet, releases the
+ // monitor, moves the vthread to WAITING/TIMED_WAITING, and stashes the node
+ // in the continuation's top stack chunk so VThreadWaitReenter can find it on
+ // resume. millis == 0 means wait without timeout.
+ void ObjectMonitor::VThreadWait(JavaThread* current, jlong millis) {
+ oop vthread = current->vthread();
+ ObjectWaiter* node = new ObjectWaiter(vthread, this);
+ node->_is_wait = true;
+ node->TState = ObjectWaiter::TS_WAIT;
+ java_lang_VirtualThread::set_notified(vthread, false); // Reset notified flag
+
+ // Enter the waiting queue, which is a circular doubly linked list in this case
+ // but it could be a priority queue or any data structure.
+ // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
+ // by the owner of the monitor *except* in the case where park()
+ // returns because of a timeout or interrupt. Contention is exceptionally rare
+ // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+
+ Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
+ AddWaiter(node);
+ Thread::SpinRelease(&_WaitSetLock);
+
+ _Responsible = nullptr;
+
+ node->_recursions = _recursions; // record the old recursion count
+ _recursions = 0; // set the recursion level to be 0
+ _waiters++; // increment the number of waiters
+ exit(current); // exit the monitor
+ guarantee(owner_raw() != owner_for(current), "invariant");
+
+ assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
+ java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);
+ java_lang_VirtualThread::set_waitTimeout(vthread, millis);
+
+ // Save the ObjectWaiter* in the chunk since we will need it
+ // when resuming execution.
+ oop cont = java_lang_VirtualThread::continuation(vthread);
+ stackChunkOop chunk = jdk_internal_vm_Continuation::tail(cont);
+ chunk->set_object_waiter(node);
+ }
+
+ // Resumes a virtual thread after its Object.wait() ended (notify, timeout,
+ // or interrupt). Unlinks the node from the WaitSet if it is still waiting,
+ // records the interrupt status, posts the monitor-waited events, and -- if
+ // the vthread was not notified -- tries to re-acquire the monitor, marking
+ // the thread as preempting again if acquisition fails. Returns true if the
+ // vthread was notified.
+ bool ObjectMonitor::VThreadWaitReenter(JavaThread* current, ObjectWaiter* node) {
+ // First time we run after being preempted on Object.wait().
+ // We need to check if we were interrupted or wait() timed-out
+ // and in that case remove ourselves from the _WaitSet queue.
+ if (node->TState == ObjectWaiter::TS_WAIT) {
+ Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
+ if (node->TState == ObjectWaiter::TS_WAIT) {
+ DequeueSpecificWaiter(node); // unlink from WaitSet
+ assert(!node->_notified, "invariant");
+ node->TState = ObjectWaiter::TS_RUN;
+ }
+ Thread::SpinRelease(&_WaitSetLock);
+ }
+
+ // TS_ENTER/TS_CXQ mean a notifier already moved us to the EntryList/cxq.
+ ObjectWaiter::TStates state = node->TState;
+ bool was_notified = state == ObjectWaiter::TS_ENTER || state == ObjectWaiter::TS_CXQ;
+ assert(was_notified || state == ObjectWaiter::TS_RUN, "");
+
+ // save it so that once we re-acquire the monitor we know if we need to throw IE.
+ node->_interrupted = !was_notified && current->is_interrupted(false);
+
+ EventJavaMonitorWait event;
+ if (event.should_commit() || JvmtiExport::should_post_monitor_waited()) {
+ vthread_monitor_waited_event(current, node, &event, !was_notified && !node->_interrupted);
+ }
+
+ node->_at_reenter = true;
+ add_to_contentions(1);
+ assert(owner_raw() != owner_for(current), "invariant");
+
+ if (!was_notified) {
+ bool acquired = VThreadMonitorEnter(current, node);
+ if (acquired) {
+ guarantee(_recursions == 0, "invariant");
+ _recursions = node->_recursions; // restore the old recursion count
+ _waiters--; // decrement the number of waiters
+
+ if (node->_interrupted) {
+ // We will throw at thaw end after finishing the mount transition.
+ current->set_pending_interrupted_exception(true);
+ }
+
+ delete node;
+ // Monitor re-acquired while mounted: drop the stashed node pointer.
+ oop cont = java_lang_VirtualThread::continuation(current->vthread());
+ stackChunkOop chunk = jdk_internal_vm_Continuation::tail(cont);
+ chunk->set_object_waiter(nullptr);
+ } else {
+ // The JT will read this variable on return to the resume_monitor_operation stub
+ // and will unmount (enterSpecial frame removed and return to Continuation.run()).
+ current->set_preempting(true);
+ }
+ }
+ return was_notified;
+ }
+
// -----------------------------------------------------------------------------
// Adaptive Spinning Support
//
// Adaptive spin-then-block - rational spinning
//
// when preparing to LD...CAS _owner, etc and the CAS is likely
// to succeed.
if (_succ == nullptr) {
_succ = current;
}
! Thread* prv = nullptr;
// There are three ways to exit the following loop:
// 1. A successful spin where this thread has acquired the lock.
// 2. Spin failure with prejudice
// 3. Spin failure without prejudice
// when preparing to LD...CAS _owner, etc and the CAS is likely
// to succeed.
if (_succ == nullptr) {
_succ = current;
}
! void* prv = nullptr;
// There are three ways to exit the following loop:
// 1. A successful spin where this thread has acquired the lock.
// 2. Spin failure with prejudice
// 3. Spin failure without prejudice
// or if we observe _owner change from one non-null value to
// another non-null value. In such cases we might abort
// the spin without prejudice or apply a "penalty" to the
// spin count-down variable "ctr", reducing it by 100, say.
! JavaThread* ox = static_cast<JavaThread*>(owner_raw());
if (ox == nullptr) {
! ox = static_cast<JavaThread*>(try_set_owner_from(nullptr, current));
if (ox == nullptr) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
if (_succ == current) {
_succ = nullptr;
// or if we observe _owner change from one non-null value to
// another non-null value. In such cases we might abort
// the spin without prejudice or apply a "penalty" to the
// spin count-down variable "ctr", reducing it by 100, say.
! void* ox = owner_raw();
if (ox == nullptr) {
! ox = try_set_owner_from(nullptr, current);
if (ox == nullptr) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
if (_succ == current) {
_succ = nullptr;
// WaitSet management ...
// Construct a WaitSet/EntryList node for a platform thread; the node borrows
// the thread's _ParkEvent for blocking and starts in the TS_RUN state.
ObjectWaiter::ObjectWaiter(JavaThread* current) {
_next = nullptr;
_prev = nullptr;
! _notified = 0;
_notifier_tid = 0;
TState = TS_RUN;
! _thread = current;
! _event = _thread->_ParkEvent;
_active = false;
! assert(_event != nullptr, "invariant");
}
// Records, via JavaThreadBlockedOnMonitorEnterState, that _thread is
// re-entering the monitor after a wait; the result is kept in _active and
// consumed by the matching wait_reenter_end.
void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
_active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(_thread, mon);
}
// WaitSet management ...
// Construct a WaitSet/EntryList node for a platform thread (current may be
// nullptr when delegated from the vthread constructor below). The new
// vthread-related fields (_monitor, _recursions, _at_reenter, _interrupted)
// are default-initialized here.
ObjectWaiter::ObjectWaiter(JavaThread* current) {
_next = nullptr;
_prev = nullptr;
! _thread = current;
+ _monitor = nullptr;
_notifier_tid = 0;
+ _recursions = 0;
TState = TS_RUN;
! _notified = false;
! _is_wait = false;
+ _at_reenter = false;
+ _interrupted = false;
_active = false;
! }
+
+ // Construct a node representing an unmounted virtual thread waiting on
+ // 'mon'; keeps a strong OopHandle to the vthread oop (released in the
+ // destructor). _thread stays nullptr for vthread nodes.
+ ObjectWaiter::ObjectWaiter(oop vthread, ObjectMonitor* mon) : ObjectWaiter(nullptr) {
+ assert(oopDesc::is_oop(vthread), "");
+ _vthread = OopHandle(JavaThread::thread_oop_storage(), vthread);
+ _monitor = mon;
+ }
+
+ // Release the vthread OopHandle, if this node represents a virtual thread,
+ // so the storage slot can be reused.
+ ObjectWaiter::~ObjectWaiter() {
+ if (is_vthread()) {
+ assert(vthread() != nullptr, "");
+ _vthread.release(JavaThread::thread_oop_storage());
+ }
+ }
+
+ // Resolve the vthread oop from its OopHandle. NOTE(review): presumably
+ // returns nullptr for platform-thread nodes (empty handle) -- confirm.
+ oop ObjectWaiter::vthread() {
+ return _vthread.resolve();
}
// Records, via JavaThreadBlockedOnMonitorEnterState, that _thread is
// re-entering the monitor after a wait; the result is kept in _active and
// consumed by the matching wait_reenter_end.
void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
_active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(_thread, mon);
}
_oop_storage = OopStorageSet::create_weak("ObjectSynchronizer Weak", mtSynchronizer);
DEBUG_ONLY(InitDone = true;)
}
+ // Second-phase initialization for virtual-thread support: allocates the
+ // global OopHandle that heads the vthread cxq and the ParkEvent used to
+ // wake the vthread unparker. NOTE(review): presumably runs after
+ // JavaThread::thread_oop_storage() is available -- confirm init ordering.
+ void ObjectMonitor::Initialize2() {
+ _vthread_cxq_head = OopHandle(JavaThread::thread_oop_storage(), nullptr);
+ _vthread_unparker_ParkEvent = ParkEvent::Allocate(nullptr);
+ }
+
void ObjectMonitor::print_on(outputStream* st) const {
// The minimal things to print for markWord printing, more can be added for debugging and logging.
st->print("{contentions=0x%08x,waiters=0x%08x"
",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
contentions(), waiters(), recursions(),
< prev index next >