< prev index next > src/hotspot/share/runtime/objectMonitor.cpp
Print this page
_entry_list(nullptr),
_entry_list_tail(nullptr),
_succ(NO_OWNER),
_SpinDuration(ObjectMonitor::Knob_SpinLimit),
_contentions(0),
+ _unmounted_vthreads(0),
_wait_set(nullptr),
_waiters(0),
_wait_set_lock(0),
_stack_locker(nullptr)
{ }
// TODO: Defer all thread state transitions until park-time.
// Since state transitions are heavy and inefficient we'd like
// to defer the state transitions until absolutely necessary,
// and in doing so avoid some transitions ...
- // For virtual threads that are pinned, do a timed-park instead to
- // alleviate some deadlocks cases where the succesor is an unmounted
- // virtual thread that cannot run. This can happen in particular when
- // this virtual thread is currently loading/initializing a class, and
- // all other carriers have a vthread pinned to it waiting for said class
- // to be loaded/initialized.
+ // If there are unmounted virtual threads in the _entry_list do a timed-park
+ // instead to alleviate some deadlock cases where one of them is picked as
+ // the successor but cannot run due to having run out of carriers. This can
+ // happen, for example, if this is a pinned virtual thread currently loading
+ // or initializing a class, and all other carriers have a pinned vthread
+ // waiting for said class to be loaded/initialized.
+ // Read counter *after* adding this thread to the _entry_list.
+ // Adding to _entry_list uses Atomic::cmpxchg() which already provides
+ // a fence that prevents this load from floating above the previous store.
+ bool do_timed_parked = has_unmounted_vthreads();
static int MAX_RECHECK_INTERVAL = 1000;
int recheck_interval = 1;
- bool do_timed_parked = false;
- ContinuationEntry* ce = current->last_continuation();
- if (ce != nullptr && ce->is_virtual_thread()) {
- do_timed_parked = true;
- }
for (;;) {
if (try_lock(current) == TryLockResult::Success) {
break;
assert(currentNode != nullptr, "invariant");
assert(currentNode->_thread == current, "invariant");
assert(_waiters > 0, "invariant");
assert_mark_word_consistency();
+ // If there are unmounted virtual threads in the _entry_list do a timed-park
+ // instead to alleviate some deadlock cases where one of them is picked as
+ // the successor but cannot run due to having run out of carriers. This can
+ // happen, for example, if this is a pinned virtual thread (or plain carrier)
+ // waiting for a class to be initialized.
+ // In theory we only get here in the "notification" case where the thread has
+ // already been added to the _entry_list. But if the thread happened to be interrupted
+ // at the same time it was being notified, we could have read a state of TS_ENTER
+ // that led us here but the thread hasn't yet been added to the queue. In that
+ // case getting a false value from has_unmounted_vthreads() is not a guarantee
+ // that no vthreads were added to the _entry_list before this thread. We will live
+ // with this corner case not only because it would be very rare, but also because
+ // if there are several carriers blocked in this same situation, this would only
+ // happen for the first one notified.
+ bool do_timed_parked = has_unmounted_vthreads();
+ static int MAX_RECHECK_INTERVAL = 1000;
+ int recheck_interval = 1;
+
for (;;) {
ObjectWaiter::TStates v = currentNode->TState;
guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
assert(!has_owner(current), "invariant");
assert(current->thread_state() == _thread_in_vm, "invariant");
{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
- current->_ParkEvent->park();
+ if (do_timed_parked) {
+ current->_ParkEvent->park((jlong) recheck_interval);
+ // Increase the recheck_interval, but clamp the value.
+ recheck_interval *= 8;
+ if (recheck_interval > MAX_RECHECK_INTERVAL) {
+ recheck_interval = MAX_RECHECK_INTERVAL;
+ }
+ } else {
+ current->_ParkEvent->park();
+ }
}
}
// Try again, but just so we distinguish between futile wakeups and
// successful wakeups. The following test isn't algorithmically
return true;
}
oop vthread = current->vthread();
ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);
+
+ // Increment counter *before* adding the vthread to the _entry_list.
+ // Adding to _entry_list uses Atomic::cmpxchg() which already provides
+ // a fence that prevents reordering of the stores.
+ inc_unmounted_vthreads();
+
if (try_lock_or_add_to_entry_list(current, node)) {
// We got the lock.
if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
+ dec_unmounted_vthreads();
return true;
}
// This thread is now added to the entry_list.
// We have to try once more since owner could have exited monitor and checked
if (try_lock(current) == TryLockResult::Success) {
assert(has_owner(current), "invariant");
unlink_after_acquire(current, node);
if (has_successor(current)) clear_successor();
if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
+ dec_unmounted_vthreads();
return true;
}
assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
}
void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
assert(has_owner(current), "invariant");
add_to_contentions(-1);
+ dec_unmounted_vthreads();
if (has_successor(current)) clear_successor();
guarantee(_recursions == 0, "invariant");
JvmtiExport::post_monitor_wait(current, object(), millis);
}
current->set_current_waiting_monitor(this);
result = Continuation::try_preempt(current, ce->cont_oop(current));
if (result == freeze_ok) {
- vthread_wait(current, millis);
+ vthread_wait(current, millis, interruptible);
current->set_current_waiting_monitor(nullptr);
return;
}
}
// The jtiows does nothing for non-interruptible.
// there is nothing to do.
if (old_state == java_lang_VirtualThread::WAIT ||
old_state == java_lang_VirtualThread::TIMED_WAIT) {
java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
}
+ // Increment counter *before* adding the vthread to the _entry_list.
+ // Adding to _entry_list uses Atomic::cmpxchg() which already provides
+ // a fence that prevents reordering of the stores.
+ inc_unmounted_vthreads();
}
iterator->_notified = true;
iterator->_notifier_tid = JFR_THREAD_ID(current);
did_notify = true;
// on _wait_set_lock so it's not profitable to reduce the length of the
// critical section.
if (!iterator->is_vthread()) {
iterator->wait_reenter_begin(this);
+
+ // Read counter *after* adding the thread to the _entry_list.
+ // Adding to _entry_list uses Atomic::cmpxchg() which already provides
+ // a fence that prevents this load from floating above the previous store.
+ if (has_unmounted_vthreads()) {
+ // Wake up the thread to alleviate some deadlock cases where the successor
+ // that will be picked up when this thread releases the monitor is an unmounted
+ // virtual thread that cannot run due to having run out of carriers. Upon waking
+ // up, the thread will call reenter_internal() which will use a timed-park in case
+ // there is contention and there are still vthreads in the _entry_list.
+ JavaThread* t = iterator->thread();
+ t->_ParkEvent->unpark();
+ }
}
}
Thread::SpinRelease(&_wait_set_lock);
return did_notify;
}
if ((tally > 0) && event.should_commit()) {
post_monitor_notify_event(&event, this, /* notified_count = */ tally);
}
}
- void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis) {
+ void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interruptible) {
oop vthread = current->vthread();
ObjectWaiter* node = new ObjectWaiter(vthread, this);
node->_is_wait = true;
+ node->_interruptible = interruptible;
node->TState = ObjectWaiter::TS_WAIT;
java_lang_VirtualThread::set_notified(vthread, false); // Reset notified flag
+ java_lang_VirtualThread::set_interruptible_wait(vthread, interruptible);
// Enter the waiting queue, which is a circular doubly linked list in this case
// but it could be a priority queue or any data structure.
// _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
// by the owner of the monitor *except* in the case where park()
// If this was an interrupted case, set the _interrupted boolean so that
// once we re-acquire the monitor we know if we need to throw IE or not.
ObjectWaiter::TStates state = node->TState;
bool was_notified = state == ObjectWaiter::TS_ENTER;
assert(was_notified || state == ObjectWaiter::TS_RUN, "");
- node->_interrupted = !was_notified && current->is_interrupted(false);
+ node->_interrupted = node->_interruptible && !was_notified && current->is_interrupted(false);
- // Post JFR and JVMTI events.
+ // Post JFR and JVMTI events. If non-interruptible we are in
+ // ObjectLocker case so we don't post anything.
EventJavaMonitorWait wait_event;
- if (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited()) {
+ if (node->_interruptible && (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited())) {
vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
}
// Mark that we are at reenter so that we don't call this method again.
node->_at_reenter = true;
st->print_cr(" _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
st->print_cr(" _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
st->print_cr(" _succ = " INT64_FORMAT, successor());
st->print_cr(" _SpinDuration = %d", _SpinDuration);
st->print_cr(" _contentions = %d", contentions());
+ st->print_cr(" _unmounted_vthreads = " INT64_FORMAT, _unmounted_vthreads);
st->print_cr(" _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
st->print_cr(" _waiters = %d", _waiters);
st->print_cr(" _wait_set_lock = %d", _wait_set_lock);
st->print_cr("}");
}
< prev index next >