277 } else {
278 // However, ThreadService::get_current_contended_monitor()
279 // can call here via the VMThread so sanity check it.
280 assert(self->is_VM_thread(), "must be");
281 }
282 #endif // ASSERT
283 }
284
285 ObjectMonitor::ObjectMonitor(oop object) :
286 _metadata(0),
287 _object(_oop_storage, object),
288 _owner(NO_OWNER),
289 _previous_owner_tid(0),
290 _next_om(nullptr),
291 _recursions(0),
292 _entry_list(nullptr),
293 _entry_list_tail(nullptr),
294 _succ(NO_OWNER),
295 _SpinDuration(ObjectMonitor::Knob_SpinLimit),
296 _contentions(0),
297 _wait_set(nullptr),
298 _waiters(0),
299 _wait_set_lock(0)
300 { }
301
302 ObjectMonitor::~ObjectMonitor() {
303 _object.release(_oop_storage);
304 }
305
306 oop ObjectMonitor::object() const {
307 check_object_context();
308 return _object.resolve();
309 }
310
311 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
312 if (current->is_suspended()) {
313 _om->_recursions = 0;
314 _om->clear_successor();
315 // Don't need a full fence after clearing successor here because of the call to exit().
316 _om->exit(current, false /* not_suspended */);
317 _om_exited = true;
318
319 current->set_current_pending_monitor(_om);
320 }
321 }
322
323 void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) {
324 if (current->is_suspended()) {
325 if (_om->has_successor(current)) {
326 _om->clear_successor();
327 OrderAccess::fence(); // always do a full fence when successor is cleared
328 }
329 }
330 }
966
967 ObjectWaiter node(current);
968 current->_ParkEvent->reset();
969
970 if (try_lock_or_add_to_entry_list(current, &node)) {
971 return; // We got the lock.
972 }
973 // This thread is now added to the _entry_list.
974
975 // The lock might have been released while this thread was occupied queueing
976 // itself onto _entry_list. To close the race and avoid "stranding" and
977 // progress-liveness failure we must resample-retry _owner before parking.
978 // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
979 // In this case the ST-MEMBAR is accomplished with CAS().
980 //
981 // TODO: Defer all thread state transitions until park-time.
982 // Since state transitions are heavy and inefficient we'd like
983 // to defer the state transitions until absolutely necessary,
984 // and in doing so avoid some transitions ...
985
986 // For virtual threads that are pinned, do a timed-park instead to
987   // alleviate some deadlock cases where the successor is an unmounted
988 // virtual thread that cannot run. This can happen in particular when
989 // this virtual thread is currently loading/initializing a class, and
990 // all other carriers have a vthread pinned to it waiting for said class
991 // to be loaded/initialized.
992 static int MAX_RECHECK_INTERVAL = 1000;
993 int recheck_interval = 1;
994 bool do_timed_parked = false;
995 ContinuationEntry* ce = current->last_continuation();
996 if (ce != nullptr && ce->is_virtual_thread()) {
997 do_timed_parked = true;
998 }
999
1000 for (;;) {
1001
1002 if (try_lock(current) == TryLockResult::Success) {
1003 break;
1004 }
1005 assert(!has_owner(current), "invariant");
1006
1007 // park self
1008 if (do_timed_parked) {
1009 current->_ParkEvent->park((jlong) recheck_interval);
1010 // Increase the recheck_interval, but clamp the value.
1011 recheck_interval *= 8;
1012 if (recheck_interval > MAX_RECHECK_INTERVAL) {
1013 recheck_interval = MAX_RECHECK_INTERVAL;
1014 }
1015 } else {
1016 current->_ParkEvent->park();
1017 }
1018
1073 // the ST of null into _owner in the *subsequent* (following) corresponding
1074 // monitorexit.
1075
1076 return;
1077 }
1078
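As an aside for readers less familiar with the pattern, the resample-retry rule described above (publish yourself on _entry_list, then re-read _owner before parking) can be shown in isolation. The following is a minimal standalone C++ sketch, not HotSpot code: g_owner and g_entry_count are invented stand-ins for _owner and the entry list, and a short timed sleep stands in for park().

#include <atomic>
#include <cassert>
#include <chrono>
#include <thread>

std::atomic<void*> g_owner{nullptr};   // stand-in for _owner
std::atomic<int>   g_entry_count{0};   // stand-in for "I am on _entry_list"

static bool sketch_try_lock(void* self) {
  void* expected = nullptr;
  // The CAS is the "ST-MEMBAR" from the comment above: a sequentially
  // consistent read-modify-write acts as a full fence.
  return g_owner.compare_exchange_strong(expected, self);
}

static void sketch_exit(void* self) {
  assert(g_owner.load() == self);
  g_owner.store(nullptr);              // ST of null into _owner (monitorexit)
}

static void sketch_contended_enter(void* self) {
  g_entry_count.fetch_add(1);          // ST _entry_list: publish ourselves first
  // Resample-retry _owner before parking: the owner may have released the
  // lock and scanned an (apparently) empty entry list before our store above
  // became visible, so parking without re-checking could strand this thread.
  while (!sketch_try_lock(self)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));  // "park"
  }
  g_entry_count.fetch_sub(1);          // unlink after acquire
}

int main() {
  std::thread t1([] { sketch_contended_enter((void*)1); sketch_exit((void*)1); });
  std::thread t2([] { sketch_contended_enter((void*)2); sketch_exit((void*)2); });
  t1.join();
  t2.join();
  return 0;
}

The sketch parks with a timeout, so it cannot strand; in the real code the park is untimed unless do_timed_parked is set, which is why re-sampling _owner before parking is essential.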
1079 // reenter_internal() is a specialized inline form of the latter half of the
1080 // contended slow-path from enter_internal(). We use reenter_internal() only for
1081 // monitor reentry in wait().
1082 //
1083 // In the future we should reconcile enter_internal() and reenter_internal().
1084
1085 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1086 assert(current != nullptr, "invariant");
1087 assert(current->thread_state() != _thread_blocked, "invariant");
1088 assert(currentNode != nullptr, "invariant");
1089 assert(currentNode->_thread == current, "invariant");
1090 assert(_waiters > 0, "invariant");
1091 assert_mark_word_consistency();
1092
1093 for (;;) {
1094 ObjectWaiter::TStates v = currentNode->TState;
1095 guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1096 assert(!has_owner(current), "invariant");
1097
1098 // This thread has been notified so try to reacquire the lock.
1099 if (try_lock(current) == TryLockResult::Success) {
1100 break;
1101 }
1102
1103 // If that fails, spin again. Note that spin count may be zero so the above TryLock
1104 // is necessary.
1105 if (try_spin(current)) {
1106 break;
1107 }
1108
1109 {
1110 OSThreadContendState osts(current->osthread());
1111
1112 assert(current->thread_state() == _thread_in_vm, "invariant");
1113
1114 {
1115 ClearSuccOnSuspend csos(this);
1116 ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1117 current->_ParkEvent->park();
1118 }
1119 }
1120
1121 // Try again, but just so we distinguish between futile wakeups and
1122 // successful wakeups. The following test isn't algorithmically
1123 // necessary, but it helps us maintain sensible statistics.
1124 if (try_lock(current) == TryLockResult::Success) {
1125 break;
1126 }
1127
1128 // The lock is still contested.
1129
1130 // Assuming this is not a spurious wakeup we'll normally
1131 // find that _succ == current.
1132 if (has_successor(current)) clear_successor();
1133
1134 // Invariant: after clearing _succ a contending thread
1135 // *must* retry _owner before parking.
1136 OrderAccess::fence();
1137 }
1138
1139 // Current has acquired the lock -- Unlink current from the _entry_list.
1140 assert(has_owner(current), "invariant");
1141 assert_mark_word_consistency();
1142 unlink_after_acquire(current, currentNode);
1143 if (has_successor(current)) clear_successor();
1144 assert(!has_successor(current), "invariant");
1145 currentNode->TState = ObjectWaiter::TS_RUN;
1146 OrderAccess::fence(); // see comments at the end of enter_internal()
1147 }
1148
1149 // This method is called from two places:
1150 // - On monitorenter contention with a null waiter.
1151 // - After Object.wait() times out or the target is interrupted, to reenter the
1152 //   monitor with the existing waiter.
1153 // For the Object.wait() case we do not delete the ObjectWaiter in case we
1154 // successfully acquire the monitor since we are going to need it on return.
1155 bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
1156 if (try_lock(current) == TryLockResult::Success) {
1157 assert(has_owner(current), "invariant");
1158 assert(!has_successor(current), "invariant");
1159 return true;
1160 }
1161
1162 oop vthread = current->vthread();
1163 ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);
1164 if (try_lock_or_add_to_entry_list(current, node)) {
1165 // We got the lock.
1166 if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
1167 return true;
1168 }
1169 // This thread is now added to the entry_list.
1170
1171   // We have to try once more since the owner could have exited the monitor and checked
1172 // _entry_list before we added the node to the queue.
1173 if (try_lock(current) == TryLockResult::Success) {
1174 assert(has_owner(current), "invariant");
1175 unlink_after_acquire(current, node);
1176 if (has_successor(current)) clear_successor();
1177 if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
1178 return true;
1179 }
1180
1181 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1182 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1183
1184 // We didn't succeed in acquiring the monitor so increment _contentions and
1185 // save ObjectWaiter* in the vthread since we will need it when resuming execution.
1186 add_to_contentions(1);
1187 java_lang_VirtualThread::set_objectWaiter(vthread, node);
1188 return false;
1189 }
1190
1191 // Called from thaw code to resume the monitor operation that caused the vthread
1192 // to be unmounted. Method returns true if the monitor is successfully acquired,
1193 // which marks the end of the monitor operation, otherwise it returns false.
1194 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1195 assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1196 assert(!has_owner(current), "");
1197
1213 oop vthread = current->vthread();
1214 if (has_successor(current)) clear_successor();
1215
1216 // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1217 OrderAccess::fence();
1218
1219 if (try_lock(current) == TryLockResult::Success) {
1220 vthread_epilog(current, node);
1221 return true;
1222 }
1223
1224 // We will return to Continuation.run() and unmount so set the right state.
1225 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1226
1227 return false;
1228 }
1229
1230 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1231 assert(has_owner(current), "invariant");
1232 add_to_contentions(-1);
1233
1234 if (has_successor(current)) clear_successor();
1235
1236 guarantee(_recursions == 0, "invariant");
1237
1238 if (node->is_wait()) {
1239 _recursions = node->_recursions; // restore the old recursion count
1240 _waiters--; // decrement the number of waiters
1241
1242 if (node->_interrupted) {
1243 // We will throw at thaw end after finishing the mount transition.
1244 current->set_pending_interrupted_exception(true);
1245 }
1246 }
1247
1248 unlink_after_acquire(current, node);
1249 delete node;
1250
1251 // Clear the ObjectWaiter* from the vthread.
1252 java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);
1757 // consume an unpark() meant for the ParkEvent associated with
1758 // this ObjectMonitor.
1759 }
1760 if (wait_event.should_commit()) {
1761 post_monitor_wait_event(&wait_event, this, 0, millis, false);
1762 }
1763 THROW(vmSymbols::java_lang_InterruptedException());
1764 return;
1765 }
1766
1767 freeze_result result;
1768 ContinuationEntry* ce = current->last_continuation();
1769 bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1770 if (is_virtual) {
1771 if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1772 JvmtiExport::post_monitor_wait(current, object(), millis);
1773 }
1774 current->set_current_waiting_monitor(this);
1775 result = Continuation::try_preempt(current, ce->cont_oop(current));
1776 if (result == freeze_ok) {
1777 vthread_wait(current, millis);
1778 current->set_current_waiting_monitor(nullptr);
1779 return;
1780 }
1781 }
1782 // The jtiows does nothing for non-interruptible.
1783 JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1784
1785 if (!is_virtual) { // it was already set for virtual thread
1786 if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1787 JvmtiExport::post_monitor_wait(current, object(), millis);
1788
1789 // The current thread already owns the monitor and it has not yet
1790 // been added to the wait queue so the current thread cannot be
1791 // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1792 // event handler cannot accidentally consume an unpark() meant for
1793 // the ParkEvent associated with this ObjectMonitor.
1794 }
1795 current->set_current_waiting_monitor(this);
1796 }
1797 // create a node to be put into the queue
1987 Thread::SpinAcquire(&_wait_set_lock);
1988 ObjectWaiter* iterator = dequeue_waiter();
1989 if (iterator != nullptr) {
1990 guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1991 guarantee(!iterator->_notified, "invariant");
1992
1993 if (iterator->is_vthread()) {
1994 oop vthread = iterator->vthread();
1995 java_lang_VirtualThread::set_notified(vthread, true);
1996 int old_state = java_lang_VirtualThread::state(vthread);
1997 // If state is not WAIT/TIMED_WAIT then target could still be on
1998       // unmount transition, or wait could have already timed out or target
1999 // could have been interrupted. In the first case, the target itself
2000 // will set the state to BLOCKED at the end of the unmount transition.
2001 // In the other cases the target would have been already unblocked so
2002 // there is nothing to do.
2003 if (old_state == java_lang_VirtualThread::WAIT ||
2004 old_state == java_lang_VirtualThread::TIMED_WAIT) {
2005 java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2006 }
2007 }
2008
2009 iterator->_notified = true;
2010 iterator->_notifier_tid = JFR_THREAD_ID(current);
2011 did_notify = true;
2012 add_to_entry_list(current, iterator);
2013
2014 // _wait_set_lock protects the wait queue, not the entry_list. We could
2015 // move the add-to-entry_list operation, above, outside the critical section
2016 // protected by _wait_set_lock. In practice that's not useful. With the
2017 // exception of wait() timeouts and interrupts the monitor owner
2018 // is the only thread that grabs _wait_set_lock. There's almost no contention
2019 // on _wait_set_lock so it's not profitable to reduce the length of the
2020 // critical section.
2021
2022 if (!iterator->is_vthread()) {
2023 iterator->wait_reenter_begin(this);
2024 }
2025 }
2026 Thread::SpinRelease(&_wait_set_lock);
2027 return did_notify;
2028 }
2029
2030 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2031 ObjectMonitor* monitor,
2032 int notified_count) {
2033 assert(event != nullptr, "invariant");
2034 assert(monitor != nullptr, "invariant");
2035 const Klass* monitor_klass = monitor->object()->klass();
2036 if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2037 return;
2038 }
2039 event->set_monitorClass(monitor_klass);
2040 // Set an address that is 'unique enough', such that events close in
2041 // time and with the same address are likely (but not guaranteed) to
2042 // belong to the same object.
2043 event->set_address((uintptr_t)monitor);
2093 quick_notifyAll(current);
2094 }
2095
2096 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2097 assert(has_owner(current), "Precondition");
2098
2099 EventJavaMonitorNotify event;
2100 DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2101 int tally = 0;
2102 while (_wait_set != nullptr) {
2103 if (notify_internal(current)) {
2104 tally++;
2105 }
2106 }
2107
2108 if ((tally > 0) && event.should_commit()) {
2109 post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2110 }
2111 }
2112
2113 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis) {
2114 oop vthread = current->vthread();
2115 ObjectWaiter* node = new ObjectWaiter(vthread, this);
2116 node->_is_wait = true;
2117 node->TState = ObjectWaiter::TS_WAIT;
2118 java_lang_VirtualThread::set_notified(vthread, false); // Reset notified flag
2119
2120 // Enter the waiting queue, which is a circular doubly linked list in this case
2121 // but it could be a priority queue or any data structure.
2122 // _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
2123 // by the owner of the monitor *except* in the case where park()
2124 // returns because of a timeout or interrupt. Contention is exceptionally rare
2125 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2126
2127 Thread::SpinAcquire(&_wait_set_lock);
2128 add_waiter(node);
2129 Thread::SpinRelease(&_wait_set_lock);
2130
2131 node->_recursions = _recursions; // record the old recursion count
2132 _recursions = 0; // set the recursion level to be 0
2133 _waiters++; // increment the number of waiters
2134 exit(current); // exit the monitor
2135 guarantee(!has_owner(current), "invariant");
2136
2137 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2138 java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);
2144
2145 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2146 // The first time we run after being preempted on Object.wait() we
2147   // need to check if we were interrupted or the wait timed out, and
2148 // in that case remove ourselves from the _wait_set queue.
2149 if (node->TState == ObjectWaiter::TS_WAIT) {
2150 Thread::SpinAcquire(&_wait_set_lock);
2151 if (node->TState == ObjectWaiter::TS_WAIT) {
2152 dequeue_specific_waiter(node); // unlink from wait_set
2153 assert(!node->_notified, "invariant");
2154 node->TState = ObjectWaiter::TS_RUN;
2155 }
2156 Thread::SpinRelease(&_wait_set_lock);
2157 }
2158
2159 // If this was an interrupted case, set the _interrupted boolean so that
2160 // once we re-acquire the monitor we know if we need to throw IE or not.
2161 ObjectWaiter::TStates state = node->TState;
2162 bool was_notified = state == ObjectWaiter::TS_ENTER;
2163 assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2164 node->_interrupted = !was_notified && current->is_interrupted(false);
2165
2166 // Post JFR and JVMTI events.
2167 EventJavaMonitorWait wait_event;
2168 if (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited()) {
2169 vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2170 }
2171
2172 // Mark that we are at reenter so that we don't call this method again.
2173 node->_at_reenter = true;
2174
2175 if (!was_notified) {
2176 bool acquired = vthread_monitor_enter(current, node);
2177 if (acquired) {
2178 guarantee(_recursions == 0, "invariant");
2179 _recursions = node->_recursions; // restore the old recursion count
2180 _waiters--; // decrement the number of waiters
2181
2182 if (node->_interrupted) {
2183 // We will throw at thaw end after finishing the mount transition.
2184 current->set_pending_interrupted_exception(true);
2185 }
2186
2187 delete node;
2188 // Clear the ObjectWaiter* from the vthread.
2443
2444 return false;
2445 }
2446
2447
2448 // -----------------------------------------------------------------------------
2449 // wait_set management ...
2450
2451 ObjectWaiter::ObjectWaiter(JavaThread* current) {
2452 _next = nullptr;
2453 _prev = nullptr;
2454 _thread = current;
2455 _monitor = nullptr;
2456 _notifier_tid = 0;
2457 _recursions = 0;
2458 TState = TS_RUN;
2459 _notified = false;
2460 _is_wait = false;
2461 _at_reenter = false;
2462 _interrupted = false;
2463 _active = false;
2464 }
2465
2466 ObjectWaiter::ObjectWaiter(oop vthread, ObjectMonitor* mon) : ObjectWaiter(nullptr) {
2467 assert(oopDesc::is_oop(vthread), "");
2468 _vthread = OopHandle(JavaThread::thread_oop_storage(), vthread);
2469 _monitor = mon;
2470 }
2471
2472 ObjectWaiter::~ObjectWaiter() {
2473 if (is_vthread()) {
2474 assert(vthread() != nullptr, "");
2475 _vthread.release(JavaThread::thread_oop_storage());
2476 }
2477 }
2478
2479 oop ObjectWaiter::vthread() const {
2480 return _vthread.resolve();
2481 }
2482
2615 st->print_cr(" _object = " INTPTR_FORMAT, p2i(object_peek()));
2616 st->print_cr(" _pad_buf0 = {");
2617 st->print_cr(" [0] = '\\0'");
2618 st->print_cr(" ...");
2619 st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2620 st->print_cr(" }");
2621 st->print_cr(" _owner = " INT64_FORMAT, owner_raw());
2622 st->print_cr(" _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2623 st->print_cr(" _pad_buf1 = {");
2624 st->print_cr(" [0] = '\\0'");
2625 st->print_cr(" ...");
2626 st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2627 st->print_cr(" }");
2628 st->print_cr(" _next_om = " INTPTR_FORMAT, p2i(next_om()));
2629 st->print_cr(" _recursions = %zd", _recursions);
2630 st->print_cr(" _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2631 st->print_cr(" _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2632 st->print_cr(" _succ = " INT64_FORMAT, successor());
2633 st->print_cr(" _SpinDuration = %d", _SpinDuration);
2634 st->print_cr(" _contentions = %d", contentions());
2635 st->print_cr(" _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2636 st->print_cr(" _waiters = %d", _waiters);
2637 st->print_cr(" _wait_set_lock = %d", _wait_set_lock);
2638 st->print_cr("}");
2639 }
2640 #endif
|
277 } else {
278 // However, ThreadService::get_current_contended_monitor()
279 // can call here via the VMThread so sanity check it.
280 assert(self->is_VM_thread(), "must be");
281 }
282 #endif // ASSERT
283 }
284
285 ObjectMonitor::ObjectMonitor(oop object) :
286 _metadata(0),
287 _object(_oop_storage, object),
288 _owner(NO_OWNER),
289 _previous_owner_tid(0),
290 _next_om(nullptr),
291 _recursions(0),
292 _entry_list(nullptr),
293 _entry_list_tail(nullptr),
294 _succ(NO_OWNER),
295 _SpinDuration(ObjectMonitor::Knob_SpinLimit),
296 _contentions(0),
297 _unmounted_vthreads(0),
298 _wait_set(nullptr),
299 _waiters(0),
300 _wait_set_lock(0)
301 { }
302
303 ObjectMonitor::~ObjectMonitor() {
304 _object.release(_oop_storage);
305 _object_strong.release(JavaThread::thread_oop_storage());
306 }
307
308 oop ObjectMonitor::object() const {
309 check_object_context();
310 return _object.resolve();
311 }
312
313 // Keep object protected during ObjectLocker preemption.
314 void ObjectMonitor::set_object_strong() {
315 check_object_context();
316 if (_object_strong.is_empty()) {
317 if (Thread::TrySpinAcquire(&_object_strong_lock)) {
318 if (_object_strong.is_empty()) {
319 assert(_object.resolve() != nullptr, "");
320 _object_strong = OopHandle(JavaThread::thread_oop_storage(), _object.resolve());
321 }
322 Thread::SpinRelease(&_object_strong_lock);
323 }
324 }
325 }
326
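set_object_strong() above is a check / try-lock / re-check publication: the strong handle is created at most once, and a thread that fails to acquire the spin lock simply returns and relies on whichever thread is currently publishing. A minimal standalone sketch of that idiom follows; it is not HotSpot code, and g_strong, g_strong_lock and publish_once are invented names using std::atomic in place of OopHandle and Thread::TrySpinAcquire.

#include <atomic>

std::atomic<const char*> g_strong{nullptr};          // stand-in for _object_strong
std::atomic_flag g_strong_lock = ATOMIC_FLAG_INIT;   // stand-in for _object_strong_lock

void publish_once(const char* value) {
  if (g_strong.load(std::memory_order_acquire) != nullptr) {
    return;                                           // fast path: already published
  }
  if (g_strong_lock.test_and_set(std::memory_order_acquire)) {
    return;                                           // try-lock failed: another thread is publishing
  }
  if (g_strong.load(std::memory_order_relaxed) == nullptr) {
    g_strong.store(value, std::memory_order_release); // re-check under the lock, publish once
  }
  g_strong_lock.clear(std::memory_order_release);
}

int main() {
  publish_once("object");
  return 0;
}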
327 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
328 if (current->is_suspended()) {
329 _om->_recursions = 0;
330 _om->clear_successor();
331 // Don't need a full fence after clearing successor here because of the call to exit().
332 _om->exit(current, false /* not_suspended */);
333 _om_exited = true;
334
335 current->set_current_pending_monitor(_om);
336 }
337 }
338
339 void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) {
340 if (current->is_suspended()) {
341 if (_om->has_successor(current)) {
342 _om->clear_successor();
343 OrderAccess::fence(); // always do a full fence when successor is cleared
344 }
345 }
346 }
982
983 ObjectWaiter node(current);
984 current->_ParkEvent->reset();
985
986 if (try_lock_or_add_to_entry_list(current, &node)) {
987 return; // We got the lock.
988 }
989 // This thread is now added to the _entry_list.
990
991 // The lock might have been released while this thread was occupied queueing
992 // itself onto _entry_list. To close the race and avoid "stranding" and
993 // progress-liveness failure we must resample-retry _owner before parking.
994 // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
995 // In this case the ST-MEMBAR is accomplished with CAS().
996 //
997 // TODO: Defer all thread state transitions until park-time.
998 // Since state transitions are heavy and inefficient we'd like
999 // to defer the state transitions until absolutely necessary,
1000 // and in doing so avoid some transitions ...
1001
1002 // If there are unmounted virtual threads in the _entry_list do a timed-park
1003   // instead to alleviate some deadlock cases where one of them is picked as
1004 // the successor but cannot run due to having run out of carriers. This can
1005 // happen, for example, if this is a pinned virtual thread currently loading
1006   // or initializing a class, and all other carriers have a pinned vthread
1007 // waiting for said class to be loaded/initialized.
1008 // Read counter *after* adding this thread to the _entry_list.
1009 // Adding to _entry_list uses Atomic::cmpxchg() which already provides
1010   // a fence that prevents this load from floating up past the previous store.
1011 bool do_timed_parked = has_unmounted_vthreads();
1012 static int MAX_RECHECK_INTERVAL = 1000;
1013 int recheck_interval = 1;
1014
1015 for (;;) {
1016
1017 if (try_lock(current) == TryLockResult::Success) {
1018 break;
1019 }
1020 assert(!has_owner(current), "invariant");
1021
1022 // park self
1023 if (do_timed_parked) {
1024 current->_ParkEvent->park((jlong) recheck_interval);
1025 // Increase the recheck_interval, but clamp the value.
1026 recheck_interval *= 8;
1027 if (recheck_interval > MAX_RECHECK_INTERVAL) {
1028 recheck_interval = MAX_RECHECK_INTERVAL;
1029 }
1030 } else {
1031 current->_ParkEvent->park();
1032 }
1033
1088 // the ST of null into _owner in the *subsequent* (following) corresponding
1089 // monitorexit.
1090
1091 return;
1092 }
1093
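For reference, the clamped backoff in the timed-park branch above produces park timeouts of 1, 8, 64 and 512 ms and then stays at MAX_RECHECK_INTERVAL (1000 ms) for every further futile wakeup. A trivial standalone loop that prints that schedule:

#include <cstdio>

int main() {
  const int MAX_RECHECK_INTERVAL = 1000;        // same clamp as above
  int recheck_interval = 1;
  for (int futile_wakeup = 1; futile_wakeup <= 7; futile_wakeup++) {
    std::printf("park #%d: %d ms\n", futile_wakeup, recheck_interval);
    recheck_interval *= 8;                      // grow geometrically...
    if (recheck_interval > MAX_RECHECK_INTERVAL) {
      recheck_interval = MAX_RECHECK_INTERVAL;  // ...but never beyond the clamp
    }
  }
  return 0;
}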
1094 // reenter_internal() is a specialized inline form of the latter half of the
1095 // contended slow-path from enter_internal(). We use reenter_internal() only for
1096 // monitor reentry in wait().
1097 //
1098 // In the future we should reconcile enter_internal() and reenter_internal().
1099
1100 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1101 assert(current != nullptr, "invariant");
1102 assert(current->thread_state() != _thread_blocked, "invariant");
1103 assert(currentNode != nullptr, "invariant");
1104 assert(currentNode->_thread == current, "invariant");
1105 assert(_waiters > 0, "invariant");
1106 assert_mark_word_consistency();
1107
1108 // If there are unmounted virtual threads in the _entry_list do a timed-park
1109   // instead to alleviate some deadlock cases where one of them is picked as
1110 // the successor but cannot run due to having run out of carriers. This can
1111 // happen, for example, if this is a pinned virtual thread (or plain carrier)
1112 // waiting for a class to be initialized.
1113 bool do_timed_parked = has_unmounted_vthreads();
1114 static int MAX_RECHECK_INTERVAL = 1000;
1115 int recheck_interval = 1;
1116
1117 for (;;) {
1118 ObjectWaiter::TStates v = currentNode->TState;
1119 guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1120 assert(!has_owner(current), "invariant");
1121
1122 // This thread has been notified so try to reacquire the lock.
1123 if (try_lock(current) == TryLockResult::Success) {
1124 break;
1125 }
1126
1127 // If that fails, spin again. Note that spin count may be zero so the above TryLock
1128 // is necessary.
1129 if (try_spin(current)) {
1130 break;
1131 }
1132
1133 {
1134 OSThreadContendState osts(current->osthread());
1135
1136 assert(current->thread_state() == _thread_in_vm, "invariant");
1137
1138 {
1139 ClearSuccOnSuspend csos(this);
1140 ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1141 if (do_timed_parked) {
1142 current->_ParkEvent->park((jlong) recheck_interval);
1143 // Increase the recheck_interval, but clamp the value.
1144 recheck_interval *= 8;
1145 if (recheck_interval > MAX_RECHECK_INTERVAL) {
1146 recheck_interval = MAX_RECHECK_INTERVAL;
1147 }
1148 } else {
1149 current->_ParkEvent->park();
1150 }
1151 }
1152 }
1153
1154 // Try again, but just so we distinguish between futile wakeups and
1155 // successful wakeups. The following test isn't algorithmically
1156 // necessary, but it helps us maintain sensible statistics.
1157 if (try_lock(current) == TryLockResult::Success) {
1158 break;
1159 }
1160
1161 // The lock is still contested.
1162
1163 // Assuming this is not a spurious wakeup we'll normally
1164 // find that _succ == current.
1165 if (has_successor(current)) clear_successor();
1166
1167 // Invariant: after clearing _succ a contending thread
1168 // *must* retry _owner before parking.
1169 OrderAccess::fence();
1170
1171 // See comment in notify_internal
1172 do_timed_parked |= currentNode->_do_timed_park;
1173 }
1174
1175 // Current has acquired the lock -- Unlink current from the _entry_list.
1176 assert(has_owner(current), "invariant");
1177 assert_mark_word_consistency();
1178 unlink_after_acquire(current, currentNode);
1179 if (has_successor(current)) clear_successor();
1180 assert(!has_successor(current), "invariant");
1181 currentNode->TState = ObjectWaiter::TS_RUN;
1182 OrderAccess::fence(); // see comments at the end of enter_internal()
1183 }
1184
1185 // This method is called from two places:
1186 // - On monitorenter contention with a null waiter.
1187 // - After Object.wait() times out or the target is interrupted, to reenter the
1188 //   monitor with the existing waiter.
1189 // For the Object.wait() case we do not delete the ObjectWaiter in case we
1190 // successfully acquire the monitor since we are going to need it on return.
1191 bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
1192 if (try_lock(current) == TryLockResult::Success) {
1193 assert(has_owner(current), "invariant");
1194 assert(!has_successor(current), "invariant");
1195 return true;
1196 }
1197
1198 oop vthread = current->vthread();
1199 ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);
1200
1201 // Increment counter *before* adding the vthread to the _entry_list.
1202 // Adding to _entry_list uses Atomic::cmpxchg() which already provides
1203 // a fence that prevents reordering of the stores.
1204 inc_unmounted_vthreads();
1205
1206 if (try_lock_or_add_to_entry_list(current, node)) {
1207 // We got the lock.
1208 if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
1209 dec_unmounted_vthreads();
1210 return true;
1211 }
1212 // This thread is now added to the entry_list.
1213
1214   // We have to try once more since the owner could have exited the monitor and checked
1215 // _entry_list before we added the node to the queue.
1216 if (try_lock(current) == TryLockResult::Success) {
1217 assert(has_owner(current), "invariant");
1218 unlink_after_acquire(current, node);
1219 if (has_successor(current)) clear_successor();
1220 if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
1221 dec_unmounted_vthreads();
1222 return true;
1223 }
1224
1225 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1226 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1227
1228 // We didn't succeed in acquiring the monitor so increment _contentions and
1229 // save ObjectWaiter* in the vthread since we will need it when resuming execution.
1230 add_to_contentions(1);
1231 java_lang_VirtualThread::set_objectWaiter(vthread, node);
1232 return false;
1233 }
1234
1235 // Called from thaw code to resume the monitor operation that caused the vthread
1236 // to be unmounted. Method returns true if the monitor is successfully acquired,
1237 // which marks the end of the monitor operation, otherwise it returns false.
1238 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1239 assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1240 assert(!has_owner(current), "");
1241
1257 oop vthread = current->vthread();
1258 if (has_successor(current)) clear_successor();
1259
1260 // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1261 OrderAccess::fence();
1262
1263 if (try_lock(current) == TryLockResult::Success) {
1264 vthread_epilog(current, node);
1265 return true;
1266 }
1267
1268 // We will return to Continuation.run() and unmount so set the right state.
1269 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1270
1271 return false;
1272 }
1273
1274 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1275 assert(has_owner(current), "invariant");
1276 add_to_contentions(-1);
1277 dec_unmounted_vthreads();
1278
1279 if (has_successor(current)) clear_successor();
1280
1281 guarantee(_recursions == 0, "invariant");
1282
1283 if (node->is_wait()) {
1284 _recursions = node->_recursions; // restore the old recursion count
1285 _waiters--; // decrement the number of waiters
1286
1287 if (node->_interrupted) {
1288 // We will throw at thaw end after finishing the mount transition.
1289 current->set_pending_interrupted_exception(true);
1290 }
1291 }
1292
1293 unlink_after_acquire(current, node);
1294 delete node;
1295
1296 // Clear the ObjectWaiter* from the vthread.
1297 java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);
1802 // consume an unpark() meant for the ParkEvent associated with
1803 // this ObjectMonitor.
1804 }
1805 if (wait_event.should_commit()) {
1806 post_monitor_wait_event(&wait_event, this, 0, millis, false);
1807 }
1808 THROW(vmSymbols::java_lang_InterruptedException());
1809 return;
1810 }
1811
1812 freeze_result result;
1813 ContinuationEntry* ce = current->last_continuation();
1814 bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1815 if (is_virtual) {
1816 if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1817 JvmtiExport::post_monitor_wait(current, object(), millis);
1818 }
1819 current->set_current_waiting_monitor(this);
1820 result = Continuation::try_preempt(current, ce->cont_oop(current));
1821 if (result == freeze_ok) {
1822 vthread_wait(current, millis, interruptible);
1823 current->set_current_waiting_monitor(nullptr);
1824 return;
1825 }
1826 }
1827 // The jtiows does nothing for non-interruptible.
1828 JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1829
1830 if (!is_virtual) { // it was already set for virtual thread
1831 if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1832 JvmtiExport::post_monitor_wait(current, object(), millis);
1833
1834 // The current thread already owns the monitor and it has not yet
1835 // been added to the wait queue so the current thread cannot be
1836 // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1837 // event handler cannot accidentally consume an unpark() meant for
1838 // the ParkEvent associated with this ObjectMonitor.
1839 }
1840 current->set_current_waiting_monitor(this);
1841 }
1842 // create a node to be put into the queue
2032 Thread::SpinAcquire(&_wait_set_lock);
2033 ObjectWaiter* iterator = dequeue_waiter();
2034 if (iterator != nullptr) {
2035 guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
2036 guarantee(!iterator->_notified, "invariant");
2037
2038 if (iterator->is_vthread()) {
2039 oop vthread = iterator->vthread();
2040 java_lang_VirtualThread::set_notified(vthread, true);
2041 int old_state = java_lang_VirtualThread::state(vthread);
2042 // If state is not WAIT/TIMED_WAIT then target could still be on
2043       // unmount transition, or wait could have already timed out or target
2044 // could have been interrupted. In the first case, the target itself
2045 // will set the state to BLOCKED at the end of the unmount transition.
2046 // In the other cases the target would have been already unblocked so
2047 // there is nothing to do.
2048 if (old_state == java_lang_VirtualThread::WAIT ||
2049 old_state == java_lang_VirtualThread::TIMED_WAIT) {
2050 java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2051 }
2052 // Increment counter *before* adding the vthread to the _entry_list.
2053 // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2054 // a fence that prevents reordering of the stores.
2055 inc_unmounted_vthreads();
2056 }
2057
2058 iterator->_notified = true;
2059 iterator->_notifier_tid = JFR_THREAD_ID(current);
2060 did_notify = true;
2061 add_to_entry_list(current, iterator);
2062
2063 // _wait_set_lock protects the wait queue, not the entry_list. We could
2064 // move the add-to-entry_list operation, above, outside the critical section
2065 // protected by _wait_set_lock. In practice that's not useful. With the
2066 // exception of wait() timeouts and interrupts the monitor owner
2067 // is the only thread that grabs _wait_set_lock. There's almost no contention
2068 // on _wait_set_lock so it's not profitable to reduce the length of the
2069 // critical section.
2070
2071 if (!iterator->is_vthread()) {
2072 iterator->wait_reenter_begin(this);
2073
2074 // Read counter *after* adding the thread to the _entry_list.
2075 // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2076       // a fence that prevents this load from floating up past the previous store.
2077 if (has_unmounted_vthreads()) {
2078         // Wake up the thread to alleviate some deadlock cases where the successor
2079         // that will be picked when this thread releases the monitor is an unmounted
2080 // virtual thread that cannot run due to having run out of carriers. Upon waking
2081 // up, the thread will call reenter_internal() which will use timed-park in case
2082 // there is contention and there are still vthreads in the _entry_list.
2083         // If the target was interrupted or the wait timed out at the same time, it could
2084 // have reached reenter_internal and read a false value of has_unmounted_vthreads()
2085 // before we added it to the _entry_list above. To fix that, we set _do_timed_park
2086 // which will be read by the target on the next loop iteration in reenter_internal.
2087 iterator->_do_timed_park = true;
2088 JavaThread* t = iterator->thread();
2089 t->_ParkEvent->unpark();
2090 }
2091 }
2092 }
2093 Thread::SpinRelease(&_wait_set_lock);
2094 return did_notify;
2095 }
2096
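The WAIT/TIMED_WAIT to BLOCKED transition in notify_internal() above uses a compare-and-exchange so that a target which concurrently timed out or was interrupted, and has therefore already moved itself out of the waiting state, is left untouched. A standalone sketch of that "transition only if still in the expected state" idiom, with invented state constants rather than the java_lang_VirtualThread ones:

#include <atomic>
#include <cstdio>

enum VState { RUNNING = 0, WAIT = 1, TIMED_WAIT = 2, BLOCKED = 3 };  // invented values

std::atomic<int> g_state{WAIT};

// Perform WAIT/TIMED_WAIT -> BLOCKED only if the target is still waiting.
// If it already left the waiting state on its own there is nothing to do,
// mirroring the cmpxchg_state() call above.
bool block_if_still_waiting() {
  int old_state = g_state.load();
  if (old_state == WAIT || old_state == TIMED_WAIT) {
    return g_state.compare_exchange_strong(old_state, BLOCKED);
  }
  return false;
}

int main() {
  std::printf("transitioned=%d state=%d\n", (int)block_if_still_waiting(), g_state.load());
  return 0;
}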
2097 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2098 ObjectMonitor* monitor,
2099 int notified_count) {
2100 assert(event != nullptr, "invariant");
2101 assert(monitor != nullptr, "invariant");
2102 const Klass* monitor_klass = monitor->object()->klass();
2103 if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2104 return;
2105 }
2106 event->set_monitorClass(monitor_klass);
2107 // Set an address that is 'unique enough', such that events close in
2108 // time and with the same address are likely (but not guaranteed) to
2109 // belong to the same object.
2110 event->set_address((uintptr_t)monitor);
2160 quick_notifyAll(current);
2161 }
2162
2163 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2164 assert(has_owner(current), "Precondition");
2165
2166 EventJavaMonitorNotify event;
2167 DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2168 int tally = 0;
2169 while (_wait_set != nullptr) {
2170 if (notify_internal(current)) {
2171 tally++;
2172 }
2173 }
2174
2175 if ((tally > 0) && event.should_commit()) {
2176 post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2177 }
2178 }
2179
2180 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interruptible) {
2181 oop vthread = current->vthread();
2182 ObjectWaiter* node = new ObjectWaiter(vthread, this);
2183 node->_is_wait = true;
2184 node->_interruptible = interruptible;
2185 node->TState = ObjectWaiter::TS_WAIT;
2186 java_lang_VirtualThread::set_notified(vthread, false); // Reset notified flag
2187 java_lang_VirtualThread::set_interruptible_wait(vthread, interruptible);
2188
2189 // Enter the waiting queue, which is a circular doubly linked list in this case
2190 // but it could be a priority queue or any data structure.
2191 // _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
2192 // by the owner of the monitor *except* in the case where park()
2193 // returns because of a timeout or interrupt. Contention is exceptionally rare
2194 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2195
2196 Thread::SpinAcquire(&_wait_set_lock);
2197 add_waiter(node);
2198 Thread::SpinRelease(&_wait_set_lock);
2199
2200 node->_recursions = _recursions; // record the old recursion count
2201 _recursions = 0; // set the recursion level to be 0
2202 _waiters++; // increment the number of waiters
2203 exit(current); // exit the monitor
2204 guarantee(!has_owner(current), "invariant");
2205
2206 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2207 java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);
2213
2214 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2215 // The first time we run after being preempted on Object.wait() we
2216   // need to check if we were interrupted or the wait timed out, and
2217 // in that case remove ourselves from the _wait_set queue.
2218 if (node->TState == ObjectWaiter::TS_WAIT) {
2219 Thread::SpinAcquire(&_wait_set_lock);
2220 if (node->TState == ObjectWaiter::TS_WAIT) {
2221 dequeue_specific_waiter(node); // unlink from wait_set
2222 assert(!node->_notified, "invariant");
2223 node->TState = ObjectWaiter::TS_RUN;
2224 }
2225 Thread::SpinRelease(&_wait_set_lock);
2226 }
2227
2228 // If this was an interrupted case, set the _interrupted boolean so that
2229 // once we re-acquire the monitor we know if we need to throw IE or not.
2230 ObjectWaiter::TStates state = node->TState;
2231 bool was_notified = state == ObjectWaiter::TS_ENTER;
2232 assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2233 node->_interrupted = node->_interruptible && !was_notified && current->is_interrupted(false);
2234
2235   // Post JFR and JVMTI events. If non-interruptible we are in the
2236   // ObjectLocker case, so we don't post anything.
2237 EventJavaMonitorWait wait_event;
2238 if (node->_interruptible && (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited())) {
2239 vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2240 }
2241
2242 // Mark that we are at reenter so that we don't call this method again.
2243 node->_at_reenter = true;
2244
2245 if (!was_notified) {
2246 bool acquired = vthread_monitor_enter(current, node);
2247 if (acquired) {
2248 guarantee(_recursions == 0, "invariant");
2249 _recursions = node->_recursions; // restore the old recursion count
2250 _waiters--; // decrement the number of waiters
2251
2252 if (node->_interrupted) {
2253 // We will throw at thaw end after finishing the mount transition.
2254 current->set_pending_interrupted_exception(true);
2255 }
2256
2257 delete node;
2258 // Clear the ObjectWaiter* from the vthread.
2513
2514 return false;
2515 }
2516
2517
2518 // -----------------------------------------------------------------------------
2519 // wait_set management ...
2520
2521 ObjectWaiter::ObjectWaiter(JavaThread* current) {
2522 _next = nullptr;
2523 _prev = nullptr;
2524 _thread = current;
2525 _monitor = nullptr;
2526 _notifier_tid = 0;
2527 _recursions = 0;
2528 TState = TS_RUN;
2529 _notified = false;
2530 _is_wait = false;
2531 _at_reenter = false;
2532 _interrupted = false;
2533 _do_timed_park = false;
2534 _active = false;
2535 }
2536
2537 ObjectWaiter::ObjectWaiter(oop vthread, ObjectMonitor* mon) : ObjectWaiter(nullptr) {
2538 assert(oopDesc::is_oop(vthread), "");
2539 _vthread = OopHandle(JavaThread::thread_oop_storage(), vthread);
2540 _monitor = mon;
2541 }
2542
2543 ObjectWaiter::~ObjectWaiter() {
2544 if (is_vthread()) {
2545 assert(vthread() != nullptr, "");
2546 _vthread.release(JavaThread::thread_oop_storage());
2547 }
2548 }
2549
2550 oop ObjectWaiter::vthread() const {
2551 return _vthread.resolve();
2552 }
2553
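For a vthread waiter, the ObjectWaiter constructor above allocates an OopHandle in the thread oop storage and the destructor releases it, keeping the virtual thread oop reachable for as long as the waiter exists. The acquire-in-constructor / release-in-destructor shape can be illustrated with a small standalone analogue; Registry and Handle below are invented for the sketch and are not the HotSpot OopStorage/OopHandle API.

#include <cstdio>
#include <unordered_set>

// Hypothetical stand-in for an oop storage: a set of roots a GC would scan.
struct Registry {
  std::unordered_set<const void*> live;
};
static Registry g_registry;

class Handle {                          // loose analogue of OopHandle
  const void* _ref = nullptr;
 public:
  Handle() = default;
  explicit Handle(const void* ref) : _ref(ref) { g_registry.live.insert(ref); }  // allocate in ctor
  ~Handle() { if (_ref != nullptr) g_registry.live.erase(_ref); }                // release in dtor
  const void* resolve() const { return _ref; }
};

int main() {
  int obj = 42;
  {
    Handle h(&obj);                     // referent kept registered while the handle lives
    std::printf("live roots: %zu\n", g_registry.live.size());
  }                                     // released here, like ~ObjectWaiter()
  std::printf("live roots: %zu\n", g_registry.live.size());
  return 0;
}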
2686 st->print_cr(" _object = " INTPTR_FORMAT, p2i(object_peek()));
2687 st->print_cr(" _pad_buf0 = {");
2688 st->print_cr(" [0] = '\\0'");
2689 st->print_cr(" ...");
2690 st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2691 st->print_cr(" }");
2692 st->print_cr(" _owner = " INT64_FORMAT, owner_raw());
2693 st->print_cr(" _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2694 st->print_cr(" _pad_buf1 = {");
2695 st->print_cr(" [0] = '\\0'");
2696 st->print_cr(" ...");
2697 st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2698 st->print_cr(" }");
2699 st->print_cr(" _next_om = " INTPTR_FORMAT, p2i(next_om()));
2700 st->print_cr(" _recursions = %zd", _recursions);
2701 st->print_cr(" _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2702 st->print_cr(" _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2703 st->print_cr(" _succ = " INT64_FORMAT, successor());
2704 st->print_cr(" _SpinDuration = %d", _SpinDuration);
2705 st->print_cr(" _contentions = %d", contentions());
2706 st->print_cr(" _unmounted_vthreads = " INT64_FORMAT, _unmounted_vthreads);
2707 st->print_cr(" _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2708 st->print_cr(" _waiters = %d", _waiters);
2709 st->print_cr(" _wait_set_lock = %d", _wait_set_lock);
2710 st->print_cr("}");
2711 }
2712 #endif