< prev index next >

src/hotspot/share/runtime/objectMonitor.cpp

Print this page

 278   } else {
 279     // However, ThreadService::get_current_contended_monitor()
 280     // can call here via the VMThread so sanity check it.
 281     assert(self->is_VM_thread(), "must be");
 282   }
 283 #endif // ASSERT
 284 }
 285 
// Construct an ObjectMonitor for 'object' in its initial unowned,
// uncontended state: no owner, no entry list, no waiters.
ObjectMonitor::ObjectMonitor(oop object) :
  _metadata(0),
  _object(_oop_storage, object),  // handle to the monitored object, registered in _oop_storage
  _owner(NO_OWNER),
  _previous_owner_tid(0),
  _next_om(nullptr),
  _recursions(0),
  _entry_list(nullptr),           // list of threads blocked on monitorenter
  _entry_list_tail(nullptr),
  _succ(NO_OWNER),
  _SpinDuration(ObjectMonitor::Knob_SpinLimit),
  _contentions(0),
  _wait_set(nullptr),             // threads parked in Object.wait()
  _waiters(0),
  _wait_set_lock(0),              // spin lock protecting _wait_set
  _stack_locker(nullptr)
{ }
 303 
// Release the handle to the monitored object back to the OopStorage
// it was registered with in the constructor.
ObjectMonitor::~ObjectMonitor() {
  _object.release(_oop_storage);
}
 307 
// Return the monitored object, resolved through the OopStorage handle.
oop ObjectMonitor::object() const {
  check_object_context();  // debug-only (ASSERT build) sanity check of the calling thread's context
  return _object.resolve();
}
 312 
 313 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
 314   if (current->is_suspended()) {
 315     _om->_recursions = 0;
 316     _om->clear_successor();
 317     // Don't need a full fence after clearing successor here because of the call to exit().

 966 
 967   ObjectWaiter node(current);
 968   current->_ParkEvent->reset();
 969 
 970   if (try_lock_or_add_to_entry_list(current, &node)) {
 971     return; // We got the lock.
 972   }
 973   // This thread is now added to the _entry_list.
 974 
 975   // The lock might have been released while this thread was occupied queueing
 976   // itself onto _entry_list.  To close the race and avoid "stranding" and
 977   // progress-liveness failure we must resample-retry _owner before parking.
 978   // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
 979   // In this case the ST-MEMBAR is accomplished with CAS().
 980   //
 981   // TODO: Defer all thread state transitions until park-time.
 982   // Since state transitions are heavy and inefficient we'd like
 983   // to defer the state transitions until absolutely necessary,
 984   // and in doing so avoid some transitions ...
 985 
 986   // For virtual threads that are pinned, do a timed-park instead to
 987   // alleviate some deadlock cases where the successor is an unmounted
 988   // virtual thread that cannot run. This can happen in particular when
 989   // this virtual thread is currently loading/initializing a class, and
 990   // all other carriers have a vthread pinned to it waiting for said class
 991   // to be loaded/initialized.




 992   static int MAX_RECHECK_INTERVAL = 1000;
 993   int recheck_interval = 1;
 994   bool do_timed_parked = false;
 995   ContinuationEntry* ce = current->last_continuation();
 996   if (ce != nullptr && ce->is_virtual_thread()) {
 997     do_timed_parked = true;
 998   }
 999 
1000   for (;;) {
1001 
1002     if (try_lock(current) == TryLockResult::Success) {
1003       break;
1004     }
1005     assert(!has_owner(current), "invariant");
1006 
1007     // park self
1008     if (do_timed_parked) {
1009       current->_ParkEvent->park((jlong) recheck_interval);
1010       // Increase the recheck_interval, but clamp the value.
1011       recheck_interval *= 8;
1012       if (recheck_interval > MAX_RECHECK_INTERVAL) {
1013         recheck_interval = MAX_RECHECK_INTERVAL;
1014       }
1015     } else {
1016       current->_ParkEvent->park();
1017     }
1018 

1073   // the ST of null into _owner in the *subsequent* (following) corresponding
1074   // monitorexit.
1075 
1076   return;
1077 }
1078 
1079 // reenter_internal() is a specialized inline form of the latter half of the
1080 // contended slow-path from enter_internal().  We use reenter_internal() only for
1081 // monitor reentry in wait().
1082 //
1083 // In the future we should reconcile enter_internal() and reenter_internal().
1084 
1085 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1086   assert(current != nullptr, "invariant");
1087   assert(current->thread_state() != _thread_blocked, "invariant");
1088   assert(currentNode != nullptr, "invariant");
1089   assert(currentNode->_thread == current, "invariant");
1090   assert(_waiters > 0, "invariant");
1091   assert_mark_word_consistency();
1092 


















1093   for (;;) {
1094     ObjectWaiter::TStates v = currentNode->TState;
1095     guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1096     assert(!has_owner(current), "invariant");
1097 
1098     // This thread has been notified so try to reacquire the lock.
1099     if (try_lock(current) == TryLockResult::Success) {
1100       break;
1101     }
1102 
1103     // If that fails, spin again.  Note that spin count may be zero so the above TryLock
1104     // is necessary.
1105     if (try_spin(current)) {
1106         break;
1107     }
1108 
1109     {
1110       OSThreadContendState osts(current->osthread());
1111 
1112       assert(current->thread_state() == _thread_in_vm, "invariant");
1113 
1114       {
1115         ClearSuccOnSuspend csos(this);
1116         ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1117         current->_ParkEvent->park();









1118       }
1119     }
1120 
1121     // Try again, but just so we distinguish between futile wakeups and
1122     // successful wakeups.  The following test isn't algorithmically
1123     // necessary, but it helps us maintain sensible statistics.
1124     if (try_lock(current) == TryLockResult::Success) {
1125       break;
1126     }
1127 
1128     // The lock is still contested.
1129 
1130     // Assuming this is not a spurious wakeup we'll normally
1131     // find that _succ == current.
1132     if (has_successor(current)) clear_successor();
1133 
1134     // Invariant: after clearing _succ a contending thread
1135     // *must* retry  _owner before parking.
1136     OrderAccess::fence();
1137   }

1144   assert(!has_successor(current), "invariant");
1145   currentNode->TState = ObjectWaiter::TS_RUN;
1146   OrderAccess::fence();      // see comments at the end of enter_internal()
1147 }
1148 
1149 // This method is called from two places:
1150 // - On monitorenter contention with a null waiter.
1151 // - After Object.wait() times out or the target is interrupted to reenter the
1152 //   monitor, with the existing waiter.
1153 // For the Object.wait() case we do not delete the ObjectWaiter in case we
1154 // successfully acquire the monitor since we are going to need it on return.
bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
  // Fast path: uncontended, or the owner released the monitor just now.
  if (try_lock(current) == TryLockResult::Success) {
    assert(has_owner(current), "invariant");
    assert(!has_successor(current), "invariant");
    return true;
  }

  oop vthread = current->vthread();
  // On monitorenter contention 'waiter' is null and we allocate a fresh
  // ObjectWaiter; on reenter after Object.wait() the caller's node is reused.
  ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);
  if (try_lock_or_add_to_entry_list(current, node)) {
    // We got the lock.
    if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
    return true;
  }
  // This thread is now added to the entry_list.

  // We have to try once more since owner could have exited monitor and checked
  // _entry_list before we added the node to the queue.
  if (try_lock(current) == TryLockResult::Success) {
    assert(has_owner(current), "invariant");
    // We own the monitor now, so back our node out of the entry_list.
    unlink_after_acquire(current, node);
    if (has_successor(current)) clear_successor();
    if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
    return true;
  }

  // Acquisition failed: the virtual thread will block and unmount.
  assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
  java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);

  // We didn't succeed in acquiring the monitor so increment _contentions and
  // save ObjectWaiter* in the vthread since we will need it when resuming execution.
  add_to_contentions(1);
  java_lang_VirtualThread::set_objectWaiter(vthread, node);
  return false;
}
1190 
1191 // Called from thaw code to resume the monitor operation that caused the vthread
1192 // to be unmounted. Method returns true if the monitor is successfully acquired,
1193 // which marks the end of the monitor operation, otherwise it returns false.
1194 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1195   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1196   assert(!has_owner(current), "");
1197 

1213   oop vthread = current->vthread();
1214   if (has_successor(current)) clear_successor();
1215 
1216   // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1217   OrderAccess::fence();
1218 
1219   if (try_lock(current) == TryLockResult::Success) {
1220     vthread_epilog(current, node);
1221     return true;
1222   }
1223 
1224   // We will return to Continuation.run() and unmount so set the right state.
1225   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1226 
1227   return false;
1228 }
1229 
1230 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1231   assert(has_owner(current), "invariant");
1232   add_to_contentions(-1);

1233 
1234   if (has_successor(current)) clear_successor();
1235 
1236   guarantee(_recursions == 0, "invariant");
1237 
1238   if (node->is_wait()) {
1239     _recursions = node->_recursions;   // restore the old recursion count
1240     _waiters--;                        // decrement the number of waiters
1241 
1242     if (node->_interrupted) {
1243       // We will throw at thaw end after finishing the mount transition.
1244       current->set_pending_interrupted_exception(true);
1245     }
1246   }
1247 
1248   unlink_after_acquire(current, node);
1249   delete node;
1250 
1251   // Clear the ObjectWaiter* from the vthread.
1252   java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);

1757       // consume an unpark() meant for the ParkEvent associated with
1758       // this ObjectMonitor.
1759     }
1760     if (wait_event.should_commit()) {
1761       post_monitor_wait_event(&wait_event, this, 0, millis, false);
1762     }
1763     THROW(vmSymbols::java_lang_InterruptedException());
1764     return;
1765   }
1766 
1767   freeze_result result;
1768   ContinuationEntry* ce = current->last_continuation();
1769   bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1770   if (is_virtual) {
1771     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1772       JvmtiExport::post_monitor_wait(current, object(), millis);
1773     }
1774     current->set_current_waiting_monitor(this);
1775     result = Continuation::try_preempt(current, ce->cont_oop(current));
1776     if (result == freeze_ok) {
1777       vthread_wait(current, millis);
1778       current->set_current_waiting_monitor(nullptr);
1779       return;
1780     }
1781   }
1782   // The jtiows does nothing for non-interruptible.
1783   JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1784 
1785   if (!is_virtual) { // it was already set for virtual thread
1786     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1787       JvmtiExport::post_monitor_wait(current, object(), millis);
1788 
1789       // The current thread already owns the monitor and it has not yet
1790       // been added to the wait queue so the current thread cannot be
1791       // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1792       // event handler cannot accidentally consume an unpark() meant for
1793       // the ParkEvent associated with this ObjectMonitor.
1794     }
1795     current->set_current_waiting_monitor(this);
1796   }
1797   // create a node to be put into the queue

1987   Thread::SpinAcquire(&_wait_set_lock);
1988   ObjectWaiter* iterator = dequeue_waiter();
1989   if (iterator != nullptr) {
1990     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1991     guarantee(!iterator->_notified, "invariant");
1992 
1993     if (iterator->is_vthread()) {
1994       oop vthread = iterator->vthread();
1995       java_lang_VirtualThread::set_notified(vthread, true);
1996       int old_state = java_lang_VirtualThread::state(vthread);
1997       // If state is not WAIT/TIMED_WAIT then target could still be on
1998       // unmount transition, or wait could have already timed-out or target
1999       // could have been interrupted. In the first case, the target itself
2000       // will set the state to BLOCKED at the end of the unmount transition.
2001       // In the other cases the target would have been already unblocked so
2002       // there is nothing to do.
2003       if (old_state == java_lang_VirtualThread::WAIT ||
2004           old_state == java_lang_VirtualThread::TIMED_WAIT) {
2005         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2006       }




2007     }
2008 
2009     iterator->_notified = true;
2010     iterator->_notifier_tid = JFR_THREAD_ID(current);
2011     did_notify = true;
2012     add_to_entry_list(current, iterator);
2013 
2014     // _wait_set_lock protects the wait queue, not the entry_list.  We could
2015     // move the add-to-entry_list operation, above, outside the critical section
2016     // protected by _wait_set_lock.  In practice that's not useful.  With the
2017     // exception of  wait() timeouts and interrupts the monitor owner
2018     // is the only thread that grabs _wait_set_lock.  There's almost no contention
2019     // on _wait_set_lock so it's not profitable to reduce the length of the
2020     // critical section.
2021 
2022     if (!iterator->is_vthread()) {
2023       iterator->wait_reenter_begin(this);













2024     }
2025   }
2026   Thread::SpinRelease(&_wait_set_lock);
2027   return did_notify;
2028 }
2029 
2030 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2031                                       ObjectMonitor* monitor,
2032                                       int notified_count) {
2033   assert(event != nullptr, "invariant");
2034   assert(monitor != nullptr, "invariant");
2035   const Klass* monitor_klass = monitor->object()->klass();
2036   if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2037     return;
2038   }
2039   event->set_monitorClass(monitor_klass);
2040   // Set an address that is 'unique enough', such that events close in
2041   // time and with the same address are likely (but not guaranteed) to
2042   // belong to the same object.
2043   event->set_address((uintptr_t)monitor);

2093   quick_notifyAll(current);
2094 }
2095 
2096 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2097   assert(has_owner(current), "Precondition");
2098 
2099   EventJavaMonitorNotify event;
2100   DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2101   int tally = 0;
2102   while (_wait_set != nullptr) {
2103     if (notify_internal(current)) {
2104       tally++;
2105     }
2106   }
2107 
2108   if ((tally > 0) && event.should_commit()) {
2109     post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2110   }
2111 }
2112 
2113 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis) {
2114   oop vthread = current->vthread();
2115   ObjectWaiter* node = new ObjectWaiter(vthread, this);
2116   node->_is_wait = true;

2117   node->TState = ObjectWaiter::TS_WAIT;
2118   java_lang_VirtualThread::set_notified(vthread, false);  // Reset notified flag

2119 
2120   // Enter the waiting queue, which is a circular doubly linked list in this case
2121   // but it could be a priority queue or any data structure.
2122   // _wait_set_lock protects the wait queue.  Normally the wait queue is accessed only
2123   // by the owner of the monitor *except* in the case where park()
2124   // returns because of a timeout or interrupt.  Contention is exceptionally rare
2125   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2126 
2127   Thread::SpinAcquire(&_wait_set_lock);
2128   add_waiter(node);
2129   Thread::SpinRelease(&_wait_set_lock);
2130 
2131   node->_recursions = _recursions;   // record the old recursion count
2132   _recursions = 0;                   // set the recursion level to be 0
2133   _waiters++;                        // increment the number of waiters
2134   exit(current);                     // exit the monitor
2135   guarantee(!has_owner(current), "invariant");
2136 
2137   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2138   java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);

2144 
2145 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2146   // The first time we run after being preempted on Object.wait() we
2147   // need to check if we were interrupted or the wait timed-out, and
2148   // in that case remove ourselves from the _wait_set queue.
2149   if (node->TState == ObjectWaiter::TS_WAIT) {
2150     Thread::SpinAcquire(&_wait_set_lock);
2151     if (node->TState == ObjectWaiter::TS_WAIT) {
2152       dequeue_specific_waiter(node);       // unlink from wait_set
2153       assert(!node->_notified, "invariant");
2154       node->TState = ObjectWaiter::TS_RUN;
2155     }
2156     Thread::SpinRelease(&_wait_set_lock);
2157   }
2158 
2159   // If this was an interrupted case, set the _interrupted boolean so that
2160   // once we re-acquire the monitor we know if we need to throw IE or not.
2161   ObjectWaiter::TStates state = node->TState;
2162   bool was_notified = state == ObjectWaiter::TS_ENTER;
2163   assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2164   node->_interrupted = !was_notified && current->is_interrupted(false);
2165 
2166   // Post JFR and JVMTI events.

2167   EventJavaMonitorWait wait_event;
2168   if (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited()) {
2169     vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2170   }
2171 
2172   // Mark that we are at reenter so that we don't call this method again.
2173   node->_at_reenter = true;
2174 
2175   if (!was_notified) {
2176     bool acquired = vthread_monitor_enter(current, node);
2177     if (acquired) {
2178       guarantee(_recursions == 0, "invariant");
2179       _recursions = node->_recursions;   // restore the old recursion count
2180       _waiters--;                        // decrement the number of waiters
2181 
2182       if (node->_interrupted) {
2183         // We will throw at thaw end after finishing the mount transition.
2184         current->set_pending_interrupted_exception(true);
2185       }
2186 
2187       delete node;
2188       // Clear the ObjectWaiter* from the vthread.

2615   st->print_cr("  _object = " INTPTR_FORMAT, p2i(object_peek()));
2616   st->print_cr("  _pad_buf0 = {");
2617   st->print_cr("    [0] = '\\0'");
2618   st->print_cr("    ...");
2619   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2620   st->print_cr("  }");
2621   st->print_cr("  _owner = " INT64_FORMAT, owner_raw());
2622   st->print_cr("  _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2623   st->print_cr("  _pad_buf1 = {");
2624   st->print_cr("    [0] = '\\0'");
2625   st->print_cr("    ...");
2626   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2627   st->print_cr("  }");
2628   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2629   st->print_cr("  _recursions = %zd", _recursions);
2630   st->print_cr("  _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2631   st->print_cr("  _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2632   st->print_cr("  _succ = " INT64_FORMAT, successor());
2633   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2634   st->print_cr("  _contentions = %d", contentions());

2635   st->print_cr("  _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2636   st->print_cr("  _waiters = %d", _waiters);
2637   st->print_cr("  _wait_set_lock = %d", _wait_set_lock);
2638   st->print_cr("}");
2639 }
2640 #endif

 278   } else {
 279     // However, ThreadService::get_current_contended_monitor()
 280     // can call here via the VMThread so sanity check it.
 281     assert(self->is_VM_thread(), "must be");
 282   }
 283 #endif // ASSERT
 284 }
 285 
// Construct an ObjectMonitor for 'object' in its initial unowned,
// uncontended state: no owner, no entry list, no waiters.
ObjectMonitor::ObjectMonitor(oop object) :
  _metadata(0),
  _object(_oop_storage, object),  // handle to the monitored object, registered in _oop_storage
  _owner(NO_OWNER),
  _previous_owner_tid(0),
  _next_om(nullptr),
  _recursions(0),
  _entry_list(nullptr),           // list of threads blocked on monitorenter
  _entry_list_tail(nullptr),
  _succ(NO_OWNER),
  _SpinDuration(ObjectMonitor::Knob_SpinLimit),
  _contentions(0),
  _unmounted_vthreads(0),         // count of unmounted virtual threads blocked on the _entry_list
  _wait_set(nullptr),             // threads parked in Object.wait()
  _waiters(0),
  _wait_set_lock(0),              // spin lock protecting _wait_set
  _stack_locker(nullptr)
{ }
 304 
// Release the handle to the monitored object back to the OopStorage
// it was registered with in the constructor.
ObjectMonitor::~ObjectMonitor() {
  _object.release(_oop_storage);
}
 308 
// Return the monitored object, resolved through the OopStorage handle.
oop ObjectMonitor::object() const {
  check_object_context();  // debug-only (ASSERT build) sanity check of the calling thread's context
  return _object.resolve();
}
 313 
 314 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
 315   if (current->is_suspended()) {
 316     _om->_recursions = 0;
 317     _om->clear_successor();
 318     // Don't need a full fence after clearing successor here because of the call to exit().

 967 
 968   ObjectWaiter node(current);
 969   current->_ParkEvent->reset();
 970 
 971   if (try_lock_or_add_to_entry_list(current, &node)) {
 972     return; // We got the lock.
 973   }
 974   // This thread is now added to the _entry_list.
 975 
 976   // The lock might have been released while this thread was occupied queueing
 977   // itself onto _entry_list.  To close the race and avoid "stranding" and
 978   // progress-liveness failure we must resample-retry _owner before parking.
 979   // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
 980   // In this case the ST-MEMBAR is accomplished with CAS().
 981   //
 982   // TODO: Defer all thread state transitions until park-time.
 983   // Since state transitions are heavy and inefficient we'd like
 984   // to defer the state transitions until absolutely necessary,
 985   // and in doing so avoid some transitions ...
 986 
 987   // If there are unmounted virtual threads in the _entry_list do a timed-park
 988   // instead to alleviate some deadlock cases where one of them is picked as
 989   // the successor but cannot run due to having run out of carriers. This can
 990   // happen, for example, if this is a pinned virtual thread currently loading
 991   // or initializing a class, and all other carriers have a pinned vthread
 992   // waiting for said class to be loaded/initialized.
 993   // Read counter *after* adding this thread to the _entry_list.
 994   // Adding to _entry_list uses Atomic::cmpxchg() which already provides
 995   // a fence that prevents this load from floating up previous store.
 996   bool do_timed_parked = has_unmounted_vthreads();
 997   static int MAX_RECHECK_INTERVAL = 1000;
 998   int recheck_interval = 1;





 999 
1000   for (;;) {
1001 
1002     if (try_lock(current) == TryLockResult::Success) {
1003       break;
1004     }
1005     assert(!has_owner(current), "invariant");
1006 
1007     // park self
1008     if (do_timed_parked) {
1009       current->_ParkEvent->park((jlong) recheck_interval);
1010       // Increase the recheck_interval, but clamp the value.
1011       recheck_interval *= 8;
1012       if (recheck_interval > MAX_RECHECK_INTERVAL) {
1013         recheck_interval = MAX_RECHECK_INTERVAL;
1014       }
1015     } else {
1016       current->_ParkEvent->park();
1017     }
1018 

1073   // the ST of null into _owner in the *subsequent* (following) corresponding
1074   // monitorexit.
1075 
1076   return;
1077 }
1078 
1079 // reenter_internal() is a specialized inline form of the latter half of the
1080 // contended slow-path from enter_internal().  We use reenter_internal() only for
1081 // monitor reentry in wait().
1082 //
1083 // In the future we should reconcile enter_internal() and reenter_internal().
1084 
1085 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1086   assert(current != nullptr, "invariant");
1087   assert(current->thread_state() != _thread_blocked, "invariant");
1088   assert(currentNode != nullptr, "invariant");
1089   assert(currentNode->_thread == current, "invariant");
1090   assert(_waiters > 0, "invariant");
1091   assert_mark_word_consistency();
1092 
1093   // If there are unmounted virtual threads in the _entry_list do a timed-park
1094   // instead to alleviate some deadlocks cases where one of them is picked as
1095   // the successor but cannot run due to having run out of carriers. This can
1096   // happen, for example, if this is a pinned virtual thread (or plain carrier)
1097   // waiting for a class to be initialized.
1098   // In theory we only get here in the "notification" case where the thread has
1099   // already been added to the _entry_list. But if the thread happened to be interrupted
1100   // at the same time it was being notified, we could have read a state of TS_ENTER
1101   // that led us here but the thread hasn't been added yet to the queue. In that
1102   // case getting a false value from has_unmounted_vthreads() is not a guarantee
1103   // that vthreads weren't added before this thread to the _entry_list. We will live
1104   // with this corner case not only because it would be very rare, but also because
1105   // if there are several carriers blocked in this same situation, this would only
1106   // happen for the first one notified.
1107   bool do_timed_parked = has_unmounted_vthreads();
1108   static int MAX_RECHECK_INTERVAL = 1000;
1109   int recheck_interval = 1;
1110 
1111   for (;;) {
1112     ObjectWaiter::TStates v = currentNode->TState;
1113     guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1114     assert(!has_owner(current), "invariant");
1115 
1116     // This thread has been notified so try to reacquire the lock.
1117     if (try_lock(current) == TryLockResult::Success) {
1118       break;
1119     }
1120 
1121     // If that fails, spin again.  Note that spin count may be zero so the above TryLock
1122     // is necessary.
1123     if (try_spin(current)) {
1124         break;
1125     }
1126 
1127     {
1128       OSThreadContendState osts(current->osthread());
1129 
1130       assert(current->thread_state() == _thread_in_vm, "invariant");
1131 
1132       {
1133         ClearSuccOnSuspend csos(this);
1134         ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1135         if (do_timed_parked) {
1136           current->_ParkEvent->park((jlong) recheck_interval);
1137           // Increase the recheck_interval, but clamp the value.
1138           recheck_interval *= 8;
1139           if (recheck_interval > MAX_RECHECK_INTERVAL) {
1140             recheck_interval = MAX_RECHECK_INTERVAL;
1141           }
1142         } else {
1143           current->_ParkEvent->park();
1144         }
1145       }
1146     }
1147 
1148     // Try again, but just so we distinguish between futile wakeups and
1149     // successful wakeups.  The following test isn't algorithmically
1150     // necessary, but it helps us maintain sensible statistics.
1151     if (try_lock(current) == TryLockResult::Success) {
1152       break;
1153     }
1154 
1155     // The lock is still contested.
1156 
1157     // Assuming this is not a spurious wakeup we'll normally
1158     // find that _succ == current.
1159     if (has_successor(current)) clear_successor();
1160 
1161     // Invariant: after clearing _succ a contending thread
1162     // *must* retry  _owner before parking.
1163     OrderAccess::fence();
1164   }

1171   assert(!has_successor(current), "invariant");
1172   currentNode->TState = ObjectWaiter::TS_RUN;
1173   OrderAccess::fence();      // see comments at the end of enter_internal()
1174 }
1175 
1176 // This method is called from two places:
1177 // - On monitorenter contention with a null waiter.
1178 // - After Object.wait() times out or the target is interrupted to reenter the
1179 //   monitor, with the existing waiter.
1180 // For the Object.wait() case we do not delete the ObjectWaiter in case we
1181 // successfully acquire the monitor since we are going to need it on return.
bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
  // Fast path: uncontended, or the owner released the monitor just now.
  if (try_lock(current) == TryLockResult::Success) {
    assert(has_owner(current), "invariant");
    assert(!has_successor(current), "invariant");
    return true;
  }

  oop vthread = current->vthread();
  // On monitorenter contention 'waiter' is null and we allocate a fresh
  // ObjectWaiter; on reenter after Object.wait() the caller's node is reused.
  ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);

  // Increment counter *before* adding the vthread to the _entry_list.
  // Adding to _entry_list uses Atomic::cmpxchg() which already provides
  // a fence that prevents reordering of the stores.
  inc_unmounted_vthreads();

  if (try_lock_or_add_to_entry_list(current, node)) {
    // We got the lock.
    if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
    dec_unmounted_vthreads();  // we acquired the monitor, so we never block unmounted
    return true;
  }
  // This thread is now added to the entry_list.

  // We have to try once more since owner could have exited monitor and checked
  // _entry_list before we added the node to the queue.
  if (try_lock(current) == TryLockResult::Success) {
    assert(has_owner(current), "invariant");
    // We own the monitor now, so back our node out of the entry_list.
    unlink_after_acquire(current, node);
    if (has_successor(current)) clear_successor();
    if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
    dec_unmounted_vthreads();  // we acquired the monitor, so we never block unmounted
    return true;
  }

  // Acquisition failed: the virtual thread will block and unmount.
  assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
  java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);

  // We didn't succeed in acquiring the monitor so increment _contentions and
  // save ObjectWaiter* in the vthread since we will need it when resuming execution.
  add_to_contentions(1);
  java_lang_VirtualThread::set_objectWaiter(vthread, node);
  return false;
}
1225 
1226 // Called from thaw code to resume the monitor operation that caused the vthread
1227 // to be unmounted. Method returns true if the monitor is successfully acquired,
1228 // which marks the end of the monitor operation, otherwise it returns false.
1229 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1230   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1231   assert(!has_owner(current), "");
1232 

1248   oop vthread = current->vthread();
1249   if (has_successor(current)) clear_successor();
1250 
1251   // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1252   OrderAccess::fence();
1253 
1254   if (try_lock(current) == TryLockResult::Success) {
1255     vthread_epilog(current, node);
1256     return true;
1257   }
1258 
1259   // We will return to Continuation.run() and unmount so set the right state.
1260   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1261 
1262   return false;
1263 }
1264 
1265 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1266   assert(has_owner(current), "invariant");
1267   add_to_contentions(-1);
1268   dec_unmounted_vthreads();
1269 
1270   if (has_successor(current)) clear_successor();
1271 
1272   guarantee(_recursions == 0, "invariant");
1273 
1274   if (node->is_wait()) {
1275     _recursions = node->_recursions;   // restore the old recursion count
1276     _waiters--;                        // decrement the number of waiters
1277 
1278     if (node->_interrupted) {
1279       // We will throw at thaw end after finishing the mount transition.
1280       current->set_pending_interrupted_exception(true);
1281     }
1282   }
1283 
1284   unlink_after_acquire(current, node);
1285   delete node;
1286 
1287   // Clear the ObjectWaiter* from the vthread.
1288   java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);

1793       // consume an unpark() meant for the ParkEvent associated with
1794       // this ObjectMonitor.
1795     }
1796     if (wait_event.should_commit()) {
1797       post_monitor_wait_event(&wait_event, this, 0, millis, false);
1798     }
1799     THROW(vmSymbols::java_lang_InterruptedException());
1800     return;
1801   }
1802 
1803   freeze_result result;
1804   ContinuationEntry* ce = current->last_continuation();
1805   bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1806   if (is_virtual) {
1807     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1808       JvmtiExport::post_monitor_wait(current, object(), millis);
1809     }
1810     current->set_current_waiting_monitor(this);
1811     result = Continuation::try_preempt(current, ce->cont_oop(current));
1812     if (result == freeze_ok) {
1813       vthread_wait(current, millis, interruptible);
1814       current->set_current_waiting_monitor(nullptr);
1815       return;
1816     }
1817   }
1818   // The jtiows does nothing for non-interruptible.
1819   JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1820 
1821   if (!is_virtual) { // it was already set for virtual thread
1822     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1823       JvmtiExport::post_monitor_wait(current, object(), millis);
1824 
1825       // The current thread already owns the monitor and it has not yet
1826       // been added to the wait queue so the current thread cannot be
1827       // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1828       // event handler cannot accidentally consume an unpark() meant for
1829       // the ParkEvent associated with this ObjectMonitor.
1830     }
1831     current->set_current_waiting_monitor(this);
1832   }
1833   // create a node to be put into the queue

2023   Thread::SpinAcquire(&_wait_set_lock);
2024   ObjectWaiter* iterator = dequeue_waiter();
2025   if (iterator != nullptr) {
2026     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
2027     guarantee(!iterator->_notified, "invariant");
2028 
2029     if (iterator->is_vthread()) {
2030       oop vthread = iterator->vthread();
2031       java_lang_VirtualThread::set_notified(vthread, true);
2032       int old_state = java_lang_VirtualThread::state(vthread);
2033       // If state is not WAIT/TIMED_WAIT then target could still be on
2034       // unmount transition, or wait could have already timed-out or target
2035       // could have been interrupted. In the first case, the target itself
2036       // will set the state to BLOCKED at the end of the unmount transition.
2037       // In the other cases the target would have been already unblocked so
2038       // there is nothing to do.
2039       if (old_state == java_lang_VirtualThread::WAIT ||
2040           old_state == java_lang_VirtualThread::TIMED_WAIT) {
2041         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2042       }
2043       // Increment counter *before* adding the vthread to the _entry_list.
2044       // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2045       // a fence that prevents reordering of the stores.
2046       inc_unmounted_vthreads();
2047     }
2048 
2049     iterator->_notified = true;
2050     iterator->_notifier_tid = JFR_THREAD_ID(current);
2051     did_notify = true;
2052     add_to_entry_list(current, iterator);
2053 
2054     // _wait_set_lock protects the wait queue, not the entry_list.  We could
2055     // move the add-to-entry_list operation, above, outside the critical section
2056     // protected by _wait_set_lock.  In practice that's not useful.  With the
2057     // exception of  wait() timeouts and interrupts the monitor owner
2058     // is the only thread that grabs _wait_set_lock.  There's almost no contention
2059     // on _wait_set_lock so it's not profitable to reduce the length of the
2060     // critical section.
2061 
2062     if (!iterator->is_vthread()) {
2063       iterator->wait_reenter_begin(this);
2064 
2065       // Read counter *after* adding the thread to the _entry_list.
2066       // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2067       // a fence that prevents this load from floating up previous store.
2068       if (has_unmounted_vthreads()) {
2069         // Wake up the thread to alleviate some deadlocks cases where the successor
2070         // that will be picked up when this thread releases the monitor is an unmounted
2071         // virtual thread that cannot run due to having run out of carriers. Upon waking
2072         // up, the thread will call reenter_internal() which will use time-park in case
2073         // there is contention and there are still vthreads in the _entry_list.
2074         JavaThread* t = iterator->thread();
2075         t->_ParkEvent->unpark();
2076       }
2077     }
2078   }
2079   Thread::SpinRelease(&_wait_set_lock);
2080   return did_notify;
2081 }
2082 
2083 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2084                                       ObjectMonitor* monitor,
2085                                       int notified_count) {
2086   assert(event != nullptr, "invariant");
2087   assert(monitor != nullptr, "invariant");
2088   const Klass* monitor_klass = monitor->object()->klass();
2089   if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2090     return;
2091   }
2092   event->set_monitorClass(monitor_klass);
2093   // Set an address that is 'unique enough', such that events close in
2094   // time and with the same address are likely (but not guaranteed) to
2095   // belong to the same object.
2096   event->set_address((uintptr_t)monitor);

2146   quick_notifyAll(current);
2147 }
2148 
2149 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2150   assert(has_owner(current), "Precondition");
2151 
2152   EventJavaMonitorNotify event;
2153   DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2154   int tally = 0;
2155   while (_wait_set != nullptr) {
2156     if (notify_internal(current)) {
2157       tally++;
2158     }
2159   }
2160 
2161   if ((tally > 0) && event.should_commit()) {
2162     post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2163   }
2164 }
2165 
2166 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interruptible) {
2167   oop vthread = current->vthread();
2168   ObjectWaiter* node = new ObjectWaiter(vthread, this);
2169   node->_is_wait = true;
2170   node->_interruptible = interruptible;
2171   node->TState = ObjectWaiter::TS_WAIT;
2172   java_lang_VirtualThread::set_notified(vthread, false);  // Reset notified flag
2173   java_lang_VirtualThread::set_interruptible_wait(vthread, interruptible);
2174 
2175   // Enter the waiting queue, which is a circular doubly linked list in this case
2176   // but it could be a priority queue or any data structure.
2177   // _wait_set_lock protects the wait queue.  Normally the wait queue is accessed only
2178   // by the owner of the monitor *except* in the case where park()
2179   // returns because of a timeout or interrupt.  Contention is exceptionally rare
2180   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2181 
2182   Thread::SpinAcquire(&_wait_set_lock);
2183   add_waiter(node);
2184   Thread::SpinRelease(&_wait_set_lock);
2185 
2186   node->_recursions = _recursions;   // record the old recursion count
2187   _recursions = 0;                   // set the recursion level to be 0
2188   _waiters++;                        // increment the number of waiters
2189   exit(current);                     // exit the monitor
2190   guarantee(!has_owner(current), "invariant");
2191 
2192   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2193   java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);

2199 
2200 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2201   // The first time we run after being preempted on Object.wait() we
2202   // need to check if we were interrupted or the wait timed-out, and
2203   // in that case remove ourselves from the _wait_set queue.
2204   if (node->TState == ObjectWaiter::TS_WAIT) {
2205     Thread::SpinAcquire(&_wait_set_lock);
2206     if (node->TState == ObjectWaiter::TS_WAIT) {
2207       dequeue_specific_waiter(node);       // unlink from wait_set
2208       assert(!node->_notified, "invariant");
2209       node->TState = ObjectWaiter::TS_RUN;
2210     }
2211     Thread::SpinRelease(&_wait_set_lock);
2212   }
2213 
2214   // If this was an interrupted case, set the _interrupted boolean so that
2215   // once we re-acquire the monitor we know if we need to throw IE or not.
2216   ObjectWaiter::TStates state = node->TState;
2217   bool was_notified = state == ObjectWaiter::TS_ENTER;
2218   assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2219   node->_interrupted = node->_interruptible && !was_notified && current->is_interrupted(false);
2220 
2221   // Post JFR and JVMTI events. If non-interruptible we are in
2222   // ObjectLocker case so we don't post anything.
2223   EventJavaMonitorWait wait_event;
2224   if (node->_interruptible && (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited())) {
2225     vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2226   }
2227 
2228   // Mark that we are at reenter so that we don't call this method again.
2229   node->_at_reenter = true;
2230 
2231   if (!was_notified) {
2232     bool acquired = vthread_monitor_enter(current, node);
2233     if (acquired) {
2234       guarantee(_recursions == 0, "invariant");
2235       _recursions = node->_recursions;   // restore the old recursion count
2236       _waiters--;                        // decrement the number of waiters
2237 
2238       if (node->_interrupted) {
2239         // We will throw at thaw end after finishing the mount transition.
2240         current->set_pending_interrupted_exception(true);
2241       }
2242 
2243       delete node;
2244       // Clear the ObjectWaiter* from the vthread.

2671   st->print_cr("  _object = " INTPTR_FORMAT, p2i(object_peek()));
2672   st->print_cr("  _pad_buf0 = {");
2673   st->print_cr("    [0] = '\\0'");
2674   st->print_cr("    ...");
2675   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2676   st->print_cr("  }");
2677   st->print_cr("  _owner = " INT64_FORMAT, owner_raw());
2678   st->print_cr("  _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2679   st->print_cr("  _pad_buf1 = {");
2680   st->print_cr("    [0] = '\\0'");
2681   st->print_cr("    ...");
2682   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2683   st->print_cr("  }");
2684   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2685   st->print_cr("  _recursions = %zd", _recursions);
2686   st->print_cr("  _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2687   st->print_cr("  _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2688   st->print_cr("  _succ = " INT64_FORMAT, successor());
2689   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2690   st->print_cr("  _contentions = %d", contentions());
2691   st->print_cr("  _unmounted_vthreads = " INT64_FORMAT, _unmounted_vthreads);
2692   st->print_cr("  _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2693   st->print_cr("  _waiters = %d", _waiters);
2694   st->print_cr("  _wait_set_lock = %d", _wait_set_lock);
2695   st->print_cr("}");
2696 }
2697 #endif
< prev index next >