
src/hotspot/share/runtime/objectMonitor.cpp

 278   } else {
 279     // However, ThreadService::get_current_contended_monitor()
 280     // can call here via the VMThread so sanity check it.
 281     assert(self->is_VM_thread(), "must be");
 282   }
 283 #endif // ASSERT
 284 }
 285 
 286 ObjectMonitor::ObjectMonitor(oop object) :
 287   _metadata(0),
 288   _object(_oop_storage, object),
 289   _owner(NO_OWNER),
 290   _previous_owner_tid(0),
 291   _next_om(nullptr),
 292   _recursions(0),
 293   _entry_list(nullptr),
 294   _entry_list_tail(nullptr),
 295   _succ(NO_OWNER),
 296   _SpinDuration(ObjectMonitor::Knob_SpinLimit),
 297   _contentions(0),

 298   _wait_set(nullptr),
 299   _waiters(0),
 300   _wait_set_lock(0),
 301   _stack_locker(nullptr)
 302 { }
 303 
 304 ObjectMonitor::~ObjectMonitor() {
 305   _object.release(_oop_storage);
 306 }
 307 
 308 oop ObjectMonitor::object() const {
 309   check_object_context();
 310   return _object.resolve();
 311 }
 312 
 313 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
 314   if (current->is_suspended()) {
 315     _om->_recursions = 0;
 316     _om->clear_successor();
 317     // Don't need a full fence after clearing successor here because of the call to exit().

 968 
 969   ObjectWaiter node(current);
 970   current->_ParkEvent->reset();
 971 
 972   if (try_lock_or_add_to_entry_list(current, &node)) {
 973     return; // We got the lock.
 974   }
 975   // This thread is now added to the _entry_list.
 976 
 977   // The lock might have been released while this thread was occupied queueing
 978   // itself onto _entry_list.  To close the race and avoid "stranding" and
 979   // progress-liveness failure we must resample-retry _owner before parking.
 980   // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
 981   // In this case the ST-MEMBAR is accomplished with CAS().
 982   //
 983   // TODO: Defer all thread state transitions until park-time.
 984   // Since state transitions are heavy and inefficient we'd like
 985   // to defer the state transitions until absolutely necessary,
 986   // and in doing so avoid some transitions ...
 987 
 988   // For virtual threads that are pinned, do a timed-park instead to
 989   // alleviate some deadlock cases where the successor is an unmounted
 990   // virtual thread that cannot run. This can happen in particular when
 991   // this virtual thread is currently loading/initializing a class, and
 992   // all other carriers have a vthread pinned to them waiting for said class
 993   // to be loaded/initialized.




 994   static int MAX_RECHECK_INTERVAL = 1000;
 995   int recheck_interval = 1;
 996   bool do_timed_parked = false;
 997   ContinuationEntry* ce = current->last_continuation();
 998   if (ce != nullptr && ce->is_virtual_thread()) {
 999     do_timed_parked = true;
1000   }
1001 
1002   for (;;) {
1003 
1004     if (try_lock(current) == TryLockResult::Success) {
1005       break;
1006     }
1007     assert(!has_owner(current), "invariant");
1008 
1009     // park self
1010     if (do_timed_parked) {
1011       current->_ParkEvent->park((jlong) recheck_interval);
1012       // Increase the recheck_interval, but clamp the value.
1013       recheck_interval *= 8;
1014       if (recheck_interval > MAX_RECHECK_INTERVAL) {
1015         recheck_interval = MAX_RECHECK_INTERVAL;
1016       }
1017     } else {
1018       current->_ParkEvent->park();
1019     }
1020 

1075   // the ST of null into _owner in the *subsequent* (following) corresponding
1076   // monitorexit.
1077 
1078   return;
1079 }
1080 
1081 // reenter_internal() is a specialized inline form of the latter half of the
1082 // contended slow-path from enter_internal().  We use reenter_internal() only for
1083 // monitor reentry in wait().
1084 //
1085 // In the future we should reconcile enter_internal() and reenter_internal().
1086 
1087 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1088   assert(current != nullptr, "invariant");
1089   assert(current->thread_state() != _thread_blocked, "invariant");
1090   assert(currentNode != nullptr, "invariant");
1091   assert(currentNode->_thread == current, "invariant");
1092   assert(_waiters > 0, "invariant");
1093   assert_mark_word_consistency();
1094 


















1095   for (;;) {
1096     ObjectWaiter::TStates v = currentNode->TState;
1097     guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1098     assert(!has_owner(current), "invariant");
1099 
1100     // This thread has been notified so try to reacquire the lock.
1101     if (try_lock(current) == TryLockResult::Success) {
1102       break;
1103     }
1104 
1105     // If that fails, spin again.  Note that spin count may be zero so the above TryLock
1106     // is necessary.
1107     if (try_spin(current)) {
1108         break;
1109     }
1110 
1111     {
1112       OSThreadContendState osts(current->osthread());
1113 
1114       assert(current->thread_state() == _thread_in_vm, "invariant");
1115 
1116       {
1117         ClearSuccOnSuspend csos(this);
1118         ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1119         current->_ParkEvent->park();









1120       }
1121     }
1122 
1123     // Try again, but just so we distinguish between futile wakeups and
1124     // successful wakeups.  The following test isn't algorithmically
1125     // necessary, but it helps us maintain sensible statistics.
1126     if (try_lock(current) == TryLockResult::Success) {
1127       break;
1128     }
1129 
1130     // The lock is still contested.
1131 
1132     // Assuming this is not a spurious wakeup we'll normally
1133     // find that _succ == current.
1134     if (has_successor(current)) clear_successor();
1135 
1136     // Invariant: after clearing _succ a contending thread
1137     // *must* retry  _owner before parking.
1138     OrderAccess::fence();
1139   }

1146   assert(!has_successor(current), "invariant");
1147   currentNode->TState = ObjectWaiter::TS_RUN;
1148   OrderAccess::fence();      // see comments at the end of enter_internal()
1149 }
1150 
1151 // This method is called from two places:
1152 // - On monitorenter contention with a null waiter.
1153 // - After Object.wait() times out or the target is interrupted to reenter the
1154 //   monitor, with the existing waiter.
1155 // For the Object.wait() case we do not delete the ObjectWaiter in case we
1156 // successfully acquire the monitor since we are going to need it on return.
1157 bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
1158   if (try_lock(current) == TryLockResult::Success) {
1159     assert(has_owner(current), "invariant");
1160     assert(!has_successor(current), "invariant");
1161     return true;
1162   }
1163 
1164   oop vthread = current->vthread();
1165   ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);






1166   if (try_lock_or_add_to_entry_list(current, node)) {
1167     // We got the lock.
1168     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet

1169     return true;
1170   }
1171   // This thread is now added to the entry_list.
1172 
1173   // We have to try once more since owner could have exited monitor and checked
1174   // _entry_list before we added the node to the queue.
1175   if (try_lock(current) == TryLockResult::Success) {
1176     assert(has_owner(current), "invariant");
1177     unlink_after_acquire(current, node);
1178     if (has_successor(current)) clear_successor();
1179     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet

1180     return true;
1181   }
1182 
1183   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1184   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1185 
1186   // We didn't succeed in acquiring the monitor so increment _contentions and
1187   // save ObjectWaiter* in the vthread since we will need it when resuming execution.
1188   add_to_contentions(1);
1189   java_lang_VirtualThread::set_objectWaiter(vthread, node);
1190   return false;
1191 }
1192 
1193 // Called from thaw code to resume the monitor operation that caused the vthread
1194 // to be unmounted. Method returns true if the monitor is successfully acquired,
1195 // which marks the end of the monitor operation; otherwise it returns false.
1196 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1197   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1198   assert(!has_owner(current), "");
1199 

1215   oop vthread = current->vthread();
1216   if (has_successor(current)) clear_successor();
1217 
1218   // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1219   OrderAccess::fence();
1220 
1221   if (try_lock(current) == TryLockResult::Success) {
1222     vthread_epilog(current, node);
1223     return true;
1224   }
1225 
1226   // We will return to Continuation.run() and unmount so set the right state.
1227   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1228 
1229   return false;
1230 }
1231 
1232 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1233   assert(has_owner(current), "invariant");
1234   add_to_contentions(-1);

1235 
1236   if (has_successor(current)) clear_successor();
1237 
1238   guarantee(_recursions == 0, "invariant");
1239 
1240   if (node->is_wait()) {
1241     _recursions = node->_recursions;   // restore the old recursion count
1242     _waiters--;                        // decrement the number of waiters
1243 
1244     if (node->_interrupted) {
1245       // We will throw at thaw end after finishing the mount transition.
1246       current->set_pending_interrupted_exception(true);
1247     }
1248   }
1249 
1250   unlink_after_acquire(current, node);
1251   delete node;
1252 
1253   // Clear the ObjectWaiter* from the vthread.
1254   java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);

1759       // consume an unpark() meant for the ParkEvent associated with
1760       // this ObjectMonitor.
1761     }
1762     if (wait_event.should_commit()) {
1763       post_monitor_wait_event(&wait_event, this, 0, millis, false);
1764     }
1765     THROW(vmSymbols::java_lang_InterruptedException());
1766     return;
1767   }
1768 
1769   freeze_result result;
1770   ContinuationEntry* ce = current->last_continuation();
1771   bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1772   if (is_virtual) {
1773     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1774       JvmtiExport::post_monitor_wait(current, object(), millis);
1775     }
1776     current->set_current_waiting_monitor(this);
1777     result = Continuation::try_preempt(current, ce->cont_oop(current));
1778     if (result == freeze_ok) {
1779       vthread_wait(current, millis);
1780       current->set_current_waiting_monitor(nullptr);
1781       return;
1782     }
1783   }
1784   // The jtiows does nothing for non-interruptible.
1785   JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1786 
1787   if (!is_virtual) { // it was already set for virtual thread
1788     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1789       JvmtiExport::post_monitor_wait(current, object(), millis);
1790 
1791       // The current thread already owns the monitor and it has not yet
1792       // been added to the wait queue so the current thread cannot be
1793       // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1794       // event handler cannot accidentally consume an unpark() meant for
1795       // the ParkEvent associated with this ObjectMonitor.
1796     }
1797     current->set_current_waiting_monitor(this);
1798   }
1799   // create a node to be put into the queue

1989   Thread::SpinAcquire(&_wait_set_lock);
1990   ObjectWaiter* iterator = dequeue_waiter();
1991   if (iterator != nullptr) {
1992     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1993     guarantee(!iterator->_notified, "invariant");
1994 
1995     if (iterator->is_vthread()) {
1996       oop vthread = iterator->vthread();
1997       java_lang_VirtualThread::set_notified(vthread, true);
1998       int old_state = java_lang_VirtualThread::state(vthread);
1999       // If state is not WAIT/TIMED_WAIT then target could still be on
2000       // unmount transition, or wait could have already timed-out or target
2001       // could have been interrupted. In the first case, the target itself
2002       // will set the state to BLOCKED at the end of the unmount transition.
2003       // In the other cases the target would have been already unblocked so
2004       // there is nothing to do.
2005       if (old_state == java_lang_VirtualThread::WAIT ||
2006           old_state == java_lang_VirtualThread::TIMED_WAIT) {
2007         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2008       }




2009     }
2010 
2011     iterator->_notified = true;
2012     iterator->_notifier_tid = JFR_THREAD_ID(current);
2013     did_notify = true;
2014     add_to_entry_list(current, iterator);
2015 
2016     // _wait_set_lock protects the wait queue, not the entry_list.  We could
2017     // move the add-to-entry_list operation, above, outside the critical section
2018     // protected by _wait_set_lock.  In practice that's not useful.  With the
2019     // exception of  wait() timeouts and interrupts the monitor owner
2020     // is the only thread that grabs _wait_set_lock.  There's almost no contention
2021     // on _wait_set_lock so it's not profitable to reduce the length of the
2022     // critical section.
2023 
2024     if (!iterator->is_vthread()) {
2025       iterator->wait_reenter_begin(this);













2026     }
2027   }
2028   Thread::SpinRelease(&_wait_set_lock);
2029   return did_notify;
2030 }
2031 
2032 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2033                                       ObjectMonitor* monitor,
2034                                       int notified_count) {
2035   assert(event != nullptr, "invariant");
2036   assert(monitor != nullptr, "invariant");
2037   const Klass* monitor_klass = monitor->object()->klass();
2038   if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2039     return;
2040   }
2041   event->set_monitorClass(monitor_klass);
2042   // Set an address that is 'unique enough', such that events close in
2043   // time and with the same address are likely (but not guaranteed) to
2044   // belong to the same object.
2045   event->set_address((uintptr_t)monitor);

2095   quick_notifyAll(current);
2096 }
2097 
2098 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2099   assert(has_owner(current), "Precondition");
2100 
2101   EventJavaMonitorNotify event;
2102   DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2103   int tally = 0;
2104   while (_wait_set != nullptr) {
2105     if (notify_internal(current)) {
2106       tally++;
2107     }
2108   }
2109 
2110   if ((tally > 0) && event.should_commit()) {
2111     post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2112   }
2113 }
2114 
2115 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis) {
2116   oop vthread = current->vthread();
2117   ObjectWaiter* node = new ObjectWaiter(vthread, this);
2118   node->_is_wait = true;

2119   node->TState = ObjectWaiter::TS_WAIT;
2120   java_lang_VirtualThread::set_notified(vthread, false);  // Reset notified flag

2121 
2122   // Enter the waiting queue, which is a circular doubly linked list in this case
2123   // but it could be a priority queue or any data structure.
2124   // _wait_set_lock protects the wait queue.  Normally the wait queue is accessed only
2125   // by the owner of the monitor *except* in the case where park()
2126   // returns because of a timeout or interrupt.  Contention is exceptionally rare
2127   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2128 
2129   Thread::SpinAcquire(&_wait_set_lock);
2130   add_waiter(node);
2131   Thread::SpinRelease(&_wait_set_lock);
2132 
2133   node->_recursions = _recursions;   // record the old recursion count
2134   _recursions = 0;                   // set the recursion level to be 0
2135   _waiters++;                        // increment the number of waiters
2136   exit(current);                     // exit the monitor
2137   guarantee(!has_owner(current), "invariant");
2138 
2139   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2140   java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);

2146 
2147 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2148   // The first time we run after being preempted on Object.wait() we
2149   // need to check if we were interrupted or the wait timed-out, and
2150   // in that case remove ourselves from the _wait_set queue.
2151   if (node->TState == ObjectWaiter::TS_WAIT) {
2152     Thread::SpinAcquire(&_wait_set_lock);
2153     if (node->TState == ObjectWaiter::TS_WAIT) {
2154       dequeue_specific_waiter(node);       // unlink from wait_set
2155       assert(!node->_notified, "invariant");
2156       node->TState = ObjectWaiter::TS_RUN;
2157     }
2158     Thread::SpinRelease(&_wait_set_lock);
2159   }
2160 
2161   // If this was an interrupted case, set the _interrupted boolean so that
2162   // once we re-acquire the monitor we know if we need to throw IE or not.
2163   ObjectWaiter::TStates state = node->TState;
2164   bool was_notified = state == ObjectWaiter::TS_ENTER;
2165   assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2166   node->_interrupted = !was_notified && current->is_interrupted(false);
2167 
2168   // Post JFR and JVMTI events.

2169   EventJavaMonitorWait wait_event;
2170   if (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited()) {
2171     vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2172   }
2173 
2174   // Mark that we are at reenter so that we don't call this method again.
2175   node->_at_reenter = true;
2176 
2177   if (!was_notified) {
2178     bool acquired = vthread_monitor_enter(current, node);
2179     if (acquired) {
2180       guarantee(_recursions == 0, "invariant");
2181       _recursions = node->_recursions;   // restore the old recursion count
2182       _waiters--;                        // decrement the number of waiters
2183 
2184       if (node->_interrupted) {
2185         // We will throw at thaw end after finishing the mount transition.
2186         current->set_pending_interrupted_exception(true);
2187       }
2188 
2189       delete node;
2190       // Clear the ObjectWaiter* from the vthread.

2617   st->print_cr("  _object = " INTPTR_FORMAT, p2i(object_peek()));
2618   st->print_cr("  _pad_buf0 = {");
2619   st->print_cr("    [0] = '\\0'");
2620   st->print_cr("    ...");
2621   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2622   st->print_cr("  }");
2623   st->print_cr("  _owner = " INT64_FORMAT, owner_raw());
2624   st->print_cr("  _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2625   st->print_cr("  _pad_buf1 = {");
2626   st->print_cr("    [0] = '\\0'");
2627   st->print_cr("    ...");
2628   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2629   st->print_cr("  }");
2630   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2631   st->print_cr("  _recursions = %zd", _recursions);
2632   st->print_cr("  _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2633   st->print_cr("  _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2634   st->print_cr("  _succ = " INT64_FORMAT, successor());
2635   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2636   st->print_cr("  _contentions = %d", contentions());

2637   st->print_cr("  _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2638   st->print_cr("  _waiters = %d", _waiters);
2639   st->print_cr("  _wait_set_lock = %d", _wait_set_lock);
2640   st->print_cr("}");
2641 }
2642 #endif

 278   } else {
 279     // However, ThreadService::get_current_contended_monitor()
 280     // can call here via the VMThread so sanity check it.
 281     assert(self->is_VM_thread(), "must be");
 282   }
 283 #endif // ASSERT
 284 }
 285 
 286 ObjectMonitor::ObjectMonitor(oop object) :
 287   _metadata(0),
 288   _object(_oop_storage, object),
 289   _owner(NO_OWNER),
 290   _previous_owner_tid(0),
 291   _next_om(nullptr),
 292   _recursions(0),
 293   _entry_list(nullptr),
 294   _entry_list_tail(nullptr),
 295   _succ(NO_OWNER),
 296   _SpinDuration(ObjectMonitor::Knob_SpinLimit),
 297   _contentions(0),
 298   _unmounted_vthreads(0),
 299   _wait_set(nullptr),
 300   _waiters(0),
 301   _wait_set_lock(0),
 302   _stack_locker(nullptr)
 303 { }
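
The new _unmounted_vthreads field initialized above is paired with inc_unmounted_vthreads(), dec_unmounted_vthreads() and has_unmounted_vthreads() accessors declared in objectMonitor.hpp, which is not part of this excerpt. A minimal sketch, assuming a 64-bit counter maintained with HotSpot's Atomic API, of how such helpers could look (illustrative class and member names, not the real declarations):

    #include "runtime/atomic.hpp"

    // Illustrative stand-in, not the actual ObjectMonitor declarations. Plain
    // atomic add/load suffice here because, as the comments in this change note,
    // the cmpxchg used when linking onto _entry_list supplies the ordering.
    class UnmountedVThreadCounterSketch {
      volatile int64_t _unmounted_vthreads;
     public:
      UnmountedVThreadCounterSketch() : _unmounted_vthreads(0) {}
      void inc_unmounted_vthreads()       { Atomic::add(&_unmounted_vthreads, (int64_t)1);  }
      void dec_unmounted_vthreads()       { Atomic::add(&_unmounted_vthreads, (int64_t)-1); }
      bool has_unmounted_vthreads() const { return Atomic::load(&_unmounted_vthreads) > 0;  }
    };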
 304 
 305 ObjectMonitor::~ObjectMonitor() {
 306   _object.release(_oop_storage);
 307 }
 308 
 309 oop ObjectMonitor::object() const {
 310   check_object_context();
 311   return _object.resolve();
 312 }
 313 
 314 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
 315   if (current->is_suspended()) {
 316     _om->_recursions = 0;
 317     _om->clear_successor();
 318     // Don't need a full fence after clearing successor here because of the call to exit().

 969 
 970   ObjectWaiter node(current);
 971   current->_ParkEvent->reset();
 972 
 973   if (try_lock_or_add_to_entry_list(current, &node)) {
 974     return; // We got the lock.
 975   }
 976   // This thread is now added to the _entry_list.
 977 
 978   // The lock might have been released while this thread was occupied queueing
 979   // itself onto _entry_list.  To close the race and avoid "stranding" and
 980   // progress-liveness failure we must resample-retry _owner before parking.
 981   // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
 982   // In this case the ST-MEMBAR is accomplished with CAS().
 983   //
 984   // TODO: Defer all thread state transitions until park-time.
 985   // Since state transitions are heavy and inefficient we'd like
 986   // to defer the state transitions until absolutely necessary,
 987   // and in doing so avoid some transitions ...
 988 
 989   // If there are unmounted virtual threads in the _entry_list, do a timed-park
 990   // instead to alleviate some deadlock cases where one of them is picked as
 991   // the successor but cannot run due to having run out of carriers. This can
 992   // happen, for example, if this is a pinned virtual thread currently loading
 993   // or initializing a class, and all other carriers have a pinned vthread
 994   // waiting for said class to be loaded/initialized.
 995   // Read counter *after* adding this thread to the _entry_list.
 996   // Adding to _entry_list uses Atomic::cmpxchg() which already provides
 997   // a fence that prevents this load from floating above the previous store.
 998   bool do_timed_parked = has_unmounted_vthreads();
 999   static int MAX_RECHECK_INTERVAL = 1000;
1000   int recheck_interval = 1;





1001 
1002   for (;;) {
1003 
1004     if (try_lock(current) == TryLockResult::Success) {
1005       break;
1006     }
1007     assert(!has_owner(current), "invariant");
1008 
1009     // park self
1010     if (do_timed_parked) {
1011       current->_ParkEvent->park((jlong) recheck_interval);
1012       // Increase the recheck_interval, but clamp the value.
1013       recheck_interval *= 8;
1014       if (recheck_interval > MAX_RECHECK_INTERVAL) {
1015         recheck_interval = MAX_RECHECK_INTERVAL;
1016       }
1017     } else {
1018       current->_ParkEvent->park();
1019     }
1020 
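
A minimal sketch of the "ST _entry_list; MEMBAR; LD _owner" resample pattern described in the comment above, written with std::atomic placeholders rather than HotSpot types (names here are illustrative only):

    #include <atomic>

    std::atomic<void*> g_owner{nullptr};       // placeholder for _owner
    std::atomic<void*> g_entry_list{nullptr};  // placeholder for _entry_list

    bool enqueue_then_resample(void* self) {
      // Publish ourselves on the entry list; the CAS doubles as the store-load
      // barrier. (A real ObjectWaiter would also link its _next to the observed head.)
      void* head = g_entry_list.load();
      while (!g_entry_list.compare_exchange_weak(head, self)) {
        // head is reloaded by compare_exchange_weak on failure; just retry.
      }
      // Re-sample the owner before parking: the previous owner may have exited
      // after scanning a then-empty entry list, so no one would ever unpark us.
      return g_owner.load() == nullptr;  // if true, the caller should try_lock again
    }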

1075   // the ST of null into _owner in the *subsequent* (following) corresponding
1076   // monitorexit.
1077 
1078   return;
1079 }
1080 
1081 // reenter_internal() is a specialized inline form of the latter half of the
1082 // contended slow-path from enter_internal().  We use reenter_internal() only for
1083 // monitor reentry in wait().
1084 //
1085 // In the future we should reconcile enter_internal() and reenter_internal().
1086 
1087 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1088   assert(current != nullptr, "invariant");
1089   assert(current->thread_state() != _thread_blocked, "invariant");
1090   assert(currentNode != nullptr, "invariant");
1091   assert(currentNode->_thread == current, "invariant");
1092   assert(_waiters > 0, "invariant");
1093   assert_mark_word_consistency();
1094 
1095   // If there are unmounted virtual threads in the _entry_list, do a timed-park
1096   // instead to alleviate some deadlock cases where one of them is picked as
1097   // the successor but cannot run due to having run out of carriers. This can
1098   // happen, for example, if this is a pinned virtual thread (or plain carrier)
1099   // waiting for a class to be initialized.
1100   // In theory we only get here in the "notification" case where the thread has
1101   // already been added to the _entry_list. But if the thread happened to be interrupted
1102   // at the same time it was being notified, we could have read a state of TS_ENTER
1103   // that led us here but the thread hasn't yet been added to the queue. In that
1104   // case, a false value from has_unmounted_vthreads() is not a guarantee
1105   // that vthreads weren't added to the _entry_list ahead of this thread. We will live
1106   // with this corner case not only because it would be very rare, but also because
1107   // if there are several carriers blocked in this same situation, this would only
1108   // happen for the first one notified.
1109   bool do_timed_parked = has_unmounted_vthreads();
1110   static int MAX_RECHECK_INTERVAL = 1000;
1111   int recheck_interval = 1;
1112 
1113   for (;;) {
1114     ObjectWaiter::TStates v = currentNode->TState;
1115     guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1116     assert(!has_owner(current), "invariant");
1117 
1118     // This thread has been notified so try to reacquire the lock.
1119     if (try_lock(current) == TryLockResult::Success) {
1120       break;
1121     }
1122 
1123     // If that fails, spin again.  Note that spin count may be zero so the above TryLock
1124     // is necessary.
1125     if (try_spin(current)) {
1126         break;
1127     }
1128 
1129     {
1130       OSThreadContendState osts(current->osthread());
1131 
1132       assert(current->thread_state() == _thread_in_vm, "invariant");
1133 
1134       {
1135         ClearSuccOnSuspend csos(this);
1136         ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1137         if (do_timed_parked) {
1138           current->_ParkEvent->park((jlong) recheck_interval);
1139           // Increase the recheck_interval, but clamp the value.
1140           recheck_interval *= 8;
1141           if (recheck_interval > MAX_RECHECK_INTERVAL) {
1142             recheck_interval = MAX_RECHECK_INTERVAL;
1143           }
1144         } else {
1145           current->_ParkEvent->park();
1146         }
1147       }
1148     }
1149 
1150     // Try again, but just so we distinguish between futile wakeups and
1151     // successful wakeups.  The following test isn't algorithmically
1152     // necessary, but it helps us maintain sensible statistics.
1153     if (try_lock(current) == TryLockResult::Success) {
1154       break;
1155     }
1156 
1157     // The lock is still contested.
1158 
1159     // Assuming this is not a spurious wakeup we'll normally
1160     // find that _succ == current.
1161     if (has_successor(current)) clear_successor();
1162 
1163     // Invariant: after clearing _succ a contending thread
1164     // *must* retry  _owner before parking.
1165     OrderAccess::fence();
1166   }

1173   assert(!has_successor(current), "invariant");
1174   currentNode->TState = ObjectWaiter::TS_RUN;
1175   OrderAccess::fence();      // see comments at the end of enter_internal()
1176 }
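
Both here and in enter_internal() the timed park grows its timeout geometrically and clamps it at MAX_RECHECK_INTERVAL, giving park timeouts of 1, 8, 64, 512, 1000, 1000, ... ms. A small standalone illustration of that backoff (not HotSpot code):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int MAX_RECHECK_INTERVAL = 1000;  // same clamp as above
      int recheck_interval = 1;
      for (int i = 0; i < 6; i++) {
        printf("park timeout: %d ms\n", recheck_interval);
        recheck_interval = std::min(recheck_interval * 8, MAX_RECHECK_INTERVAL);
      }
      return 0;
    }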
1177 
1178 // This method is called from two places:
1179 // - On monitorenter contention with a null waiter.
1180 // - After Object.wait() times out or the target is interrupted to reenter the
1181 //   monitor, with the existing waiter.
1182 // For the Object.wait() case we do not delete the ObjectWaiter in case we
1183 // successfully acquire the monitor since we are going to need it on return.
1184 bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
1185   if (try_lock(current) == TryLockResult::Success) {
1186     assert(has_owner(current), "invariant");
1187     assert(!has_successor(current), "invariant");
1188     return true;
1189   }
1190 
1191   oop vthread = current->vthread();
1192   ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);
1193 
1194   // Increment counter *before* adding the vthread to the _entry_list.
1195   // Adding to _entry_list uses Atomic::cmpxchg() which already provides
1196   // a fence that prevents reordering of the stores.
1197   inc_unmounted_vthreads();
1198 
1199   if (try_lock_or_add_to_entry_list(current, node)) {
1200     // We got the lock.
1201     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
1202     dec_unmounted_vthreads();
1203     return true;
1204   }
1205   // This thread is now added to the entry_list.
1206 
1207   // We have to try once more since owner could have exited monitor and checked
1208   // _entry_list before we added the node to the queue.
1209   if (try_lock(current) == TryLockResult::Success) {
1210     assert(has_owner(current), "invariant");
1211     unlink_after_acquire(current, node);
1212     if (has_successor(current)) clear_successor();
1213     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
1214     dec_unmounted_vthreads();
1215     return true;
1216   }
1217 
1218   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1219   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1220 
1221   // We didn't succeed in acquiring the monitor so increment _contentions and
1222   // save ObjectWaiter* in the vthread since we will need it when resuming execution.
1223   add_to_contentions(1);
1224   java_lang_VirtualThread::set_objectWaiter(vthread, node);
1225   return false;
1226 }
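
The inc_unmounted_vthreads()/dec_unmounted_vthreads() calls above balance the counter on every outcome of the enter attempt, so it stays elevated only while a blocked, unmounted vthread is actually queued. A plain-integer walk-through of those paths (illustration only, no HotSpot types):

    #include <cassert>

    int main() {
      int unmounted = 0;

      // Lock won inside try_lock_or_add_to_entry_list(), or on the retry right
      // after enqueueing: the increment is undone before returning true.
      unmounted++; unmounted--;
      assert(unmounted == 0);

      // Enqueue succeeds and the vthread blocks: the counter stays elevated
      // until vthread_epilog() runs once the monitor is finally acquired.
      unmounted++;
      assert(unmounted == 1);
      unmounted--;  // dec_unmounted_vthreads() in vthread_epilog()
      assert(unmounted == 0);
      return 0;
    }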
1227 
1228 // Called from thaw code to resume the monitor operation that caused the vthread
1229 // to be unmounted. Method returns true if the monitor is successfully acquired,
1230 // which marks the end of the monitor operation; otherwise it returns false.
1231 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1232   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1233   assert(!has_owner(current), "");
1234 

1250   oop vthread = current->vthread();
1251   if (has_successor(current)) clear_successor();
1252 
1253   // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1254   OrderAccess::fence();
1255 
1256   if (try_lock(current) == TryLockResult::Success) {
1257     vthread_epilog(current, node);
1258     return true;
1259   }
1260 
1261   // We will return to Continuation.run() and unmount so set the right state.
1262   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1263 
1264   return false;
1265 }
1266 
1267 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1268   assert(has_owner(current), "invariant");
1269   add_to_contentions(-1);
1270   dec_unmounted_vthreads();
1271 
1272   if (has_successor(current)) clear_successor();
1273 
1274   guarantee(_recursions == 0, "invariant");
1275 
1276   if (node->is_wait()) {
1277     _recursions = node->_recursions;   // restore the old recursion count
1278     _waiters--;                        // decrement the number of waiters
1279 
1280     if (node->_interrupted) {
1281       // We will throw at thaw end after finishing the mount transition.
1282       current->set_pending_interrupted_exception(true);
1283     }
1284   }
1285 
1286   unlink_after_acquire(current, node);
1287   delete node;
1288 
1289   // Clear the ObjectWaiter* from the vthread.
1290   java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);

1795       // consume an unpark() meant for the ParkEvent associated with
1796       // this ObjectMonitor.
1797     }
1798     if (wait_event.should_commit()) {
1799       post_monitor_wait_event(&wait_event, this, 0, millis, false);
1800     }
1801     THROW(vmSymbols::java_lang_InterruptedException());
1802     return;
1803   }
1804 
1805   freeze_result result;
1806   ContinuationEntry* ce = current->last_continuation();
1807   bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1808   if (is_virtual) {
1809     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1810       JvmtiExport::post_monitor_wait(current, object(), millis);
1811     }
1812     current->set_current_waiting_monitor(this);
1813     result = Continuation::try_preempt(current, ce->cont_oop(current));
1814     if (result == freeze_ok) {
1815       vthread_wait(current, millis, interruptible);
1816       current->set_current_waiting_monitor(nullptr);
1817       return;
1818     }
1819   }
1820   // The jtiows does nothing for non-interruptible.
1821   JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1822 
1823   if (!is_virtual) { // it was already set for virtual thread
1824     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1825       JvmtiExport::post_monitor_wait(current, object(), millis);
1826 
1827       // The current thread already owns the monitor and it has not yet
1828       // been added to the wait queue so the current thread cannot be
1829       // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1830       // event handler cannot accidentally consume an unpark() meant for
1831       // the ParkEvent associated with this ObjectMonitor.
1832     }
1833     current->set_current_waiting_monitor(this);
1834   }
1835   // create a node to be put into the queue

2025   Thread::SpinAcquire(&_wait_set_lock);
2026   ObjectWaiter* iterator = dequeue_waiter();
2027   if (iterator != nullptr) {
2028     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
2029     guarantee(!iterator->_notified, "invariant");
2030 
2031     if (iterator->is_vthread()) {
2032       oop vthread = iterator->vthread();
2033       java_lang_VirtualThread::set_notified(vthread, true);
2034       int old_state = java_lang_VirtualThread::state(vthread);
2035       // If state is not WAIT/TIMED_WAIT then target could still be on
2036       // unmount transition, or wait could have already timed-out or target
2037       // could have been interrupted. In the first case, the target itself
2038       // will set the state to BLOCKED at the end of the unmount transition.
2039       // In the other cases the target would have been already unblocked so
2040       // there is nothing to do.
2041       if (old_state == java_lang_VirtualThread::WAIT ||
2042           old_state == java_lang_VirtualThread::TIMED_WAIT) {
2043         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2044       }
2045       // Increment counter *before* adding the vthread to the _entry_list.
2046       // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2047       // a fence that prevents reordering of the stores.
2048       inc_unmounted_vthreads();
2049     }
2050 
2051     iterator->_notified = true;
2052     iterator->_notifier_tid = JFR_THREAD_ID(current);
2053     did_notify = true;
2054     add_to_entry_list(current, iterator);
2055 
2056     // _wait_set_lock protects the wait queue, not the entry_list.  We could
2057     // move the add-to-entry_list operation, above, outside the critical section
2058     // protected by _wait_set_lock.  In practice that's not useful.  With the
2059     // exception of  wait() timeouts and interrupts the monitor owner
2060     // is the only thread that grabs _wait_set_lock.  There's almost no contention
2061     // on _wait_set_lock so it's not profitable to reduce the length of the
2062     // critical section.
2063 
2064     if (!iterator->is_vthread()) {
2065       iterator->wait_reenter_begin(this);
2066 
2067       // Read counter *after* adding the thread to the _entry_list.
2068       // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2069       // a fence that prevents this load from floating above the previous store.
2070       if (has_unmounted_vthreads()) {
2071         // Wake up the thread to alleviate some deadlock cases where the successor
2072         // that will be picked when this thread releases the monitor is an unmounted
2073         // virtual thread that cannot run due to having run out of carriers. Upon waking
2074         // up, the thread will call reenter_internal(), which will use a timed park if
2075         // there is contention and there are still vthreads in the _entry_list.
2076         JavaThread* t = iterator->thread();
2077         t->_ParkEvent->unpark();
2078       }
2079     }
2080   }
2081   Thread::SpinRelease(&_wait_set_lock);
2082   return did_notify;
2083 }
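
A minimal sketch of the WAIT/TIMED_WAIT to BLOCKED transition guarded above, with std::atomic standing in for java_lang_VirtualThread::cmpxchg_state; the enum values and names below are placeholders, not the real java.lang.VirtualThread state constants:

    #include <atomic>

    enum SketchVThreadState { SKETCH_RUNNING = 0, SKETCH_WAIT = 1,
                              SKETCH_TIMED_WAIT = 2, SKETCH_BLOCKED = 3 };

    void notify_transition_sketch(std::atomic<int>& state) {
      int old_state = state.load();
      if (old_state == SKETCH_WAIT || old_state == SKETCH_TIMED_WAIT) {
        // CAS so a concurrent timeout or interrupt that already moved the
        // vthread out of its waiting state wins cleanly; we then do nothing.
        state.compare_exchange_strong(old_state, SKETCH_BLOCKED);
      }
      // Any other state: the target is still on its unmount transition (it will
      // set BLOCKED itself at the end of it) or has already been unblocked.
    }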
2084 
2085 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2086                                       ObjectMonitor* monitor,
2087                                       int notified_count) {
2088   assert(event != nullptr, "invariant");
2089   assert(monitor != nullptr, "invariant");
2090   const Klass* monitor_klass = monitor->object()->klass();
2091   if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2092     return;
2093   }
2094   event->set_monitorClass(monitor_klass);
2095   // Set an address that is 'unique enough', such that events close in
2096   // time and with the same address are likely (but not guaranteed) to
2097   // belong to the same object.
2098   event->set_address((uintptr_t)monitor);

2148   quick_notifyAll(current);
2149 }
2150 
2151 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2152   assert(has_owner(current), "Precondition");
2153 
2154   EventJavaMonitorNotify event;
2155   DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2156   int tally = 0;
2157   while (_wait_set != nullptr) {
2158     if (notify_internal(current)) {
2159       tally++;
2160     }
2161   }
2162 
2163   if ((tally > 0) && event.should_commit()) {
2164     post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2165   }
2166 }
2167 
2168 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interruptible) {
2169   oop vthread = current->vthread();
2170   ObjectWaiter* node = new ObjectWaiter(vthread, this);
2171   node->_is_wait = true;
2172   node->_interruptible = interruptible;
2173   node->TState = ObjectWaiter::TS_WAIT;
2174   java_lang_VirtualThread::set_notified(vthread, false);  // Reset notified flag
2175   java_lang_VirtualThread::set_interruptible_wait(vthread, interruptible);
2176 
2177   // Enter the waiting queue, which is a circular doubly linked list in this case
2178   // but it could be a priority queue or any data structure.
2179   // _wait_set_lock protects the wait queue.  Normally the wait queue is accessed only
2180   // by the owner of the monitor *except* in the case where park()
2181   // returns because of a timeout or interrupt.  Contention is exceptionally rare
2182   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2183 
2184   Thread::SpinAcquire(&_wait_set_lock);
2185   add_waiter(node);
2186   Thread::SpinRelease(&_wait_set_lock);
2187 
2188   node->_recursions = _recursions;   // record the old recursion count
2189   _recursions = 0;                   // set the recursion level to be 0
2190   _waiters++;                        // increment the number of waiters
2191   exit(current);                     // exit the monitor
2192   guarantee(!has_owner(current), "invariant");
2193 
2194   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2195   java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);
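
add_waiter() (not shown in this excerpt) links the node into the circular doubly linked _wait_set described in the comment above. A minimal sketch of that insertion with placeholder types (illustration only):

    // Placeholder node type; the real list links ObjectWaiter::_next/_prev.
    struct WaitNodeSketch {
      WaitNodeSketch* _next;
      WaitNodeSketch* _prev;
    };

    void add_waiter_sketch(WaitNodeSketch*& wait_set, WaitNodeSketch* node) {
      if (wait_set == nullptr) {
        wait_set = node;
        node->_next = node;  // a lone node points at itself in both directions
        node->_prev = node;
      } else {
        WaitNodeSketch* tail = wait_set->_prev;  // head->_prev is the tail
        tail->_next = node;
        node->_prev = tail;
        node->_next = wait_set;
        wait_set->_prev = node;
      }
    }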

2201 
2202 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2203   // The first time we run after being preempted on Object.wait() we
2204   // need to check if we were interrupted or the wait timed-out, and
2205   // in that case remove ourselves from the _wait_set queue.
2206   if (node->TState == ObjectWaiter::TS_WAIT) {
2207     Thread::SpinAcquire(&_wait_set_lock);
2208     if (node->TState == ObjectWaiter::TS_WAIT) {
2209       dequeue_specific_waiter(node);       // unlink from wait_set
2210       assert(!node->_notified, "invariant");
2211       node->TState = ObjectWaiter::TS_RUN;
2212     }
2213     Thread::SpinRelease(&_wait_set_lock);
2214   }
2215 
2216   // If this was an interrupted case, set the _interrupted boolean so that
2217   // once we re-acquire the monitor we know if we need to throw IE or not.
2218   ObjectWaiter::TStates state = node->TState;
2219   bool was_notified = state == ObjectWaiter::TS_ENTER;
2220   assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2221   node->_interrupted = node->_interruptible && !was_notified && current->is_interrupted(false);
2222 
2223   // Post JFR and JVMTI events. If non-interruptible, we are in the
2224   // ObjectLocker case, so we don't post anything.
2225   EventJavaMonitorWait wait_event;
2226   if (node->_interruptible && (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited())) {
2227     vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2228   }
2229 
2230   // Mark that we are at reenter so that we don't call this method again.
2231   node->_at_reenter = true;
2232 
2233   if (!was_notified) {
2234     bool acquired = vthread_monitor_enter(current, node);
2235     if (acquired) {
2236       guarantee(_recursions == 0, "invariant");
2237       _recursions = node->_recursions;   // restore the old recursion count
2238       _waiters--;                        // decrement the number of waiters
2239 
2240       if (node->_interrupted) {
2241         // We will throw at thaw end after finishing the mount transition.
2242         current->set_pending_interrupted_exception(true);
2243       }
2244 
2245       delete node;
2246       // Clear the ObjectWaiter* from the vthread.
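
The unlink at the top of vthread_wait_reenter() re-tests TState under _wait_set_lock because a concurrent notify can move the node from the _wait_set to the _entry_list between the unlocked check and the lock acquisition. A small sketch of that double-checked pattern with placeholder types (illustration only):

    #include <mutex>

    enum SketchTState { SKETCH_TS_WAIT, SKETCH_TS_ENTER, SKETCH_TS_RUN };
    struct SketchWaiter { SketchTState TState = SKETCH_TS_WAIT; };

    void reenter_unlink_sketch(SketchWaiter* node, std::mutex& wait_set_lock) {
      if (node->TState == SKETCH_TS_WAIT) {          // cheap unlocked test
        std::lock_guard<std::mutex> guard(wait_set_lock);
        if (node->TState == SKETCH_TS_WAIT) {        // re-test under the lock
          // dequeue_specific_waiter(node) would unlink here; a notify that won
          // the race has already moved the node to the entry list instead.
          node->TState = SKETCH_TS_RUN;
        }
      }
    }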

2673   st->print_cr("  _object = " INTPTR_FORMAT, p2i(object_peek()));
2674   st->print_cr("  _pad_buf0 = {");
2675   st->print_cr("    [0] = '\\0'");
2676   st->print_cr("    ...");
2677   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2678   st->print_cr("  }");
2679   st->print_cr("  _owner = " INT64_FORMAT, owner_raw());
2680   st->print_cr("  _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2681   st->print_cr("  _pad_buf1 = {");
2682   st->print_cr("    [0] = '\\0'");
2683   st->print_cr("    ...");
2684   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2685   st->print_cr("  }");
2686   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2687   st->print_cr("  _recursions = %zd", _recursions);
2688   st->print_cr("  _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2689   st->print_cr("  _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2690   st->print_cr("  _succ = " INT64_FORMAT, successor());
2691   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2692   st->print_cr("  _contentions = %d", contentions());
2693   st->print_cr("  _unmounted_vthreads = " INT64_FORMAT, _unmounted_vthreads);
2694   st->print_cr("  _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2695   st->print_cr("  _waiters = %d", _waiters);
2696   st->print_cr("  _wait_set_lock = %d", _wait_set_lock);
2697   st->print_cr("}");
2698 }
2699 #endif