< prev index next >

src/hotspot/share/runtime/objectMonitor.cpp

Print this page

 278   } else {
 279     // However, ThreadService::get_current_contended_monitor()
 280     // can call here via the VMThread so sanity check it.
 281     assert(self->is_VM_thread(), "must be");
 282   }
 283 #endif // ASSERT
 284 }
 285 
 // Construct a monitor for 'object' in the unowned, uncontended state:
 // no owner, zero recursions/contentions/waiters, and empty entry and
 // wait queues. _SpinDuration starts at the Knob_SpinLimit budget.
 286 ObjectMonitor::ObjectMonitor(oop object) :
 287   _metadata(0),
 288   _object(_oop_storage, object),
 289   _owner(NO_OWNER),
 290   _previous_owner_tid(0),
 291   _next_om(nullptr),
 292   _recursions(0),
 293   _entry_list(nullptr),
 294   _entry_list_tail(nullptr),
 295   _succ(NO_OWNER),
 296   _SpinDuration(ObjectMonitor::Knob_SpinLimit),
 297   _contentions(0),

 298   _wait_set(nullptr),
 299   _waiters(0),
 300   _wait_set_lock(0),
 301   _stack_locker(nullptr)
 302 { }
 303 
 // Destructor: return the _object handle to the shared _oop_storage so the
 // slot can be reused; the monitor itself holds no other resources here.
 304 ObjectMonitor::~ObjectMonitor() {
 305   _object.release(_oop_storage);
 306 }
 307 
 // Return the monitored oop. check_object_context() asserts the caller is in
 // a context where resolving the handle is safe (see the ASSERT block above).
 308 oop ObjectMonitor::object() const {
 309   check_object_context();
 310   return _object.resolve();
 311 }
 312 
 313 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
 314   if (current->is_suspended()) {
 315     _om->_recursions = 0;
 316     _om->clear_successor();
 317     // Don't need a full fence after clearing successor here because of the call to exit().

 966 
 967   ObjectWaiter node(current);
 968   current->_ParkEvent->reset();
 969 
 970   if (try_lock_or_add_to_entry_list(current, &node)) {
 971     return; // We got the lock.
 972   }
 973   // This thread is now added to the _entry_list.
 974 
 975   // The lock might have been released while this thread was occupied queueing
 976   // itself onto _entry_list.  To close the race and avoid "stranding" and
 977   // progress-liveness failure we must resample-retry _owner before parking.
 978   // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
 979   // In this case the ST-MEMBAR is accomplished with CAS().
 980   //
 981   // TODO: Defer all thread state transitions until park-time.
 982   // Since state transitions are heavy and inefficient we'd like
 983   // to defer the state transitions until absolutely necessary,
 984   // and in doing so avoid some transitions ...
 985 
 986   // For virtual threads that are pinned, do a timed-park instead to
 987   // alleviate some deadlock cases where the successor is an unmounted
 988   // virtual thread that cannot run. This can happen in particular when
 989   // this virtual thread is currently loading/initializing a class, and
 990   // all other carriers have a vthread pinned to it waiting for said class
 991   // to be loaded/initialized.




 992   static int MAX_RECHECK_INTERVAL = 1000;
 993   int recheck_interval = 1;
 994   bool do_timed_parked = false;
 995   ContinuationEntry* ce = current->last_continuation();
 996   if (ce != nullptr && ce->is_virtual_thread()) {
 997     do_timed_parked = true;
 998   }
 999 
1000   for (;;) {
1001 
1002     if (try_lock(current) == TryLockResult::Success) {
1003       break;
1004     }
1005     assert(!has_owner(current), "invariant");
1006 
1007     // park self
1008     if (do_timed_parked) {
1009       current->_ParkEvent->park((jlong) recheck_interval);
1010       // Increase the recheck_interval, but clamp the value.
1011       recheck_interval *= 8;
1012       if (recheck_interval > MAX_RECHECK_INTERVAL) {
1013         recheck_interval = MAX_RECHECK_INTERVAL;
1014       }
1015     } else {
1016       current->_ParkEvent->park();
1017     }
1018 

1073   // the ST of null into _owner in the *subsequent* (following) corresponding
1074   // monitorexit.
1075 
1076   return;
1077 }
1078 
1079 // reenter_internal() is a specialized inline form of the latter half of the
1080 // contended slow-path from enter_internal().  We use reenter_internal() only for
1081 // monitor reentry in wait().
1082 //
1083 // In the future we should reconcile enter_internal() and reenter_internal().
1084 
1085 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1086   assert(current != nullptr, "invariant");
1087   assert(current->thread_state() != _thread_blocked, "invariant");
1088   assert(currentNode != nullptr, "invariant");
1089   assert(currentNode->_thread == current, "invariant");
1090   assert(_waiters > 0, "invariant");
1091   assert_mark_word_consistency();
1092 


















1093   for (;;) {
1094     ObjectWaiter::TStates v = currentNode->TState;
1095     guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1096     assert(!has_owner(current), "invariant");
1097 
1098     // This thread has been notified so try to reacquire the lock.
1099     if (try_lock(current) == TryLockResult::Success) {
1100       break;
1101     }
1102 
1103     // If that fails, spin again.  Note that spin count may be zero so the above TryLock
1104     // is necessary.
1105     if (try_spin(current)) {
1106         break;
1107     }
1108 
1109     {
1110       OSThreadContendState osts(current->osthread());
1111 
1112       assert(current->thread_state() == _thread_in_vm, "invariant");
1113 
1114       {
1115         ClearSuccOnSuspend csos(this);
1116         ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1117         current->_ParkEvent->park();









1118       }
1119     }
1120 
1121     // Try again, but just so we distinguish between futile wakeups and
1122     // successful wakeups.  The following test isn't algorithmically
1123     // necessary, but it helps us maintain sensible statistics.
1124     if (try_lock(current) == TryLockResult::Success) {
1125       break;
1126     }
1127 
1128     // The lock is still contested.
1129 
1130     // Assuming this is not a spurious wakeup we'll normally
1131     // find that _succ == current.
1132     if (has_successor(current)) clear_successor();
1133 
1134     // Invariant: after clearing _succ a contending thread
1135     // *must* retry  _owner before parking.
1136     OrderAccess::fence();
1137   }

1144   assert(!has_successor(current), "invariant");
1145   currentNode->TState = ObjectWaiter::TS_RUN;
1146   OrderAccess::fence();      // see comments at the end of enter_internal()
1147 }
1148 
1149 // This method is called from two places:
1150 // - On monitorenter contention with a null waiter.
1151 // - After Object.wait() times out or the target is interrupted to reenter the
1152 //   monitor, with the existing waiter.
1153 // For the Object.wait() case we do not delete the ObjectWaiter in case we
 1154 // successfully acquire the monitor since we are going to need it on return.
// Virtual-thread monitor-enter: returns true if the monitor was acquired,
// false if the caller must block. On false, the vthread's state is set to
// BLOCKING and the ObjectWaiter is stashed in the vthread so the operation
// can be resumed later (see resume_operation()).
 1155 bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
 1156   if (try_lock(current) == TryLockResult::Success) {
 1157     assert(has_owner(current), "invariant");
 1158     assert(!has_successor(current), "invariant");
 1159     return true;
 1160   }
 1161 
 1162   oop vthread = current->vthread();
 1163   ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);  // reuse the wait() node if given, else allocate






 1164   if (try_lock_or_add_to_entry_list(current, node)) {
 1165     // We got the lock.
 1166     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet

 1167     return true;
 1168   }
 1169   // This thread is now added to the entry_list.
 1170 
 1171   // We have to try once more since owner could have exited monitor and checked
 1172   // _entry_list before we added the node to the queue.
 1173   if (try_lock(current) == TryLockResult::Success) {
 1174     assert(has_owner(current), "invariant");
 1175     unlink_after_acquire(current, node);
 1176     if (has_successor(current)) clear_successor();
 1177     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet

 1178     return true;
 1179   }
 1180 
 1181   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
 1182   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
 1183 
 1184   // We didn't succeed in acquiring the monitor so increment _contentions and
 1185   // save ObjectWaiter* in the vthread since we will need it when resuming execution.
 1186   add_to_contentions(1);
 1187   java_lang_VirtualThread::set_objectWaiter(vthread, node);
 1188   return false;
 1189 }
1190 
1191 // Called from thaw code to resume the monitor operation that caused the vthread
1192 // to be unmounted. Method returns true if the monitor is successfully acquired,
1193 // which marks the end of the monitor operation, otherwise it returns false.
1194 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1195   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1196   assert(!has_owner(current), "");
1197 

1213   oop vthread = current->vthread();
1214   if (has_successor(current)) clear_successor();
1215 
1216   // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1217   OrderAccess::fence();
1218 
1219   if (try_lock(current) == TryLockResult::Success) {
1220     vthread_epilog(current, node);
1221     return true;
1222   }
1223 
1224   // We will return to Continuation.run() and unmount so set the right state.
1225   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1226 
1227   return false;
1228 }
1229 
1230 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1231   assert(has_owner(current), "invariant");
1232   add_to_contentions(-1);

1233 
1234   if (has_successor(current)) clear_successor();
1235 
1236   guarantee(_recursions == 0, "invariant");
1237 
1238   if (node->is_wait()) {
1239     _recursions = node->_recursions;   // restore the old recursion count
1240     _waiters--;                        // decrement the number of waiters
1241 
1242     if (node->_interrupted) {
1243       // We will throw at thaw end after finishing the mount transition.
1244       current->set_pending_interrupted_exception(true);
1245     }
1246   }
1247 
1248   unlink_after_acquire(current, node);
1249   delete node;
1250 
1251   // Clear the ObjectWaiter* from the vthread.
1252   java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);

1753       // consume an unpark() meant for the ParkEvent associated with
1754       // this ObjectMonitor.
1755     }
1756     if (wait_event.should_commit()) {
1757       post_monitor_wait_event(&wait_event, this, 0, millis, false);
1758     }
1759     THROW(vmSymbols::java_lang_InterruptedException());
1760     return;
1761   }
1762 
1763   freeze_result result;
1764   ContinuationEntry* ce = current->last_continuation();
1765   bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1766   if (is_virtual) {
1767     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1768       JvmtiExport::post_monitor_wait(current, object(), millis);
1769     }
1770     current->set_current_waiting_monitor(this);
1771     result = Continuation::try_preempt(current, ce->cont_oop(current));
1772     if (result == freeze_ok) {
1773       vthread_wait(current, millis);
1774       current->set_current_waiting_monitor(nullptr);
1775       return;
1776     }
1777   }
1778   // The jtiows does nothing for non-interruptible.
1779   JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1780 
1781   if (!is_virtual) { // it was already set for virtual thread
1782     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1783       JvmtiExport::post_monitor_wait(current, object(), millis);
1784 
1785       // The current thread already owns the monitor and it has not yet
1786       // been added to the wait queue so the current thread cannot be
1787       // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1788       // event handler cannot accidentally consume an unpark() meant for
1789       // the ParkEvent associated with this ObjectMonitor.
1790     }
1791     current->set_current_waiting_monitor(this);
1792   }
1793   // create a node to be put into the queue

1983   Thread::SpinAcquire(&_wait_set_lock, "wait_set - notify");
1984   ObjectWaiter* iterator = dequeue_waiter();
1985   if (iterator != nullptr) {
1986     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1987     guarantee(!iterator->_notified, "invariant");
1988 
1989     if (iterator->is_vthread()) {
1990       oop vthread = iterator->vthread();
1991       java_lang_VirtualThread::set_notified(vthread, true);
1992       int old_state = java_lang_VirtualThread::state(vthread);
1993       // If state is not WAIT/TIMED_WAIT then target could still be on
1994       // unmount transition, or wait could have already timed-out or target
1995       // could have been interrupted. In the first case, the target itself
1996       // will set the state to BLOCKED at the end of the unmount transition.
1997       // In the other cases the target would have been already unblocked so
1998       // there is nothing to do.
1999       if (old_state == java_lang_VirtualThread::WAIT ||
2000           old_state == java_lang_VirtualThread::TIMED_WAIT) {
2001         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2002       }




2003     }
2004 
2005     iterator->_notified = true;
2006     iterator->_notifier_tid = JFR_THREAD_ID(current);
2007     did_notify = true;
2008     add_to_entry_list(current, iterator);
2009 
2010     // _wait_set_lock protects the wait queue, not the entry_list.  We could
2011     // move the add-to-entry_list operation, above, outside the critical section
2012     // protected by _wait_set_lock.  In practice that's not useful.  With the
2013     // exception of  wait() timeouts and interrupts the monitor owner
2014     // is the only thread that grabs _wait_set_lock.  There's almost no contention
2015     // on _wait_set_lock so it's not profitable to reduce the length of the
2016     // critical section.
2017 
2018     if (!iterator->is_vthread()) {
2019       iterator->wait_reenter_begin(this);













2020     }
2021   }
2022   Thread::SpinRelease(&_wait_set_lock);
2023   return did_notify;
2024 }
2025 
2026 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2027                                       ObjectMonitor* monitor,
2028                                       int notified_count) {
2029   assert(event != nullptr, "invariant");
2030   assert(monitor != nullptr, "invariant");
2031   const Klass* monitor_klass = monitor->object()->klass();
2032   if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2033     return;
2034   }
2035   event->set_monitorClass(monitor_klass);
2036   // Set an address that is 'unique enough', such that events close in
2037   // time and with the same address are likely (but not guaranteed) to
2038   // belong to the same object.
2039   event->set_address((uintptr_t)monitor);

2089   quick_notifyAll(current);
2090 }
2091 
 // notifyAll fast path: the caller must own the monitor. Drains the entire
 // _wait_set by repeatedly calling notify_internal() (which moves one waiter
 // to the entry list), then posts a single JFR notify event with the tally.
 2092 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
 2093   assert(has_owner(current), "Precondition");
 2094 
 2095   EventJavaMonitorNotify event;
 2096   DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
 2097   int tally = 0;
 2098   while (_wait_set != nullptr) {
 2099     if (notify_internal(current)) {
 2100       tally++;  // only count waiters actually transferred
 2101     }
 2102   }
 2103 
 2104   if ((tally > 0) && event.should_commit()) {
 2105     post_monitor_notify_event(&event, this, /* notified_count = */ tally);
 2106   }
 2107 }
2108 
2109 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis) {
2110   oop vthread = current->vthread();
2111   ObjectWaiter* node = new ObjectWaiter(vthread, this);
2112   node->_is_wait = true;

2113   node->TState = ObjectWaiter::TS_WAIT;
2114   java_lang_VirtualThread::set_notified(vthread, false);  // Reset notified flag

2115 
2116   // Enter the waiting queue, which is a circular doubly linked list in this case
2117   // but it could be a priority queue or any data structure.
2118   // _wait_set_lock protects the wait queue.  Normally the wait queue is accessed only
2119   // by the owner of the monitor *except* in the case where park()
2120   // returns because of a timeout or interrupt.  Contention is exceptionally rare
2121   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2122 
2123   Thread::SpinAcquire(&_wait_set_lock, "wait_set - add");
2124   add_waiter(node);
2125   Thread::SpinRelease(&_wait_set_lock);
2126 
2127   node->_recursions = _recursions;   // record the old recursion count
2128   _recursions = 0;                   // set the recursion level to be 0
2129   _waiters++;                        // increment the number of waiters
2130   exit(current);                     // exit the monitor
2131   guarantee(!has_owner(current), "invariant");
2132 
2133   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2134   java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);

2140 
2141 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2142   // The first time we run after being preempted on Object.wait() we
2143   // need to check if we were interrupted or the wait timed-out, and
2144   // in that case remove ourselves from the _wait_set queue.
2145   if (node->TState == ObjectWaiter::TS_WAIT) {
2146     Thread::SpinAcquire(&_wait_set_lock, "wait_set - unlink");
2147     if (node->TState == ObjectWaiter::TS_WAIT) {
2148       dequeue_specific_waiter(node);       // unlink from wait_set
2149       assert(!node->_notified, "invariant");
2150       node->TState = ObjectWaiter::TS_RUN;
2151     }
2152     Thread::SpinRelease(&_wait_set_lock);
2153   }
2154 
2155   // If this was an interrupted case, set the _interrupted boolean so that
2156   // once we re-acquire the monitor we know if we need to throw IE or not.
2157   ObjectWaiter::TStates state = node->TState;
2158   bool was_notified = state == ObjectWaiter::TS_ENTER;
2159   assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2160   node->_interrupted = !was_notified && current->is_interrupted(false);
2161 
2162   // Post JFR and JVMTI events.

2163   EventJavaMonitorWait wait_event;
2164   if (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited()) {
2165     vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2166   }
2167 
2168   // Mark that we are at reenter so that we don't call this method again.
2169   node->_at_reenter = true;
2170 
2171   if (!was_notified) {
2172     bool acquired = vthread_monitor_enter(current, node);
2173     if (acquired) {
2174       guarantee(_recursions == 0, "invariant");
2175       _recursions = node->_recursions;   // restore the old recursion count
2176       _waiters--;                        // decrement the number of waiters
2177 
2178       if (node->_interrupted) {
2179         // We will throw at thaw end after finishing the mount transition.
2180         current->set_pending_interrupted_exception(true);
2181       }
2182 
2183       delete node;
2184       // Clear the ObjectWaiter* from the vthread.

2611   st->print_cr("  _object = " INTPTR_FORMAT, p2i(object_peek()));
2612   st->print_cr("  _pad_buf0 = {");
2613   st->print_cr("    [0] = '\\0'");
2614   st->print_cr("    ...");
2615   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2616   st->print_cr("  }");
2617   st->print_cr("  _owner = " INT64_FORMAT, owner_raw());
2618   st->print_cr("  _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2619   st->print_cr("  _pad_buf1 = {");
2620   st->print_cr("    [0] = '\\0'");
2621   st->print_cr("    ...");
2622   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2623   st->print_cr("  }");
2624   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2625   st->print_cr("  _recursions = %zd", _recursions);
2626   st->print_cr("  _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2627   st->print_cr("  _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2628   st->print_cr("  _succ = " INT64_FORMAT, successor());
2629   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2630   st->print_cr("  _contentions = %d", contentions());

2631   st->print_cr("  _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2632   st->print_cr("  _waiters = %d", _waiters);
2633   st->print_cr("  _wait_set_lock = %d", _wait_set_lock);
2634   st->print_cr("}");
2635 }
2636 #endif

 278   } else {
 279     // However, ThreadService::get_current_contended_monitor()
 280     // can call here via the VMThread so sanity check it.
 281     assert(self->is_VM_thread(), "must be");
 282   }
 283 #endif // ASSERT
 284 }
 285 
 // Construct a monitor for 'object' in the unowned, uncontended state:
 // no owner, zero recursions/contentions/waiters, and empty entry and
 // wait queues. _SpinDuration starts at the Knob_SpinLimit budget.
 286 ObjectMonitor::ObjectMonitor(oop object) :
 287   _metadata(0),
 288   _object(_oop_storage, object),
 289   _owner(NO_OWNER),
 290   _previous_owner_tid(0),
 291   _next_om(nullptr),
 292   _recursions(0),
 293   _entry_list(nullptr),
 294   _entry_list_tail(nullptr),
 295   _succ(NO_OWNER),
 296   _SpinDuration(ObjectMonitor::Knob_SpinLimit),
 297   _contentions(0),
 298   _unmounted_vthreads(0),  // # of unmounted vthreads blocked on this monitor (see inc/dec_unmounted_vthreads())
 299   _wait_set(nullptr),
 300   _waiters(0),
 301   _wait_set_lock(0),
 302   _stack_locker(nullptr)
 303 { }
 304 
 // Destructor: return the _object handle to the shared _oop_storage so the
 // slot can be reused; the monitor itself holds no other resources here.
 305 ObjectMonitor::~ObjectMonitor() {
 306   _object.release(_oop_storage);
 307 }
 308 
 // Return the monitored oop. check_object_context() asserts the caller is in
 // a context where resolving the handle is safe (see the ASSERT block above).
 309 oop ObjectMonitor::object() const {
 310   check_object_context();
 311   return _object.resolve();
 312 }
 313 
 314 void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
 315   if (current->is_suspended()) {
 316     _om->_recursions = 0;
 317     _om->clear_successor();
 318     // Don't need a full fence after clearing successor here because of the call to exit().

 967 
 968   ObjectWaiter node(current);
 969   current->_ParkEvent->reset();
 970 
 971   if (try_lock_or_add_to_entry_list(current, &node)) {
 972     return; // We got the lock.
 973   }
 974   // This thread is now added to the _entry_list.
 975 
 976   // The lock might have been released while this thread was occupied queueing
 977   // itself onto _entry_list.  To close the race and avoid "stranding" and
 978   // progress-liveness failure we must resample-retry _owner before parking.
 979   // Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
 980   // In this case the ST-MEMBAR is accomplished with CAS().
 981   //
 982   // TODO: Defer all thread state transitions until park-time.
 983   // Since state transitions are heavy and inefficient we'd like
 984   // to defer the state transitions until absolutely necessary,
 985   // and in doing so avoid some transitions ...
 986 
 987   // If there are unmounted virtual threads in the _entry_list do a timed-park
 989   // instead to alleviate some deadlock cases where one of them is picked as
 989   // the successor but cannot run due to having run out of carriers. This can
 990   // happen, for example, if this is a pinned virtual thread currently loading
 991   // or initializing a class, and all other carriers have a pinned vthread
 992   // waiting for said class to be loaded/initialized.
 993   // Read counter *after* adding this thread to the _entry_list.
 994   // Adding to _entry_list uses Atomic::cmpxchg() which already provides
 995   // a fence that prevents this load from floating up previous store.
 996   bool do_timed_parked = has_unmounted_vthreads();
 997   static int MAX_RECHECK_INTERVAL = 1000;
 998   int recheck_interval = 1;





 999 
1000   for (;;) {
1001 
1002     if (try_lock(current) == TryLockResult::Success) {
1003       break;
1004     }
1005     assert(!has_owner(current), "invariant");
1006 
1007     // park self
1008     if (do_timed_parked) {
1009       current->_ParkEvent->park((jlong) recheck_interval);
1010       // Increase the recheck_interval, but clamp the value.
1011       recheck_interval *= 8;
1012       if (recheck_interval > MAX_RECHECK_INTERVAL) {
1013         recheck_interval = MAX_RECHECK_INTERVAL;
1014       }
1015     } else {
1016       current->_ParkEvent->park();
1017     }
1018 

1073   // the ST of null into _owner in the *subsequent* (following) corresponding
1074   // monitorexit.
1075 
1076   return;
1077 }
1078 
1079 // reenter_internal() is a specialized inline form of the latter half of the
1080 // contended slow-path from enter_internal().  We use reenter_internal() only for
1081 // monitor reentry in wait().
1082 //
1083 // In the future we should reconcile enter_internal() and reenter_internal().
1084 
1085 void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
1086   assert(current != nullptr, "invariant");
1087   assert(current->thread_state() != _thread_blocked, "invariant");
1088   assert(currentNode != nullptr, "invariant");
1089   assert(currentNode->_thread == current, "invariant");
1090   assert(_waiters > 0, "invariant");
1091   assert_mark_word_consistency();
1092 
1093   // If there are unmounted virtual threads in the _entry_list do a timed-park
 1094   // instead to alleviate some deadlock cases where one of them is picked as
1095   // the successor but cannot run due to having run out of carriers. This can
1096   // happen, for example, if this is a pinned virtual thread (or plain carrier)
1097   // waiting for a class to be initialized.
1098   // In theory we only get here in the "notification" case where the thread has
1099   // already been added to the _entry_list. But if the thread happened to be interrupted
1100   // at the same time it was being notified, we could have read a state of TS_ENTER
1101   // that led us here but the thread hasn't been added yet to the queue. In that
1102   // case getting a false value from has_unmounted_vthreads() is not a guarantee
1103   // that vthreads weren't added before this thread to the _entry_list. We will live
1104   // with this corner case not only because it would be very rare, but also because
1105   // if there are several carriers blocked in this same situation, this would only
1106   // happen for the first one notified.
1107   bool do_timed_parked = has_unmounted_vthreads();
1108   static int MAX_RECHECK_INTERVAL = 1000;
1109   int recheck_interval = 1;
1110 
1111   for (;;) {
1112     ObjectWaiter::TStates v = currentNode->TState;
1113     guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
1114     assert(!has_owner(current), "invariant");
1115 
1116     // This thread has been notified so try to reacquire the lock.
1117     if (try_lock(current) == TryLockResult::Success) {
1118       break;
1119     }
1120 
1121     // If that fails, spin again.  Note that spin count may be zero so the above TryLock
1122     // is necessary.
1123     if (try_spin(current)) {
1124         break;
1125     }
1126 
1127     {
1128       OSThreadContendState osts(current->osthread());
1129 
1130       assert(current->thread_state() == _thread_in_vm, "invariant");
1131 
1132       {
1133         ClearSuccOnSuspend csos(this);
1134         ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1135         if (do_timed_parked) {
1136           current->_ParkEvent->park((jlong) recheck_interval);
1137           // Increase the recheck_interval, but clamp the value.
1138           recheck_interval *= 8;
1139           if (recheck_interval > MAX_RECHECK_INTERVAL) {
1140             recheck_interval = MAX_RECHECK_INTERVAL;
1141           }
1142         } else {
1143           current->_ParkEvent->park();
1144         }
1145       }
1146     }
1147 
1148     // Try again, but just so we distinguish between futile wakeups and
1149     // successful wakeups.  The following test isn't algorithmically
1150     // necessary, but it helps us maintain sensible statistics.
1151     if (try_lock(current) == TryLockResult::Success) {
1152       break;
1153     }
1154 
1155     // The lock is still contested.
1156 
1157     // Assuming this is not a spurious wakeup we'll normally
1158     // find that _succ == current.
1159     if (has_successor(current)) clear_successor();
1160 
1161     // Invariant: after clearing _succ a contending thread
1162     // *must* retry  _owner before parking.
1163     OrderAccess::fence();
1164   }

1171   assert(!has_successor(current), "invariant");
1172   currentNode->TState = ObjectWaiter::TS_RUN;
1173   OrderAccess::fence();      // see comments at the end of enter_internal()
1174 }
1175 
1176 // This method is called from two places:
1177 // - On monitorenter contention with a null waiter.
1178 // - After Object.wait() times out or the target is interrupted to reenter the
1179 //   monitor, with the existing waiter.
1180 // For the Object.wait() case we do not delete the ObjectWaiter in case we
 1181 // successfully acquire the monitor since we are going to need it on return.
// Virtual-thread monitor-enter: returns true if the monitor was acquired,
// false if the caller must block. On false, the vthread's state is set to
// BLOCKING and the ObjectWaiter is stashed in the vthread so the operation
// can be resumed later (see resume_operation()). The _unmounted_vthreads
// counter is incremented before queueing; on the success paths it is
// decremented here, and on the false path it stays elevated until
// vthread_epilog() decrements it after the monitor is finally acquired.
1182 bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* waiter) {
1183   if (try_lock(current) == TryLockResult::Success) {
1184     assert(has_owner(current), "invariant");
1185     assert(!has_successor(current), "invariant");
1186     return true;
1187   }
1188 
1189   oop vthread = current->vthread();
1190   ObjectWaiter* node = waiter != nullptr ? waiter : new ObjectWaiter(vthread, this);  // reuse the wait() node if given, else allocate
1191 
1192   // Increment counter *before* adding the vthread to the _entry_list.
1193   // Adding to _entry_list uses Atomic::cmpxchg() which already provides
1194   // a fence that prevents reordering of the stores.
1195   inc_unmounted_vthreads();
1196 
1197   if (try_lock_or_add_to_entry_list(current, node)) {
1198     // We got the lock.
1199     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
1200     dec_unmounted_vthreads();
1201     return true;
1202   }
1203   // This thread is now added to the entry_list.
1204 
1205   // We have to try once more since owner could have exited monitor and checked
1206   // _entry_list before we added the node to the queue.
1207   if (try_lock(current) == TryLockResult::Success) {
1208     assert(has_owner(current), "invariant");
1209     unlink_after_acquire(current, node);
1210     if (has_successor(current)) clear_successor();
1211     if (waiter == nullptr) delete node;  // for Object.wait() don't delete yet
1212     dec_unmounted_vthreads();
1213     return true;
1214   }
1215 
1216   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1217   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1218 
1219   // We didn't succeed in acquiring the monitor so increment _contentions and
1220   // save ObjectWaiter* in the vthread since we will need it when resuming execution.
1221   add_to_contentions(1);
1222   java_lang_VirtualThread::set_objectWaiter(vthread, node);
1223   return false;
1224 }
1225 
1226 // Called from thaw code to resume the monitor operation that caused the vthread
1227 // to be unmounted. Method returns true if the monitor is successfully acquired,
1228 // which marks the end of the monitor operation, otherwise it returns false.
1229 bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
1230   assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
1231   assert(!has_owner(current), "");
1232 

1248   oop vthread = current->vthread();
1249   if (has_successor(current)) clear_successor();
1250 
1251   // Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
1252   OrderAccess::fence();
1253 
1254   if (try_lock(current) == TryLockResult::Success) {
1255     vthread_epilog(current, node);
1256     return true;
1257   }
1258 
1259   // We will return to Continuation.run() and unmount so set the right state.
1260   java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
1261 
1262   return false;
1263 }
1264 
1265 void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
1266   assert(has_owner(current), "invariant");
1267   add_to_contentions(-1);
1268   dec_unmounted_vthreads();
1269 
1270   if (has_successor(current)) clear_successor();
1271 
1272   guarantee(_recursions == 0, "invariant");
1273 
1274   if (node->is_wait()) {
1275     _recursions = node->_recursions;   // restore the old recursion count
1276     _waiters--;                        // decrement the number of waiters
1277 
1278     if (node->_interrupted) {
1279       // We will throw at thaw end after finishing the mount transition.
1280       current->set_pending_interrupted_exception(true);
1281     }
1282   }
1283 
1284   unlink_after_acquire(current, node);
1285   delete node;
1286 
1287   // Clear the ObjectWaiter* from the vthread.
1288   java_lang_VirtualThread::set_objectWaiter(current->vthread(), nullptr);

1789       // consume an unpark() meant for the ParkEvent associated with
1790       // this ObjectMonitor.
1791     }
1792     if (wait_event.should_commit()) {
1793       post_monitor_wait_event(&wait_event, this, 0, millis, false);
1794     }
1795     THROW(vmSymbols::java_lang_InterruptedException());
1796     return;
1797   }
1798 
1799   freeze_result result;
1800   ContinuationEntry* ce = current->last_continuation();
1801   bool is_virtual = ce != nullptr && ce->is_virtual_thread();
1802   if (is_virtual) {
1803     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1804       JvmtiExport::post_monitor_wait(current, object(), millis);
1805     }
1806     current->set_current_waiting_monitor(this);
1807     result = Continuation::try_preempt(current, ce->cont_oop(current));
1808     if (result == freeze_ok) {
1809       vthread_wait(current, millis, interruptible);
1810       current->set_current_waiting_monitor(nullptr);
1811       return;
1812     }
1813   }
1814   // The jtiows does nothing for non-interruptible.
1815   JavaThreadInObjectWaitState jtiows(current, millis != 0, interruptible);
1816 
1817   if (!is_virtual) { // it was already set for virtual thread
1818     if (interruptible && JvmtiExport::should_post_monitor_wait()) {
1819       JvmtiExport::post_monitor_wait(current, object(), millis);
1820 
1821       // The current thread already owns the monitor and it has not yet
1822       // been added to the wait queue so the current thread cannot be
1823       // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
1824       // event handler cannot accidentally consume an unpark() meant for
1825       // the ParkEvent associated with this ObjectMonitor.
1826     }
1827     current->set_current_waiting_monitor(this);
1828   }
1829   // create a node to be put into the queue

2019   Thread::SpinAcquire(&_wait_set_lock, "wait_set - notify");
2020   ObjectWaiter* iterator = dequeue_waiter();
2021   if (iterator != nullptr) {
2022     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
2023     guarantee(!iterator->_notified, "invariant");
2024 
2025     if (iterator->is_vthread()) {
2026       oop vthread = iterator->vthread();
2027       java_lang_VirtualThread::set_notified(vthread, true);
2028       int old_state = java_lang_VirtualThread::state(vthread);
2029       // If state is not WAIT/TIMED_WAIT then target could still be on
2030       // unmount transition, or wait could have already timed-out or target
2031       // could have been interrupted. In the first case, the target itself
2032       // will set the state to BLOCKED at the end of the unmount transition.
2033       // In the other cases the target would have been already unblocked so
2034       // there is nothing to do.
2035       if (old_state == java_lang_VirtualThread::WAIT ||
2036           old_state == java_lang_VirtualThread::TIMED_WAIT) {
2037         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
2038       }
2039       // Increment counter *before* adding the vthread to the _entry_list.
2040       // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2041       // a fence that prevents reordering of the stores.
2042       inc_unmounted_vthreads();
2043     }
2044 
2045     iterator->_notified = true;
2046     iterator->_notifier_tid = JFR_THREAD_ID(current);
2047     did_notify = true;
2048     add_to_entry_list(current, iterator);
2049 
2050     // _wait_set_lock protects the wait queue, not the entry_list.  We could
2051     // move the add-to-entry_list operation, above, outside the critical section
2052     // protected by _wait_set_lock.  In practice that's not useful.  With the
2053     // exception of  wait() timeouts and interrupts the monitor owner
2054     // is the only thread that grabs _wait_set_lock.  There's almost no contention
2055     // on _wait_set_lock so it's not profitable to reduce the length of the
2056     // critical section.
2057 
2058     if (!iterator->is_vthread()) {
2059       iterator->wait_reenter_begin(this);
2060 
2061       // Read counter *after* adding the thread to the _entry_list.
2062       // Adding to _entry_list uses Atomic::cmpxchg() which already provides
2063       // a fence that prevents this load from floating up previous store.
2064       if (has_unmounted_vthreads()) {
2065         // Wake up the thread to alleviate some deadlocks cases where the successor
2066         // that will be picked up when this thread releases the monitor is an unmounted
2067         // virtual thread that cannot run due to having run out of carriers. Upon waking
2068         // up, the thread will call reenter_internal() which will use time-park in case
2069         // there is contention and there are still vthreads in the _entry_list.
2070         JavaThread* t = iterator->thread();
2071         t->_ParkEvent->unpark();
2072       }
2073     }
2074   }
2075   Thread::SpinRelease(&_wait_set_lock);
2076   return did_notify;
2077 }
2078 
2079 static void post_monitor_notify_event(EventJavaMonitorNotify* event,
2080                                       ObjectMonitor* monitor,
2081                                       int notified_count) {
2082   assert(event != nullptr, "invariant");
2083   assert(monitor != nullptr, "invariant");
2084   const Klass* monitor_klass = monitor->object()->klass();
2085   if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
2086     return;
2087   }
2088   event->set_monitorClass(monitor_klass);
2089   // Set an address that is 'unique enough', such that events close in
2090   // time and with the same address are likely (but not guaranteed) to
2091   // belong to the same object.
2092   event->set_address((uintptr_t)monitor);

2142   quick_notifyAll(current);
2143 }
2144 
2145 void ObjectMonitor::quick_notifyAll(JavaThread* current) {
2146   assert(has_owner(current), "Precondition");
2147 
2148   EventJavaMonitorNotify event;
2149   DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
2150   int tally = 0;
2151   while (_wait_set != nullptr) {
2152     if (notify_internal(current)) {
2153       tally++;
2154     }
2155   }
2156 
2157   if ((tally > 0) && event.should_commit()) {
2158     post_monitor_notify_event(&event, this, /* notified_count = */ tally);
2159   }
2160 }
2161 
2162 void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interruptible) {
2163   oop vthread = current->vthread();
2164   ObjectWaiter* node = new ObjectWaiter(vthread, this);
2165   node->_is_wait = true;
2166   node->_interruptible = interruptible;
2167   node->TState = ObjectWaiter::TS_WAIT;
2168   java_lang_VirtualThread::set_notified(vthread, false);  // Reset notified flag
2169   java_lang_VirtualThread::set_interruptible_wait(vthread, interruptible);
2170 
2171   // Enter the waiting queue, which is a circular doubly linked list in this case
2172   // but it could be a priority queue or any data structure.
2173   // _wait_set_lock protects the wait queue.  Normally the wait queue is accessed only
2174   // by the owner of the monitor *except* in the case where park()
2175   // returns because of a timeout or interrupt.  Contention is exceptionally rare
2176   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
2177 
2178   Thread::SpinAcquire(&_wait_set_lock, "wait_set - add");
2179   add_waiter(node);
2180   Thread::SpinRelease(&_wait_set_lock);
2181 
2182   node->_recursions = _recursions;   // record the old recursion count
2183   _recursions = 0;                   // set the recursion level to be 0
2184   _waiters++;                        // increment the number of waiters
2185   exit(current);                     // exit the monitor
2186   guarantee(!has_owner(current), "invariant");
2187 
2188   assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2189   java_lang_VirtualThread::set_state(vthread, millis == 0 ? java_lang_VirtualThread::WAITING : java_lang_VirtualThread::TIMED_WAITING);

2195 
2196 bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node, ContinuationWrapper& cont) {
2197   // The first time we run after being preempted on Object.wait() we
2198   // need to check if we were interrupted or the wait timed-out, and
2199   // in that case remove ourselves from the _wait_set queue.
2200   if (node->TState == ObjectWaiter::TS_WAIT) {
2201     Thread::SpinAcquire(&_wait_set_lock, "wait_set - unlink");
2202     if (node->TState == ObjectWaiter::TS_WAIT) {
2203       dequeue_specific_waiter(node);       // unlink from wait_set
2204       assert(!node->_notified, "invariant");
2205       node->TState = ObjectWaiter::TS_RUN;
2206     }
2207     Thread::SpinRelease(&_wait_set_lock);
2208   }
2209 
2210   // If this was an interrupted case, set the _interrupted boolean so that
2211   // once we re-acquire the monitor we know if we need to throw IE or not.
2212   ObjectWaiter::TStates state = node->TState;
2213   bool was_notified = state == ObjectWaiter::TS_ENTER;
2214   assert(was_notified || state == ObjectWaiter::TS_RUN, "");
2215   node->_interrupted = node->_interruptible && !was_notified && current->is_interrupted(false);
2216 
2217   // Post JFR and JVMTI events. If non-interruptible we are in
2218   // ObjectLocker case so we don't post anything.
2219   EventJavaMonitorWait wait_event;
2220   if (node->_interruptible && (wait_event.should_commit() || JvmtiExport::should_post_monitor_waited())) {
2221     vthread_monitor_waited_event(current, node, cont, &wait_event, !was_notified && !node->_interrupted);
2222   }
2223 
2224   // Mark that we are at reenter so that we don't call this method again.
2225   node->_at_reenter = true;
2226 
2227   if (!was_notified) {
2228     bool acquired = vthread_monitor_enter(current, node);
2229     if (acquired) {
2230       guarantee(_recursions == 0, "invariant");
2231       _recursions = node->_recursions;   // restore the old recursion count
2232       _waiters--;                        // decrement the number of waiters
2233 
2234       if (node->_interrupted) {
2235         // We will throw at thaw end after finishing the mount transition.
2236         current->set_pending_interrupted_exception(true);
2237       }
2238 
2239       delete node;
2240       // Clear the ObjectWaiter* from the vthread.

2667   st->print_cr("  _object = " INTPTR_FORMAT, p2i(object_peek()));
2668   st->print_cr("  _pad_buf0 = {");
2669   st->print_cr("    [0] = '\\0'");
2670   st->print_cr("    ...");
2671   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2672   st->print_cr("  }");
2673   st->print_cr("  _owner = " INT64_FORMAT, owner_raw());
2674   st->print_cr("  _previous_owner_tid = " UINT64_FORMAT, _previous_owner_tid);
2675   st->print_cr("  _pad_buf1 = {");
2676   st->print_cr("    [0] = '\\0'");
2677   st->print_cr("    ...");
2678   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2679   st->print_cr("  }");
2680   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
2681   st->print_cr("  _recursions = %zd", _recursions);
2682   st->print_cr("  _entry_list = " INTPTR_FORMAT, p2i(_entry_list));
2683   st->print_cr("  _entry_list_tail = " INTPTR_FORMAT, p2i(_entry_list_tail));
2684   st->print_cr("  _succ = " INT64_FORMAT, successor());
2685   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2686   st->print_cr("  _contentions = %d", contentions());
2687   st->print_cr("  _unmounted_vthreads = " INT64_FORMAT, _unmounted_vthreads);
2688   st->print_cr("  _wait_set = " INTPTR_FORMAT, p2i(_wait_set));
2689   st->print_cr("  _waiters = %d", _waiters);
2690   st->print_cr("  _wait_set_lock = %d", _wait_set_lock);
2691   st->print_cr("}");
2692 }
2693 #endif
< prev index next >