35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/oopHandle.inline.hpp"
38 #include "oops/weakHandle.inline.hpp"
39 #include "prims/jvmtiDeferredUpdates.hpp"
40 #include "prims/jvmtiExport.hpp"
41 #include "runtime/atomic.hpp"
42 #include "runtime/globals.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/interfaceSupport.inline.hpp"
45 #include "runtime/javaThread.inline.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "runtime/objectMonitor.hpp"
48 #include "runtime/objectMonitor.inline.hpp"
49 #include "runtime/orderAccess.hpp"
50 #include "runtime/osThread.hpp"
51 #include "runtime/perfData.hpp"
52 #include "runtime/safefetch.hpp"
53 #include "runtime/safepointMechanism.inline.hpp"
54 #include "runtime/sharedRuntime.hpp"
55 #include "services/threadService.hpp"
56 #include "utilities/dtrace.hpp"
57 #include "utilities/globalDefinitions.hpp"
58 #include "utilities/macros.hpp"
59 #include "utilities/preserveException.hpp"
60 #if INCLUDE_JFR
61 #include "jfr/support/jfrFlush.hpp"
62 #endif
63
64 #ifdef DTRACE_ENABLED
65
66 // Only bother with this argument setup if dtrace is available
67 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
68
69
70 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
71 char* bytes = nullptr; \
72 int len = 0; \
73 jlong jtid = SharedRuntime::get_java_tid(thread); \
74 Symbol* klassname = obj->klass()->name(); \
95 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
96 { \
97 if (DTraceMonitorProbes) { \
98 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
99 HOTSPOT_MONITOR_##probe(jtid, \
100 (uintptr_t)(monitor), bytes, len); \
101 } \
102 }
103
104 #else // ndef DTRACE_ENABLED
105
106 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
107 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
108
109 #endif // ndef DTRACE_ENABLED
110
// Debug-only flag checked via assert in EnterI(); presumably set during VM
// initialization -- the code that sets it is not visible in this chunk.
DEBUG_ONLY(static volatile bool InitDone = false;)

// OopStorage backing the _object handle of every ObjectMonitor: the handle
// is allocated from it in the constructor and released back to it in the
// destructor.
OopStorage* ObjectMonitor::_oop_storage = nullptr;
114
115 // -----------------------------------------------------------------------------
116 // Theory of operations -- Monitors lists, thread residency, etc:
117 //
118 // * A thread acquires ownership of a monitor by successfully
119 // CAS()ing the _owner field from null to non-null.
120 //
121 // * Invariant: A thread appears on at most one monitor list --
122 // cxq, EntryList or WaitSet -- at any one time.
123 //
124 // * Contending threads "push" themselves onto the cxq with CAS
125 // and then spin/park.
126 //
127 // * After a contending thread eventually acquires the lock it must
128 // dequeue itself from either the EntryList or the cxq.
129 //
130 // * The exiting thread identifies and unparks an "heir presumptive"
131 // tentative successor thread on the EntryList. Critically, the
132 // exiting thread doesn't unlink the successor thread from the EntryList.
133 // After having been unparked, the wakee will recontend for ownership of
134 // the monitor. The successor (wakee) will either acquire the lock or
232 switch (jt->thread_state()) {
233 case _thread_in_vm: // the usual case
234 case _thread_in_Java: // during deopt
235 break;
236 default:
237 fatal("called from an unsafe thread state");
238 }
239 assert(jt->is_active_Java_thread(), "must be active JavaThread");
240 } else {
241 // However, ThreadService::get_current_contended_monitor()
242 // can call here via the VMThread so sanity check it.
243 assert(self->is_VM_thread(), "must be");
244 }
245 #endif // ASSERT
246 }
247
// Construct a monitor for 'object'. All fields start in their
// unowned/empty state; the handle to the object is allocated from
// _oop_storage and released in the destructor.
ObjectMonitor::ObjectMonitor(oop object) :
  _header(markWord::zero()),          // displaced mark word; zero until set
  _object(_oop_storage, object),      // handle to the associated object
  _owner(nullptr),                    // no owning thread yet
  _previous_owner_tid(0),             // thread id of previous owner (0 = none)
  _next_om(nullptr),                  // link for external monitor lists
  _recursions(0),                     // recursive enter count of the owner
  _EntryList(nullptr),                // no threads waiting to enter
  _cxq(nullptr),                      // no recently-arrived contenders
  _succ(nullptr),                     // no heir-presumptive successor
  _Responsible(nullptr),              // no designated timed-park thread
  _SpinDuration(ObjectMonitor::Knob_SpinLimit),
  _contentions(0),                    // no contending threads yet
  _WaitSet(nullptr),                  // no threads in Object.wait()
  _waiters(0),
  _WaitSetLock(0)                     // spin lock guarding _WaitSet
{ }
265
ObjectMonitor::~ObjectMonitor() {
  // Return the object handle to the OopStorage it was allocated from
  // in the constructor.
  _object.release(_oop_storage);
}
269
270 oop ObjectMonitor::object() const {
271 check_object_context();
299
300 // -----------------------------------------------------------------------------
301 // Enter support
302
// Acquire this monitor on behalf of 'locking_thread' (which is either the
// current thread or a thread suspended for object deoptimization). Returns
// true on success; returns false only when the monitor lost the race to
// async deflation, in which case the caller must retry on a fresh monitor.
// The CAS sequence below is order-sensitive: it implements the contender
// side of the 2-part async deflation protocol.
bool ObjectMonitor::enter_for(JavaThread* locking_thread) {
  // Used by ObjectSynchronizer::enter_for to enter for another thread.
  // The monitor is private to or already owned by locking_thread which must be suspended.
  // So this code may only contend with deflation.
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

  // Block out deflation as soon as possible.
  add_to_contentions(1);

  bool success = false;
  if (!is_being_async_deflated()) {
    // Try the uncontended path: CAS the owner field from null to locking_thread.
    void* prev_owner = try_set_owner_from(nullptr, locking_thread);

    if (prev_owner == nullptr) {
      // Uncontended acquire succeeded.
      assert(_recursions == 0, "invariant");
      success = true;
    } else if (prev_owner == locking_thread) {
      // Already owned by locking_thread: recursive enter.
      _recursions++;
      success = true;
    } else if (prev_owner == DEFLATER_MARKER) {
      // Racing with deflation.
      prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
      if (prev_owner == DEFLATER_MARKER) {
        // Cancelled deflation. Increment contentions as part of the deflation protocol.
        add_to_contentions(1);
        success = true;
      } else if (prev_owner == nullptr) {
        // The deflater backed out and restored a null owner; retry the
        // uncontended CAS.
        // At this point we cannot race with deflation as we have both incremented
        // contentions, seen contention > 0 and seen a DEFLATER_MARKER.
        // success will only be false if this races with something other than
        // deflation.
        prev_owner = try_set_owner_from(nullptr, locking_thread);
        success = prev_owner == nullptr;
      }
    } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) {
      // The owner field holds a BasicLock* on locking_thread's own stack:
      // convert the stack lock into a one-level recursive monitor lock.
      assert(_recursions == 0, "must be");
      _recursions = 1;
      set_owner_from_BasicLock(prev_owner, locking_thread);
      success = true;
    }
    // Any other observed owner is a protocol violation: the monitor is
    // supposed to be private to or owned by locking_thread.
    assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
           ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
           p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
  } else {
    // Async deflation is in progress and our contentions increment
    // above lost the race to async deflation. Undo the work and
    // force the caller to retry.
    const oop l_object = object();
    if (l_object != nullptr) {
      // Attempt to restore the header/dmw to the object's header so that
      // we only retry once if the deflater thread happens to be slow.
      install_displaced_markword_in_object(l_object);
    }
  }

  // Drop the contentions increment taken at the top of this function.
  add_to_contentions(-1);

  assert(!success || owner_raw() == locking_thread, "must be");

  return success;
}
364
365 bool ObjectMonitor::enter(JavaThread* current) {
366 assert(current == JavaThread::current(), "must be");
367 // The following code is ordered to check the most common cases first
368 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
369
370 void* cur = try_set_owner_from(nullptr, current);
371 if (cur == nullptr) {
372 assert(_recursions == 0, "invariant");
373 return true;
374 }
375
376 if (cur == current) {
377 // TODO-FIXME: check for integer overflow! BUGID 6557169.
378 _recursions++;
379 return true;
380 }
381
382 if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
383 assert(_recursions == 0, "internal state error");
384 _recursions = 1;
385 set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*.
386 return true;
387 }
388
389 // We've encountered genuine contention.
390
391 // Try one round of spinning *before* enqueueing current
392 // and before going through the awkward and expensive state
393 // transitions. The following spin is strictly optional ...
394 // Note that if we acquire the monitor from an initial spin
395 // we forgo posting JVMTI events and firing DTRACE probes.
396 if (TrySpin(current)) {
397 assert(owner_raw() == current, "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
398 assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
399 assert(object()->mark() == markWord::encode(this),
400 "object mark must match encoded this: mark=" INTPTR_FORMAT
401 ", encoded this=" INTPTR_FORMAT, object()->mark().value(),
402 markWord::encode(this).value());
403 return true;
404 }
405
406 assert(owner_raw() != current, "invariant");
407 assert(_succ != current, "invariant");
408 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
409 assert(current->thread_state() != _thread_blocked, "invariant");
410
411 // Keep track of contention for JVM/TI and M&M queries.
412 add_to_contentions(1);
413 if (is_being_async_deflated()) {
414 // Async deflation is in progress and our contentions increment
415 // above lost the race to async deflation. Undo the work and
416 // force the caller to retry.
417 const oop l_object = object();
418 if (l_object != nullptr) {
419 // Attempt to restore the header/dmw to the object's header so that
420 // we only retry once if the deflater thread happens to be slow.
421 install_displaced_markword_in_object(l_object);
422 }
423 add_to_contentions(-1);
424 return false;
425 }
426
434 event.set_address((uintptr_t)this);
435 }
436
437 { // Change java thread status to indicate blocked on monitor enter.
438 JavaThreadBlockedOnMonitorEnterState jtbmes(current, this);
439
440 assert(current->current_pending_monitor() == nullptr, "invariant");
441 current->set_current_pending_monitor(this);
442
443 DTRACE_MONITOR_PROBE(contended__enter, this, object(), current);
444 if (JvmtiExport::should_post_monitor_contended_enter()) {
445 JvmtiExport::post_monitor_contended_enter(current, this);
446
447 // The current thread does not yet own the monitor and does not
448 // yet appear on any queues that would get it made the successor.
449 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
450 // handler cannot accidentally consume an unpark() meant for the
451 // ParkEvent associated with this ObjectMonitor.
452 }
453
454 OSThreadContendState osts(current->osthread());
455
456 assert(current->thread_state() == _thread_in_vm, "invariant");
457
458 for (;;) {
459 ExitOnSuspend eos(this);
460 {
461 ThreadBlockInVMPreprocess<ExitOnSuspend> tbivs(current, eos, true /* allow_suspend */);
462 EnterI(current);
463 current->set_current_pending_monitor(nullptr);
464 // We can go to a safepoint at the end of this block. If we
465 // do a thread dump during that safepoint, then this thread will show
466 // as having "-locked" the monitor, but the OS and java.lang.Thread
467 // states will still report that the thread is blocked trying to
468 // acquire it.
469 // If there is a suspend request, ExitOnSuspend will exit the OM
470 // and set the OM as pending.
471 }
472 if (!eos.exited()) {
473 // ExitOnSuspend did not exit the OM
474 assert(owner_raw() == current, "invariant");
475 break;
476 }
477 }
478
479 // We've just gotten past the enter-check-for-suspend dance and we now own
480 // the monitor free and clear.
481 }
482
483 add_to_contentions(-1);
484 assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
485
486 // Must either set _recursions = 0 or ASSERT _recursions == 0.
487 assert(_recursions == 0, "invariant");
488 assert(owner_raw() == current, "invariant");
489 assert(_succ != current, "invariant");
490 assert(object()->mark() == markWord::encode(this), "invariant");
491
492 // The thread -- now the owner -- is back in vm mode.
493 // Report the glorious news via TI,DTrace and jvmstat.
494 // The probe effect is non-trivial. All the reportage occurs
495 // while we hold the monitor, increasing the length of the critical
496 // section. Amdahl's parallel speedup law comes vividly into play.
497 //
498 // Another option might be to aggregate the events (thread local or
499 // per-monitor aggregation) and defer reporting until a more opportune
500 // time -- such as next time some thread encounters contention but has
501 // yet to acquire the lock. While spinning that thread could
502 // spinning we could increment JVMStat counters, etc.
503
504 DTRACE_MONITOR_PROBE(contended__entered, this, object(), current);
505 if (JvmtiExport::should_post_monitor_contended_entered()) {
506 JvmtiExport::post_monitor_contended_entered(current, this);
507
508 // The current thread already owns the monitor and is not going to
543 // makes contentions negative as signals to contending threads that
544 // an async deflation is in progress. There are a number of checks
545 // as part of the protocol to make sure that the calling thread has
546 // not lost the race to a contending thread.
547 //
548 // The ObjectMonitor has been successfully async deflated when:
549 // (contentions < 0)
550 // Contending threads that see that condition know to retry their operation.
551 //
552 bool ObjectMonitor::deflate_monitor() {
553 if (is_busy()) {
554 // Easy checks are first - the ObjectMonitor is busy so no deflation.
555 return false;
556 }
557
558 const oop obj = object_peek();
559
560 if (obj == nullptr) {
561 // If the object died, we can recycle the monitor without racing with
562 // Java threads. The GC already broke the association with the object.
563 set_owner_from(nullptr, DEFLATER_MARKER);
564 assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions());
565 _contentions = INT_MIN; // minimum negative int
566 } else {
567 // Attempt async deflation protocol.
568
569 // Set a null owner to DEFLATER_MARKER to force any contending thread
570 // through the slow path. This is just the first part of the async
571 // deflation dance.
572 if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) {
573 // The owner field is no longer null so we lost the race since the
574 // ObjectMonitor is now busy.
575 return false;
576 }
577
578 if (contentions() > 0 || _waiters != 0) {
579 // Another thread has raced to enter the ObjectMonitor after
580 // is_busy() above or has already entered and waited on
581 // it which makes it busy so no deflation. Restore owner to
582 // null if it is still DEFLATER_MARKER.
583 if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
584 // Deferred decrement for the JT EnterI() that cancelled the async deflation.
585 add_to_contentions(-1);
586 }
587 return false;
588 }
589
590 // Make a zero contentions field negative to force any contending threads
591 // to retry. This is the second part of the async deflation dance.
592 if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
593 // Contentions was no longer 0 so we lost the race since the
594 // ObjectMonitor is now busy. Restore owner to null if it is
595 // still DEFLATER_MARKER:
596 if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
597 // Deferred decrement for the JT EnterI() that cancelled the async deflation.
598 add_to_contentions(-1);
599 }
600 return false;
601 }
602 }
603
604 // Sanity checks for the races:
605 guarantee(owner_is_DEFLATER_MARKER(), "must be deflater marker");
606 guarantee(contentions() < 0, "must be negative: contentions=%d",
607 contentions());
608 guarantee(_waiters == 0, "must be 0: waiters=%d", _waiters);
609 guarantee(_cxq == nullptr, "must be no contending threads: cxq="
610 INTPTR_FORMAT, p2i(_cxq));
611 guarantee(_EntryList == nullptr,
612 "must be no entering threads: EntryList=" INTPTR_FORMAT,
613 p2i(_EntryList));
614
615 if (obj != nullptr) {
616 if (log_is_enabled(Trace, monitorinflation)) {
672 log_info(monitorinflation)("install_displaced_markword_in_object: "
673 "failed cas_set_mark: new_mark=" INTPTR_FORMAT
674 ", old_mark=" INTPTR_FORMAT ", res=" INTPTR_FORMAT,
675 dmw.value(), markWord::encode(this).value(),
676 res.value());
677 }
678
679 // Note: It does not matter which thread restored the header/dmw
680 // into the object's header. The thread deflating the monitor just
681 // wanted the object's header restored and it is. The threads that
682 // detected a race with the deflation process also wanted the
683 // object's header restored before they retry their operation and
684 // because it is restored they will only retry once.
685 }
686
687 // Convert the fields used by is_busy() to a string that can be
688 // used for diagnostic output.
689 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
690 ss->print("is_busy: waiters=%d"
691 ", contentions=%d"
692 ", owner=" PTR_FORMAT
693 ", cxq=" PTR_FORMAT
694 ", EntryList=" PTR_FORMAT,
695 _waiters,
696 (contentions() > 0 ? contentions() : 0),
697 owner_is_DEFLATER_MARKER()
698 // We report null instead of DEFLATER_MARKER here because is_busy()
699 // ignores DEFLATER_MARKER values.
700 ? p2i(nullptr)
701 : p2i(owner_raw()),
702 p2i(_cxq),
703 p2i(_EntryList));
704 return ss->base();
705 }
706
707 #define MAX_RECHECK_INTERVAL 1000
708
709 void ObjectMonitor::EnterI(JavaThread* current) {
710 assert(current->thread_state() == _thread_blocked, "invariant");
711
712 // Try the lock - TATAS
713 if (TryLock(current) == TryLockResult::Success) {
714 assert(_succ != current, "invariant");
715 assert(owner_raw() == current, "invariant");
716 assert(_Responsible != current, "invariant");
717 return;
718 }
719
720 if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
721 // Cancelled the in-progress async deflation by changing owner from
722 // DEFLATER_MARKER to current. As part of the contended enter protocol,
723 // contentions was incremented to a positive value before EnterI()
724 // was called and that prevents the deflater thread from winning the
725 // last part of the 2-part async deflation protocol. After EnterI()
726 // returns to enter(), contentions is decremented because the caller
727 // now owns the monitor. We bump contentions an extra time here to
728 // prevent the deflater thread from winning the last part of the
729 // 2-part async deflation protocol after the regular decrement
730 // occurs in enter(). The deflater thread will decrement contentions
731 // after it recognizes that the async deflation was cancelled.
732 add_to_contentions(1);
733 assert(_succ != current, "invariant");
734 assert(_Responsible != current, "invariant");
735 return;
736 }
737
738 assert(InitDone, "Unexpectedly not initialized");
739
740 // We try one round of spinning *before* enqueueing current.
741 //
742 // If the _owner is ready but OFFPROC we could use a YieldTo()
743 // operation to donate the remainder of this thread's quantum
744 // to the owner. This has subtle but beneficial affinity
745 // effects.
746
747 if (TrySpin(current)) {
748 assert(owner_raw() == current, "invariant");
749 assert(_succ != current, "invariant");
750 assert(_Responsible != current, "invariant");
751 return;
752 }
753
754 // The Spin failed -- Enqueue and park the thread ...
755 assert(_succ != current, "invariant");
756 assert(owner_raw() != current, "invariant");
757 assert(_Responsible != current, "invariant");
758
759 // Enqueue "current" on ObjectMonitor's _cxq.
760 //
761 // Node acts as a proxy for current.
762 // As an aside, if were to ever rewrite the synchronization code mostly
763 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
764 // Java objects. This would avoid awkward lifecycle and liveness issues,
765 // as well as eliminate a subset of ABA issues.
766 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
767
768 ObjectWaiter node(current);
769 current->_ParkEvent->reset();
770 node._prev = (ObjectWaiter*) 0xBAD;
771 node.TState = ObjectWaiter::TS_CXQ;
772
773 // Push "current" onto the front of the _cxq.
774 // Once on cxq/EntryList, current stays on-queue until it acquires the lock.
775 // Note that spinning tends to reduce the rate at which threads
776 // enqueue and dequeue on EntryList|cxq.
777 ObjectWaiter* nxt;
778 for (;;) {
779 node._next = nxt = _cxq;
780 if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;
781
782 // Interference - the CAS failed because _cxq changed. Just retry.
783 // As an optional optimization we retry the lock.
784 if (TryLock(current) == TryLockResult::Success) {
785 assert(_succ != current, "invariant");
786 assert(owner_raw() == current, "invariant");
787 assert(_Responsible != current, "invariant");
788 return;
789 }
790 }
791
792 // Check for cxq|EntryList edge transition to non-null. This indicates
793 // the onset of contention. While contention persists exiting threads
794 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
795 // operations revert to the faster 1-0 mode. This enter operation may interleave
796 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
797 // arrange for one of the contending thread to use a timed park() operations
798 // to detect and recover from the race. (Stranding is form of progress failure
799 // where the monitor is unlocked but all the contending threads remain parked).
800 // That is, at least one of the contended threads will periodically poll _owner.
801 // One of the contending threads will become the designated "Responsible" thread.
802 // The Responsible thread uses a timed park instead of a normal indefinite park
803 // operation -- it periodically wakes and checks for and recovers from potential
804 // strandings admitted by 1-0 exit operations. We need at most one Responsible
805 // thread per-monitor at any given moment. Only threads on cxq|EntryList may
806 // be responsible for a monitor.
814
815 if (nxt == nullptr && _EntryList == nullptr) {
816 // Try to assume the role of responsible thread for the monitor.
817 // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=current }
818 Atomic::replace_if_null(&_Responsible, current);
819 }
820
821 // The lock might have been released while this thread was occupied queueing
822 // itself onto _cxq. To close the race and avoid "stranding" and
823 // progress-liveness failure we must resample-retry _owner before parking.
824 // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
825 // In this case the ST-MEMBAR is accomplished with CAS().
826 //
827 // TODO: Defer all thread state transitions until park-time.
828 // Since state transitions are heavy and inefficient we'd like
829 // to defer the state transitions until absolutely necessary,
830 // and in doing so avoid some transitions ...
831
832 int nWakeups = 0;
833 int recheckInterval = 1;
834
835 for (;;) {
836
837 if (TryLock(current) == TryLockResult::Success) {
838 break;
839 }
840 assert(owner_raw() != current, "invariant");
841
842 // park self
843 if (_Responsible == current) {
844 current->_ParkEvent->park((jlong) recheckInterval);
845 // Increase the recheckInterval, but clamp the value.
846 recheckInterval *= 8;
847 if (recheckInterval > MAX_RECHECK_INTERVAL) {
848 recheckInterval = MAX_RECHECK_INTERVAL;
849 }
850 } else {
851 current->_ParkEvent->park();
852 }
853
854 if (TryLock(current) == TryLockResult::Success) {
855 break;
856 }
857
858 if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
859 // Cancelled the in-progress async deflation by changing owner from
860 // DEFLATER_MARKER to current. As part of the contended enter protocol,
861 // contentions was incremented to a positive value before EnterI()
862 // was called and that prevents the deflater thread from winning the
863 // last part of the 2-part async deflation protocol. After EnterI()
893 // We can find that we were unpark()ed and redesignated _succ while
894 // we were spinning. That's harmless. If we iterate and call park(),
895 // park() will consume the event and return immediately and we'll
896 // just spin again. This pattern can repeat, leaving _succ to simply
897 // spin on a CPU.
898
899 if (_succ == current) _succ = nullptr;
900
901 // Invariant: after clearing _succ a thread *must* retry _owner before parking.
902 OrderAccess::fence();
903 }
904
905 // Egress :
906 // current has acquired the lock -- Unlink current from the cxq or EntryList.
907 // Normally we'll find current on the EntryList .
908 // From the perspective of the lock owner (this thread), the
909 // EntryList is stable and cxq is prepend-only.
910 // The head of cxq is volatile but the interior is stable.
911 // In addition, current.TState is stable.
912
913 assert(owner_raw() == current, "invariant");
914
915 UnlinkAfterAcquire(current, &node);
916 if (_succ == current) _succ = nullptr;
917
918 assert(_succ != current, "invariant");
919 if (_Responsible == current) {
920 _Responsible = nullptr;
921 OrderAccess::fence(); // Dekker pivot-point
922
923 // We may leave threads on cxq|EntryList without a designated
924 // "Responsible" thread. This is benign. When this thread subsequently
925 // exits the monitor it can "see" such preexisting "old" threads --
926 // threads that arrived on the cxq|EntryList before the fence, above --
927 // by LDing cxq|EntryList. Newly arrived threads -- that is, threads
928 // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
929 // non-null and elect a new "Responsible" timer thread.
930 //
931 // This thread executes:
932 // ST Responsible=null; MEMBAR (in enter epilogue - here)
933 // LD cxq|EntryList (in subsequent exit)
949 // STs to monitor meta-data and user-data could reorder with (become
950 // visible after) the ST in exit that drops ownership of the lock.
951 // Some other thread could then acquire the lock, but observe inconsistent
952 // or old monitor meta-data and heap data. That violates the JMM.
953 // To that end, the 1-0 exit() operation must have at least STST|LDST
954 // "release" barrier semantics. Specifically, there must be at least a
955 // STST|LDST barrier in exit() before the ST of null into _owner that drops
956 // the lock. The barrier ensures that changes to monitor meta-data and data
957 // protected by the lock will be visible before we release the lock, and
958 // therefore before some other thread (CPU) has a chance to acquire the lock.
959 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
960 //
961 // Critically, any prior STs to _succ or EntryList must be visible before
962 // the ST of null into _owner in the *subsequent* (following) corresponding
963 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
964 // execute a serializing instruction.
965
966 return;
967 }
968
969 // ReenterI() is a specialized inline form of the latter half of the
970 // contended slow-path from EnterI(). We use ReenterI() only for
971 // monitor reentry in wait().
972 //
973 // In the future we should reconcile EnterI() and ReenterI().
974
975 void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
976 assert(current != nullptr, "invariant");
977 assert(currentNode != nullptr, "invariant");
978 assert(currentNode->_thread == current, "invariant");
979 assert(_waiters > 0, "invariant");
980 assert(object()->mark() == markWord::encode(this), "invariant");
981
982 assert(current->thread_state() != _thread_blocked, "invariant");
983
984 int nWakeups = 0;
985 for (;;) {
986 ObjectWaiter::TStates v = currentNode->TState;
987 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
988 assert(owner_raw() != current, "invariant");
989
990 if (TrySpin(current)) {
991 break;
992 }
993
994 {
995 OSThreadContendState osts(current->osthread());
996
997 assert(current->thread_state() == _thread_in_vm, "invariant");
998
999 {
1000 ClearSuccOnSuspend csos(this);
1001 ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1002 current->_ParkEvent->park();
1003 }
1004 }
1005
1006 // Try again, but just so we distinguish between futile wakeups and
1007 // successful wakeups. The following test isn't algorithmically
1008 // necessary, but it helps us maintain sensible statistics.
1021 // find that _succ == current.
1022 if (_succ == current) _succ = nullptr;
1023
1024 // Invariant: after clearing _succ a contending thread
1025 // *must* retry _owner before parking.
1026 OrderAccess::fence();
1027
1028 // This PerfData object can be used in parallel with a safepoint.
1029 // See the work around in PerfDataManager::destroy().
1030 OM_PERFDATA_OP(FutileWakeups, inc());
1031 }
1032
1033 // current has acquired the lock -- Unlink current from the cxq or EntryList .
1034 // Normally we'll find current on the EntryList.
1035 // Unlinking from the EntryList is constant-time and atomic-free.
1036 // From the perspective of the lock owner (this thread), the
1037 // EntryList is stable and cxq is prepend-only.
1038 // The head of cxq is volatile but the interior is stable.
1039 // In addition, current.TState is stable.
1040
1041 assert(owner_raw() == current, "invariant");
1042 assert(object()->mark() == markWord::encode(this), "invariant");
1043 UnlinkAfterAcquire(current, currentNode);
1044 if (_succ == current) _succ = nullptr;
1045 assert(_succ != current, "invariant");
1046 currentNode->TState = ObjectWaiter::TS_RUN;
1047 OrderAccess::fence(); // see comments at the end of EnterI()
1048 }
1049
1050 // By convention we unlink a contending thread from EntryList|cxq immediately
1051 // after the thread acquires the lock in ::enter(). Equally, we could defer
1052 // unlinking the thread until ::exit()-time.
1053
1054 void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* currentNode) {
1055 assert(owner_raw() == current, "invariant");
1056 assert(currentNode->_thread == current, "invariant");
1057
1058 if (currentNode->TState == ObjectWaiter::TS_ENTER) {
1059 // Normal case: remove current from the DLL EntryList .
1060 // This is a constant-time operation.
1061 ObjectWaiter* nxt = currentNode->_next;
1062 ObjectWaiter* prv = currentNode->_prev;
1063 if (nxt != nullptr) nxt->_prev = prv;
1064 if (prv != nullptr) prv->_next = nxt;
1065 if (currentNode == _EntryList) _EntryList = nxt;
1066 assert(nxt == nullptr || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
1067 assert(prv == nullptr || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
1068 } else {
1069 assert(currentNode->TState == ObjectWaiter::TS_CXQ, "invariant");
1070 // Inopportune interleaving -- current is still on the cxq.
1071 // This usually means the enqueue of self raced an exiting thread.
1072 // Normally we'll find current near the front of the cxq, so
1073 // dequeueing is typically fast. If needbe we can accelerate
1074 // this with some MCS/CHL-like bidirectional list hints and advisory
1075 // back-links so dequeueing from the interior will normally operate
1076 // in constant-time.
1096 q = p;
1097 assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
1098 }
1099 assert(v != currentNode, "invariant");
1100 assert(p == currentNode, "Node not found on cxq");
1101 assert(p != _cxq, "invariant");
1102 assert(q != nullptr, "invariant");
1103 assert(q->_next == p, "invariant");
1104 q->_next = p->_next;
1105 }
1106 }
1107
1108 #ifdef ASSERT
1109 // Diagnostic hygiene ...
1110 currentNode->_prev = (ObjectWaiter*) 0xBAD;
1111 currentNode->_next = (ObjectWaiter*) 0xBAD;
1112 currentNode->TState = ObjectWaiter::TS_RUN;
1113 #endif
1114 }
1115
1116 // -----------------------------------------------------------------------------
1117 // Exit support
1118 //
1119 // exit()
1120 // ~~~~~~
1121 // Note that the collector can't reclaim the objectMonitor or deflate
1122 // the object out from underneath the thread calling ::exit() as the
1123 // thread calling ::exit() never transitions to a stable state.
1124 // This inhibits GC, which in turn inhibits asynchronous (and
1125 // inopportune) reclamation of "this".
1126 //
1127 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
1128 // There's one exception to the claim above, however. EnterI() can call
1129 // exit() to drop a lock if the acquirer has been externally suspended.
1130 // In that case exit() is called with _thread_state == _thread_blocked,
1131 // but the monitor's _contentions field is > 0, which inhibits reclamation.
1132 //
1133 // 1-0 exit
1134 // ~~~~~~~~
1135 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
1155 // exiting thread will notice and unpark the stranded thread, or, (b)
1156 // the timer expires. If the lock is high traffic then the stranding latency
1157 // will be low due to (a). If the lock is low traffic then the odds of
1158 // stranding are lower, although the worst-case stranding latency
1159 // is longer. Critically, we don't want to put excessive load in the
1160 // platform's timer subsystem. We want to minimize both the timer injection
1161 // rate (timers created/sec) as well as the number of timers active at
1162 // any one time. (more precisely, we want to minimize timer-seconds, which is
1163 // the integral of the # of active timers at any instant over time).
1164 // Both impinge on OS scalability. Given that, at most one thread parked on
1165 // a monitor will use a timer.
1166 //
1167 // There is also the risk of a futile wake-up. If we drop the lock
1168 // another thread can reacquire the lock immediately, and we can
1169 // then wake a thread unnecessarily. This is benign, and we've
1170 // structured the code so the windows are short and the frequency
// of such futile wakeups is low.
1172
// Release the monitor. Drops ownership and, when waiters exist and no
// successor is already designated, reacquires briefly to pick and unpark
// an heir-presumptive from the EntryList (see ExitEpilog()).
//   current       - the releasing thread; must own the monitor (or own the
//                   BasicLock when LockingMode != LM_LIGHTWEIGHT).
//   not_suspended - false only on the EnterI() suspension path; gates the
//                   JFR previous-owner bookkeeping below.
void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
  void* cur = owner_raw();
  if (current != cur) {
    if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
      // _owner still holds the BasicLock* left by a stack-lock inflation;
      // normalize it to the owning thread before proceeding.
      assert(_recursions == 0, "invariant");
      set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
      _recursions = 0;
    } else {
      // Apparent unbalanced locking ...
      // Naively we'd like to throw IllegalMonitorStateException.
      // As a practical matter we can neither allocate nor throw an
      // exception as ::exit() can be called from leaf routines.
      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
      // Upon deeper reflection, however, in a properly run JVM the only
      // way we should encounter this situation is in the presence of
      // unbalanced JNI locking. TODO: CheckJNICalls.
      // See also: CR4414101
#ifdef ASSERT
      LogStreamHandle(Error, monitorinflation) lsh;
      lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
                   " is exiting an ObjectMonitor it does not own.", p2i(current));
      lsh.print_cr("The imbalance is possibly caused by JNI locking.");
      print_debug_style_on(&lsh);
      assert(false, "Non-balanced monitor enter/exit!");
#endif
      // In product builds: silently tolerate the unbalanced exit.
      return;
    }
  }

  if (_recursions != 0) {
    _recursions--;        // this is simple recursive enter
    return;
  }

  // Invariant: after setting Responsible=null a thread must execute
  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
  _Responsible = nullptr;

#if INCLUDE_JFR
  // get the owner's thread id for the MonitorEnter event
  // if it is enabled and the thread isn't suspended
  if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
    _previous_owner_tid = JFR_THREAD_ID(current);
  }
#endif

  for (;;) {
    assert(current == owner_raw(), "invariant");

    // Drop the lock.
    // release semantics: prior loads and stores from within the critical section
    // must not float (reorder) past the following store that drops the lock.
    // Uses a storeload to separate release_store(owner) from the
    // successor check. The try_set_owner() below uses cmpxchg() so
    // we get the fence down there.
    release_clear_owner(current);
    OrderAccess::storeload();

    // Fast path: nobody is queued, or a successor is already designated --
    // succession is somebody else's problem and we can simply leave.
    if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) {
      return;
    }
    // Other threads are blocked trying to acquire the lock.

    // Normally the exiting thread is responsible for ensuring succession,
    // but if other successors are ready or other entering threads are spinning
    // then this thread can simply store null into _owner and exit without
    // waking a successor. The existence of spinners or ready successors
    // guarantees proper succession (liveness). Responsibility passes to the
    // ready or running successors. The exiting thread delegates the duty.
    // More precisely, if a successor already exists this thread is absolved
    // of the responsibility of waking (unparking) one.
    //
    // The _succ variable is critical to reducing futile wakeup frequency.
    // _succ identifies the "heir presumptive" thread that has been made
    // to drop the lock and then spin briefly to see if a spinner managed
    // to acquire the lock. If so, the exiting thread could exit
    // immediately without waking a successor, otherwise the exiting
    // thread would need to dequeue and wake a successor.
    // (Note that we'd need to make the post-drop spin short, but no
    // shorter than the worst-case round-trip cache-line migration time.
    // The dropped lock needs to become visible to the spinner, and then
    // the acquisition of the lock by the spinner must become visible to
    // the exiting thread).

    // It appears that an heir-presumptive (successor) must be made ready.
    // Only the current lock owner can manipulate the EntryList or
    // drain _cxq, so we need to reacquire the lock. If we fail
    // to reacquire the lock the responsibility for ensuring succession
    // falls to the new owner.
    //
    if (try_set_owner_from(nullptr, current) != nullptr) {
      return;
    }

    guarantee(owner_raw() == current, "invariant");

    ObjectWaiter* w = nullptr;

    w = _EntryList;
    if (w != nullptr) {
      // I'd like to write: guarantee (w->_thread != current).
      // But in practice an exiting thread may find itself on the EntryList.
      // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
      // then calls exit(). Exit releases the lock by setting O._owner to null.
      // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
      // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
      // releases the lock "O". T2 resumes immediately after the ST of null into
      // _owner, above. T2 notices that the EntryList is populated, so it
      // reacquires the lock and then finds itself on the EntryList.
      // Given all that, we have to tolerate the circumstance where "w" is
      // associated with current.
      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
      ExitEpilog(current, w);
      return;
    }
    }

    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = nullptr
    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().

    // See if we can abdicate to a spinner instead of waking a thread.
    // A primary goal of the implementation is to reduce the
    // context-switch rate.
    if (_succ != nullptr) continue;

    w = _EntryList;
    if (w != nullptr) {
      guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
      ExitEpilog(current, w);
      return;
    }
  }
}
1351
// Final stage of exit(): designate Wakee as the successor, drop the lock,
// and unpark the successor so it can recontend for ownership. Caller must
// be the current owner; the wakee stays on the EntryList and dequeues
// itself after it reacquires the monitor.
void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) {
  assert(owner_raw() == current, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = nullptr
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  ParkEvent * Trigger = Wakee->_event;   // cache the event before releasing the lock

  // Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = nullptr;

  // Drop the lock.
  // Uses a fence to separate release_store(owner) from the LD in unpark().
  release_clear_owner(current);
  OrderAccess::fence();

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), current);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}
1380
// complete_exit exits a lock returning the recursion count at the time of
// the call (so the caller can later re-enter with reenter()).
// complete_exit requires an inflated monitor.
// The _owner field is not always the Thread addr even with an
// inflated monitor, e.g. the monitor can be inflated by a non-owning
// thread due to contention.
intx ObjectMonitor::complete_exit(JavaThread* current) {
  assert(InitDone, "Unexpectedly not initialized");

  void* cur = owner_raw();
  if (current != cur) {
    if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
      // _owner holds the BasicLock* from a stack-lock; normalize to Thread*.
      assert(_recursions == 0, "internal state error");
      set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
      _recursions = 0;
    }
  }

  guarantee(current == owner_raw(), "complete_exit not owner");
  intx save = _recursions;   // record the old recursion count
  _recursions = 0;           // set the recursion level to be 0
  exit(current);             // exit the monitor
  guarantee(owner_raw() != current, "invariant");
  return save;
}
1405
// Checks that the current THREAD owns this monitor and causes an
// immediate return if it doesn't. We don't use the CHECK macro
// because we want the IMSE to be the only exception that is thrown
// from the call site when false is returned. Any other pending
// exception is ignored.
// Only usable in void functions of ObjectMonitor (the expansion is a
// bare "return" and calls the member function check_owner()).
#define CHECK_OWNER()                                                  \
  do {                                                                 \
    if (!check_owner(THREAD)) {                                        \
      assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here.");  \
      return;                                                          \
    }                                                                  \
  } while (false)
1418
// Returns true if the specified thread owns the ObjectMonitor.
// Otherwise returns false and throws IllegalMonitorStateException
// (IMSE). If there is a pending exception and the specified thread
// is not the owner, that exception will be replaced by the IMSE.
bool ObjectMonitor::check_owner(TRAPS) {
  JavaThread* current = THREAD;
  void* cur = owner_raw();
  assert(cur != anon_owner_ptr(), "no anon owner here");
  if (cur == current) {
    // Common case: direct ownership.
    return true;
  }
  if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
    // _owner is a BasicLock* on this thread's stack -- the thread owns the
    // monitor via a stack-lock; normalize the owner field as a side effect.
    set_owner_from_BasicLock(cur, current);  // Convert from BasicLock* to Thread*.
    _recursions = 0;
    return true;
  }
  // THROW_MSG_ returns false from this function after installing the IMSE.
  THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
             "current thread is not owner", false);
}
1438
// Returns true if events on monitors of this klass should be suppressed.
// Only the JFR chunk-rotation monitor is excluded (to avoid JFR events
// about JFR's own internal synchronization); always false in non-JFR builds.
static inline bool is_excluded(const Klass* monitor_klass) {
  assert(monitor_klass != nullptr, "invariant");
  NOT_JFR_RETURN_(false);
  JFR_ONLY(return vmSymbols::jfr_chunk_rotation_monitor() == monitor_klass->name();)
}
1444
1445 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1446 ObjectMonitor* monitor,
1447 uint64_t notifier_tid,
1448 jlong timeout,
1449 bool timedout) {
1450 assert(event != nullptr, "invariant");
1451 assert(monitor != nullptr, "invariant");
1452 const Klass* monitor_klass = monitor->object()->klass();
1484 if (JvmtiExport::should_post_monitor_waited()) {
1485 // Note: 'false' parameter is passed here because the
1486 // wait was not timed out due to thread interrupt.
1487 JvmtiExport::post_monitor_waited(current, this, false);
1488
1489 // In this short circuit of the monitor wait protocol, the
1490 // current thread never drops ownership of the monitor and
1491 // never gets added to the wait queue so the current thread
1492 // cannot be made the successor. This means that the
1493 // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1494 // consume an unpark() meant for the ParkEvent associated with
1495 // this ObjectMonitor.
1496 }
1497 if (event.should_commit()) {
1498 post_monitor_wait_event(&event, this, 0, millis, false);
1499 }
1500 THROW(vmSymbols::java_lang_InterruptedException());
1501 return;
1502 }
1503
1504 current->set_current_waiting_monitor(this);
1505
1506 // create a node to be put into the queue
1507 // Critically, after we reset() the event but prior to park(), we must check
1508 // for a pending interrupt.
1509 ObjectWaiter node(current);
1510 node.TState = ObjectWaiter::TS_WAIT;
1511 current->_ParkEvent->reset();
1512 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
1513
1514 // Enter the waiting queue, which is a circular doubly linked list in this case
1515 // but it could be a priority queue or any data structure.
1516 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
1517 // by the owner of the monitor *except* in the case where park()
1518 // returns because of a timeout of interrupt. Contention is exceptionally rare
1519 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1520
1521 Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
1522 AddWaiter(&node);
1523 Thread::SpinRelease(&_WaitSetLock);
1524
1525 _Responsible = nullptr;
1526
1527 intx save = _recursions; // record the old recursion count
1528 _waiters++; // increment the number of waiters
1529 _recursions = 0; // set the recursion level to be 1
1530 exit(current); // exit the monitor
1531 guarantee(owner_raw() != current, "invariant");
1532
1533 // The thread is on the WaitSet list - now park() it.
1534 // On MP systems it's conceivable that a brief spin before we park
1535 // could be profitable.
1536 //
1537 // TODO-FIXME: change the following logic to a loop of the form
1538 // while (!timeout && !interrupted && _notified == 0) park()
1539
1540 int ret = OS_OK;
1541 int WasNotified = 0;
1542
1543 // Need to check interrupt state whilst still _thread_in_vm
1544 bool interrupted = interruptible && current->is_interrupted(false);
1545
1546 { // State transition wrappers
1547 OSThread* osthread = current->osthread();
1548 OSThreadWaitState osts(osthread, true);
1549
1550 assert(current->thread_state() == _thread_in_vm, "invariant");
1551
1617 // The ObjectMonitor was notified and the current thread is
1618 // the successor which also means that an unpark() has already
1619 // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1620 // consume the unpark() that was done when the successor was
1621 // set because the same ParkEvent is shared between Java
1622 // monitors and JVM/TI RawMonitors (for now).
1623 //
1624 // We redo the unpark() to ensure forward progress, i.e., we
1625 // don't want all pending threads hanging (parked) with none
1626 // entering the unlocked monitor.
1627 node._event->unpark();
1628 }
1629 }
1630
1631 if (event.should_commit()) {
1632 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1633 }
1634
1635 OrderAccess::fence();
1636
1637 assert(owner_raw() != current, "invariant");
1638 ObjectWaiter::TStates v = node.TState;
1639 if (v == ObjectWaiter::TS_RUN) {
1640 enter(current);
1641 } else {
1642 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1643 ReenterI(current, &node);
1644 node.wait_reenter_end(this);
1645 }
1646
1647 // current has reacquired the lock.
1648 // Lifecycle - the node representing current must not appear on any queues.
1649 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1650 // want residual elements associated with this thread left on any lists.
1651 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1652 assert(owner_raw() == current, "invariant");
1653 assert(_succ != current, "invariant");
1654 } // OSThreadWaitState()
1655
1656 current->set_current_waiting_monitor(nullptr);
1657
1658 guarantee(_recursions == 0, "invariant");
1659 int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current);
1660 _recursions = save // restore the old recursion count
1661 + relock_count; // increased by the deferred relock count
1662 current->inc_held_monitor_count(relock_count); // Deopt never entered these counts.
1663 _waiters--; // decrement the number of waiters
1664
1665 // Verify a few postconditions
1666 assert(owner_raw() == current, "invariant");
1667 assert(_succ != current, "invariant");
1668 assert(object()->mark() == markWord::encode(this), "invariant");
1669
1670 // check if the notification happened
1671 if (!WasNotified) {
1672 // no, it could be timeout or Thread.interrupt() or both
1673 // check for interrupt event, otherwise it is timeout
1674 if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
1675 THROW(vmSymbols::java_lang_InterruptedException());
1676 }
1677 }
1678
1679 // NOTE: Spurious wake up will be consider as timeout.
1680 // Monitor notify has precedence over thread interrupt.
1681 }
1682
1683
1684 // Consider:
1685 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1686 // then instead of transferring a thread from the WaitSet to the EntryList
1923 }
1924
1925 //
1926 // Consider the following alternative:
1927 // Periodically set _SpinDuration = _SpinLimit and try a long/full
1928 // spin attempt. "Periodically" might mean after a tally of
1929 // the # of failed spin attempts (or iterations) reaches some threshold.
1930 // This takes us into the realm of 1-out-of-N spinning, where we
1931 // hold the duration constant but vary the frequency.
1932
1933 int ctr = _SpinDuration;
1934 if (ctr <= 0) return false;
1935
1936 // We're good to spin ... spin ingress.
1937 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1938 // when preparing to LD...CAS _owner, etc and the CAS is likely
1939 // to succeed.
1940 if (_succ == nullptr) {
1941 _succ = current;
1942 }
1943 Thread* prv = nullptr;
1944
1945 // There are three ways to exit the following loop:
1946 // 1. A successful spin where this thread has acquired the lock.
1947 // 2. Spin failure with prejudice
1948 // 3. Spin failure without prejudice
1949
1950 while (--ctr >= 0) {
1951
1952 // Periodic polling -- Check for pending GC
1953 // Threads may spin while they're unsafe.
1954 // We don't want spinning threads to delay the JVM from reaching
1955 // a stop-the-world safepoint or to steal cycles from GC.
1956 // If we detect a pending safepoint we abort in order that
1957 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1958 // this thread, if safe, doesn't steal cycles from GC.
1959 // This is in keeping with the "no loitering in runtime" rule.
1960 // We periodically check to see if there's a safepoint pending.
1961 if ((ctr & 0xFF) == 0) {
1962 // Can't call SafepointMechanism::should_process() since that
1963 // might update the poll values and we could be in a thread_blocked
1964 // state here which is not allowed so just check the poll.
1965 if (SafepointMechanism::local_poll_armed(current)) {
1966 break;
1967 }
1968 SpinPause();
1969 }
1970
1971 // Probe _owner with TATAS
1972 // If this thread observes the monitor transition or flicker
1973 // from locked to unlocked to locked, then the odds that this
1974 // thread will acquire the lock in this spin attempt go down
1975 // considerably. The same argument applies if the CAS fails
1976 // or if we observe _owner change from one non-null value to
1977 // another non-null value. In such cases we might abort
1978 // the spin without prejudice or apply a "penalty" to the
1979 // spin count-down variable "ctr", reducing it by 100, say.
1980
1981 JavaThread* ox = static_cast<JavaThread*>(owner_raw());
1982 if (ox == nullptr) {
1983 ox = static_cast<JavaThread*>(try_set_owner_from(nullptr, current));
1984 if (ox == nullptr) {
1985 // The CAS succeeded -- this thread acquired ownership
1986 // Take care of some bookkeeping to exit spin state.
1987 if (_succ == current) {
1988 _succ = nullptr;
1989 }
1990
1991 // Increase _SpinDuration :
1992 // The spin was successful (profitable) so we tend toward
1993 // longer spin attempts in the future.
1994 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1995 // If we acquired the lock early in the spin cycle it
1996 // makes sense to increase _SpinDuration proportionally.
1997 // Note that we don't clamp SpinDuration precisely at SpinLimit.
1998 _SpinDuration = adjust_up(_SpinDuration);
1999 return true;
2000 }
2001
2002 // The CAS failed ... we can take any of the following actions:
2003 // * penalize: ctr -= CASPenalty
2032 OrderAccess::fence();
2033 if (TryLock(current) == TryLockResult::Success) {
2034 return true;
2035 }
2036 }
2037
2038 return false;
2039 }
2040
2041
2042 // -----------------------------------------------------------------------------
2043 // WaitSet management ...
2044
// Construct a queue node for 'current', used while the thread sits on the
// WaitSet / cxq / EntryList. The node borrows the thread's ParkEvent; it
// does not own it.
ObjectWaiter::ObjectWaiter(JavaThread* current) {
  _next     = nullptr;           // not linked onto any list yet
  _prev     = nullptr;
  _notified = 0;                 // set by notify()/notifyAll()
  _notifier_tid = 0;             // JFR tid of the notifying thread
  TState    = TS_RUN;            // thread is runnable until enqueued
  _thread   = current;
  _event    = _thread->_ParkEvent;  // park/unpark channel shared with the thread
  _active   = false;             // set by wait_reenter_begin()
  assert(_event != nullptr, "invariant");
}
2056
// Record (for JVMTI/thread-state reporting) that this waiter is now blocked
// re-entering 'mon' after being notified/timed out; remembers in _active
// whether the blocked-state bookkeeping was actually engaged.
void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
  _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(_thread, mon);
}
2060
// Undo wait_reenter_begin() once the monitor has been reacquired.
// 'mon' is currently unused; _active tells the helper whether the
// blocked-state bookkeeping was engaged.
void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
  JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(_thread, _active);
}
2064
2065 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
2066 assert(node != nullptr, "should not add null node");
2067 assert(node->_prev == nullptr, "node already in list");
2068 assert(node->_next == nullptr, "node already in list");
2069 // put node at end of queue (circular doubly linked list)
2070 if (_WaitSet == nullptr) {
2071 _WaitSet = node;
2072 node->_prev = node;
2073 node->_next = node;
2074 } else {
2075 ObjectWaiter* head = _WaitSet;
2076 ObjectWaiter* tail = head->_prev;
2152 { \
2153 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
2154 CHECK); \
2155 }
2156 NEWPERFCOUNTER(_sync_Inflations);
2157 NEWPERFCOUNTER(_sync_Deflations);
2158 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2159 NEWPERFCOUNTER(_sync_FutileWakeups);
2160 NEWPERFCOUNTER(_sync_Parks);
2161 NEWPERFCOUNTER(_sync_Notifications);
2162 NEWPERFVARIABLE(_sync_MonExtant);
2163 #undef NEWPERFCOUNTER
2164 #undef NEWPERFVARIABLE
2165 }
2166
2167 _oop_storage = OopStorageSet::create_weak("ObjectSynchronizer Weak", mtSynchronizer);
2168
2169 DEBUG_ONLY(InitDone = true;)
2170 }
2171
// Print a compact one-line summary of this monitor's state to 'st'.
void ObjectMonitor::print_on(outputStream* st) const {
  // The minimal things to print for markWord printing, more can be added for debugging and logging.
  st->print("{contentions=0x%08x,waiters=0x%08x"
            ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
            contentions(), waiters(), recursions(),
            p2i(owner()));
}
// Convenience wrapper: print the one-line summary to the tty stream.
void ObjectMonitor::print() const { print_on(tty); }
2180
2181 #ifdef ASSERT
2182 // Print the ObjectMonitor like a debugger would:
2183 //
2184 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2185 // _header = 0x0000000000000001
2186 // _object = 0x000000070ff45fd0
2187 // _pad_buf0 = {
2188 // [0] = '\0'
2189 // ...
2190 // [43] = '\0'
2191 // }
|
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/oopHandle.inline.hpp"
38 #include "oops/weakHandle.inline.hpp"
39 #include "prims/jvmtiDeferredUpdates.hpp"
40 #include "prims/jvmtiExport.hpp"
41 #include "runtime/atomic.hpp"
42 #include "runtime/globals.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/interfaceSupport.inline.hpp"
45 #include "runtime/javaThread.inline.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "runtime/objectMonitor.hpp"
48 #include "runtime/objectMonitor.inline.hpp"
49 #include "runtime/orderAccess.hpp"
50 #include "runtime/osThread.hpp"
51 #include "runtime/perfData.hpp"
52 #include "runtime/safefetch.hpp"
53 #include "runtime/safepointMechanism.inline.hpp"
54 #include "runtime/sharedRuntime.hpp"
55 #include "runtime/threads.hpp"
56 #include "services/threadService.hpp"
57 #include "utilities/dtrace.hpp"
58 #include "utilities/globalDefinitions.hpp"
59 #include "utilities/macros.hpp"
60 #include "utilities/preserveException.hpp"
61 #if INCLUDE_JFR
62 #include "jfr/support/jfrFlush.hpp"
63 #endif
64
65 #ifdef DTRACE_ENABLED
66
67 // Only bother with this argument setup if dtrace is available
68 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
69
70
71 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
72 char* bytes = nullptr; \
73 int len = 0; \
74 jlong jtid = SharedRuntime::get_java_tid(thread); \
75 Symbol* klassname = obj->klass()->name(); \
96 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
97 { \
98 if (DTraceMonitorProbes) { \
99 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
100 HOTSPOT_MONITOR_##probe(jtid, \
101 (uintptr_t)(monitor), bytes, len); \
102 } \
103 }
104
105 #else // ndef DTRACE_ENABLED
106
107 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
108 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
109
110 #endif // ndef DTRACE_ENABLED
111
112 DEBUG_ONLY(static volatile bool InitDone = false;)
113
114 OopStorage* ObjectMonitor::_oop_storage = nullptr;
115
116 OopHandle ObjectMonitor::_vthread_cxq_head;
117 ParkEvent* ObjectMonitor::_vthread_unparker_ParkEvent = nullptr;
118
119 static void post_virtual_thread_pinned_event(JavaThread* current, const char* reason) {
120 EventVirtualThreadPinned e;
121 if (e.should_commit()) {
122 e.set_pinnedReason(reason);
123 e.set_carrierThread(JFR_JVM_THREAD_ID(current));
124 e.commit();
125 }
126 }
127
128 // -----------------------------------------------------------------------------
129 // Theory of operations -- Monitors lists, thread residency, etc:
130 //
131 // * A thread acquires ownership of a monitor by successfully
132 // CAS()ing the _owner field from null to non-null.
133 //
134 // * Invariant: A thread appears on at most one monitor list --
135 // cxq, EntryList or WaitSet -- at any one time.
136 //
137 // * Contending threads "push" themselves onto the cxq with CAS
138 // and then spin/park.
139 //
140 // * After a contending thread eventually acquires the lock it must
141 // dequeue itself from either the EntryList or the cxq.
142 //
143 // * The exiting thread identifies and unparks an "heir presumptive"
144 // tentative successor thread on the EntryList. Critically, the
145 // exiting thread doesn't unlink the successor thread from the EntryList.
146 // After having been unparked, the wakee will recontend for ownership of
147 // the monitor. The successor (wakee) will either acquire the lock or
245 switch (jt->thread_state()) {
246 case _thread_in_vm: // the usual case
247 case _thread_in_Java: // during deopt
248 break;
249 default:
250 fatal("called from an unsafe thread state");
251 }
252 assert(jt->is_active_Java_thread(), "must be active JavaThread");
253 } else {
254 // However, ThreadService::get_current_contended_monitor()
255 // can call here via the VMThread so sanity check it.
256 assert(self->is_VM_thread(), "must be");
257 }
258 #endif // ASSERT
259 }
260
// Construct a monitor for 'object'. The object reference is stored via a
// weak OopHandle in _oop_storage so deflation can release it. All queue
// and ownership state starts empty/unowned.
ObjectMonitor::ObjectMonitor(oop object) :
  _header(markWord::zero()),
  _object(_oop_storage, object),      // weak handle to the associated Java object
  _owner(nullptr),                    // unowned until a thread CASes itself in
  _stack_locker(nullptr),
  _previous_owner_tid(0),             // JFR id of the last owner (for events)
  _next_om(nullptr),                  // link in the in-use/free monitor lists
  _recursions(0),
  _EntryList(nullptr),                // threads blocked on entry or re-entry
  _cxq(nullptr),                      // LIFO queue of recently-arrived contenders
  _succ(nullptr),                     // heir presumptive (wakeup throttling)
  _Responsible(nullptr),              // thread parked with a timed backstop
  _SpinDuration(ObjectMonitor::Knob_SpinLimit),
  _contentions(0),
  _WaitSet(nullptr),                  // threads in Object.wait()
  _waiters(0),
  _WaitSetLock(0)                     // spin-lock protecting _WaitSet
{ }
279
// Release the weak handle to the associated object back to _oop_storage.
ObjectMonitor::~ObjectMonitor() {
  _object.release(_oop_storage);
}
283
284 oop ObjectMonitor::object() const {
285 check_object_context();
313
314 // -----------------------------------------------------------------------------
315 // Enter support
316
// Acquire this monitor on behalf of 'locking_thread' (which is either the
// calling thread or a deopt-suspended thread). Because the monitor is
// private to or already owned by locking_thread, the only possible race is
// with async deflation. Returns false only if deflation won that race and
// the caller must retry against a fresh monitor.
bool ObjectMonitor::enter_for(JavaThread* locking_thread) {
  // Used by ObjectSynchronizer::enter_for to enter for another thread.
  // The monitor is private to or already owned by locking_thread which must be suspended.
  // So this code may only contend with deflation.
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

  // Block out deflation as soon as possible.
  add_to_contentions(1);

  bool success = false;
  if (!is_being_async_deflated()) {
    void* prev_owner = try_set_owner_from(nullptr, locking_thread);

    if (prev_owner == nullptr) {
      // Uncontended: we took ownership outright.
      assert(_recursions == 0, "invariant");
      success = true;
    } else if (prev_owner == owner_for(locking_thread)) {
      // Recursive enter by the same logical owner.
      _recursions++;
      success = true;
    } else if (prev_owner == DEFLATER_MARKER) {
      // Racing with deflation.
      prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
      if (prev_owner == DEFLATER_MARKER) {
        // Cancelled deflation. Increment contentions as part of the deflation protocol.
        add_to_contentions(1);
        success = true;
      } else if (prev_owner == nullptr) {
        // At this point we cannot race with deflation as we have both incremented
        // contentions, seen contention > 0 and seen a DEFLATER_MARKER.
        // success will only be false if this races with something other than
        // deflation.
        prev_owner = try_set_owner_from(nullptr, locking_thread);
        success = prev_owner == nullptr;
      }
    }
    assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
           ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
           p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
  } else {
    // Async deflation is in progress and our contentions increment
    // above lost the race to async deflation. Undo the work and
    // force the caller to retry.
    const oop l_object = object();
    if (l_object != nullptr) {
      // Attempt to restore the header/dmw to the object's header so that
      // we only retry once if the deflater thread happens to be slow.
      install_displaced_markword_in_object(l_object);
    }
  }

  // Balance the contentions increment at the top of this function.
  add_to_contentions(-1);

  assert(!success || is_owner(locking_thread), "must be");

  return success;
}
373
374 bool ObjectMonitor::enter(JavaThread* current) {
375 assert(current == JavaThread::current(), "must be");
376 // The following code is ordered to check the most common cases first
377 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
378
379 void* cur = try_set_owner_from(nullptr, current);
380 if (cur == nullptr) {
381 assert(_recursions == 0, "invariant");
382 return true;
383 }
384
385 if (cur == owner_for(current)) {
386 // TODO-FIXME: check for integer overflow! BUGID 6557169.
387 _recursions++;
388 return true;
389 }
390
391 // We've encountered genuine contention.
392
393 // Try one round of spinning *before* enqueueing current
394 // and before going through the awkward and expensive state
395 // transitions. The following spin is strictly optional ...
396 // Note that if we acquire the monitor from an initial spin
397 // we forgo posting JVMTI events and firing DTRACE probes.
398 if (TrySpin(current)) {
399 assert(owner_raw() == owner_for(current), "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
400 assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
401 assert(object()->mark() == markWord::encode(this),
402 "object mark must match encoded this: mark=" INTPTR_FORMAT
403 ", encoded this=" INTPTR_FORMAT, object()->mark().value(),
404 markWord::encode(this).value());
405 return true;
406 }
407
408 assert(owner_raw() != owner_for(current), "invariant");
409 assert(_succ != current, "invariant");
410 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
411 assert(current->thread_state() != _thread_blocked, "invariant");
412
413 // Keep track of contention for JVM/TI and M&M queries.
414 add_to_contentions(1);
415 if (is_being_async_deflated()) {
416 // Async deflation is in progress and our contentions increment
417 // above lost the race to async deflation. Undo the work and
418 // force the caller to retry.
419 const oop l_object = object();
420 if (l_object != nullptr) {
421 // Attempt to restore the header/dmw to the object's header so that
422 // we only retry once if the deflater thread happens to be slow.
423 install_displaced_markword_in_object(l_object);
424 }
425 add_to_contentions(-1);
426 return false;
427 }
428
436 event.set_address((uintptr_t)this);
437 }
438
439 { // Change java thread status to indicate blocked on monitor enter.
440 JavaThreadBlockedOnMonitorEnterState jtbmes(current, this);
441
442 assert(current->current_pending_monitor() == nullptr, "invariant");
443 current->set_current_pending_monitor(this);
444
445 DTRACE_MONITOR_PROBE(contended__enter, this, object(), current);
446 if (JvmtiExport::should_post_monitor_contended_enter()) {
447 JvmtiExport::post_monitor_contended_enter(current, this);
448
449 // The current thread does not yet own the monitor and does not
450 // yet appear on any queues that would get it made the successor.
451 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
452 // handler cannot accidentally consume an unpark() meant for the
453 // ParkEvent associated with this ObjectMonitor.
454 }
455
456 #ifdef LOOM_MONITOR_SUPPORT
457 ContinuationEntry* ce = current->last_continuation();
458 if (ce != nullptr && ce->is_virtual_thread() && current->is_on_monitorenter()) {
459 int result = Continuation::try_preempt(current, ce->cont_oop(current));
460 if (result == freeze_ok) {
461 bool acquired = HandlePreemptedVThread(current);
462 DEBUG_ONLY(int state = java_lang_VirtualThread::state(current->vthread()));
463 assert((acquired && current->preemption_cancelled() && state == java_lang_VirtualThread::RUNNING) ||
464 (!acquired && !current->preemption_cancelled() && state == java_lang_VirtualThread::BLOCKING), "invariant");
465 return true;
466 }
467 if (result == freeze_pinned_native) {
468 post_virtual_thread_pinned_event(current, "Native frame or <clinit> on stack");
469 }
470 }
471 #endif
472
473 OSThreadContendState osts(current->osthread());
474
475 assert(current->thread_state() == _thread_in_vm, "invariant");
476
477 for (;;) {
478 ExitOnSuspend eos(this);
479 {
480 ThreadBlockInVMPreprocess<ExitOnSuspend> tbivs(current, eos, true /* allow_suspend */);
481 EnterI(current);
482 current->set_current_pending_monitor(nullptr);
483 // We can go to a safepoint at the end of this block. If we
484 // do a thread dump during that safepoint, then this thread will show
485 // as having "-locked" the monitor, but the OS and java.lang.Thread
486 // states will still report that the thread is blocked trying to
487 // acquire it.
488 // If there is a suspend request, ExitOnSuspend will exit the OM
489 // and set the OM as pending.
490 }
491 if (!eos.exited()) {
492 // ExitOnSuspend did not exit the OM
493 assert(owner_raw() == owner_for(current), "invariant");
494 break;
495 }
496 }
497
498 // We've just gotten past the enter-check-for-suspend dance and we now own
499 // the monitor free and clear.
500 }
501
502 add_to_contentions(-1);
503 assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
504
505 // Must either set _recursions = 0 or ASSERT _recursions == 0.
506 assert(_recursions == 0, "invariant");
507 assert(owner_raw() == owner_for(current), "invariant");
508 assert(_succ != current, "invariant");
509 assert(object()->mark() == markWord::encode(this), "invariant");
510
511 // The thread -- now the owner -- is back in vm mode.
512 // Report the glorious news via TI,DTrace and jvmstat.
513 // The probe effect is non-trivial. All the reportage occurs
514 // while we hold the monitor, increasing the length of the critical
515 // section. Amdahl's parallel speedup law comes vividly into play.
516 //
517 // Another option might be to aggregate the events (thread local or
518 // per-monitor aggregation) and defer reporting until a more opportune
519 // time -- such as next time some thread encounters contention but has
// yet to acquire the lock. While that thread is spinning we could
// increment JVMStat counters, etc.
522
523 DTRACE_MONITOR_PROBE(contended__entered, this, object(), current);
524 if (JvmtiExport::should_post_monitor_contended_entered()) {
525 JvmtiExport::post_monitor_contended_entered(current, this);
526
527 // The current thread already owns the monitor and is not going to
562 // makes contentions negative as signals to contending threads that
563 // an async deflation is in progress. There are a number of checks
564 // as part of the protocol to make sure that the calling thread has
565 // not lost the race to a contending thread.
566 //
567 // The ObjectMonitor has been successfully async deflated when:
568 // (contentions < 0)
569 // Contending threads that see that condition know to retry their operation.
570 //
571 bool ObjectMonitor::deflate_monitor() {
572 if (is_busy()) {
573 // Easy checks are first - the ObjectMonitor is busy so no deflation.
574 return false;
575 }
576
577 const oop obj = object_peek();
578
579 if (obj == nullptr) {
580 // If the object died, we can recycle the monitor without racing with
581 // Java threads. The GC already broke the association with the object.
582 set_owner_from_raw(nullptr, DEFLATER_MARKER);
583 assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions());
584 _contentions = INT_MIN; // minimum negative int
585 } else {
586 // Attempt async deflation protocol.
587
588 // Set a null owner to DEFLATER_MARKER to force any contending thread
589 // through the slow path. This is just the first part of the async
590 // deflation dance.
591 if (try_set_owner_from_raw(nullptr, DEFLATER_MARKER) != nullptr) {
592 // The owner field is no longer null so we lost the race since the
593 // ObjectMonitor is now busy.
594 return false;
595 }
596
597 if (contentions() > 0 || _waiters != 0) {
598 // Another thread has raced to enter the ObjectMonitor after
599 // is_busy() above or has already entered and waited on
600 // it which makes it busy so no deflation. Restore owner to
601 // null if it is still DEFLATER_MARKER.
602 if (try_set_owner_from_raw(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
603 // Deferred decrement for the JT EnterI() that cancelled the async deflation.
604 add_to_contentions(-1);
605 }
606 return false;
607 }
608
609 // Make a zero contentions field negative to force any contending threads
610 // to retry. This is the second part of the async deflation dance.
611 if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
612 // Contentions was no longer 0 so we lost the race since the
613 // ObjectMonitor is now busy. Restore owner to null if it is
614 // still DEFLATER_MARKER:
615 if (try_set_owner_from_raw(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
616 // Deferred decrement for the JT EnterI() that cancelled the async deflation.
617 add_to_contentions(-1);
618 }
619 return false;
620 }
621 }
622
623 // Sanity checks for the races:
624 guarantee(owner_is_DEFLATER_MARKER(), "must be deflater marker");
625 guarantee(contentions() < 0, "must be negative: contentions=%d",
626 contentions());
627 guarantee(_waiters == 0, "must be 0: waiters=%d", _waiters);
628 guarantee(_cxq == nullptr, "must be no contending threads: cxq="
629 INTPTR_FORMAT, p2i(_cxq));
630 guarantee(_EntryList == nullptr,
631 "must be no entering threads: EntryList=" INTPTR_FORMAT,
632 p2i(_EntryList));
633
634 if (obj != nullptr) {
635 if (log_is_enabled(Trace, monitorinflation)) {
691 log_info(monitorinflation)("install_displaced_markword_in_object: "
692 "failed cas_set_mark: new_mark=" INTPTR_FORMAT
693 ", old_mark=" INTPTR_FORMAT ", res=" INTPTR_FORMAT,
694 dmw.value(), markWord::encode(this).value(),
695 res.value());
696 }
697
698 // Note: It does not matter which thread restored the header/dmw
699 // into the object's header. The thread deflating the monitor just
700 // wanted the object's header restored and it is. The threads that
701 // detected a race with the deflation process also wanted the
702 // object's header restored before they retry their operation and
703 // because it is restored they will only retry once.
704 }
705
706 // Convert the fields used by is_busy() to a string that can be
707 // used for diagnostic output.
708 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
709 ss->print("is_busy: waiters=%d"
710 ", contentions=%d"
711 ", owner=" INTPTR_FORMAT
712 ", cxq=" PTR_FORMAT
713 ", EntryList=" PTR_FORMAT,
714 _waiters,
715 (contentions() > 0 ? contentions() : 0),
716 owner_is_DEFLATER_MARKER()
717 // We report null instead of DEFLATER_MARKER here because is_busy()
718 // ignores DEFLATER_MARKER values.
719 ? p2i(nullptr)
720 : p2i(owner_raw()),
721 p2i(_cxq),
722 p2i(_EntryList));
723 return ss->base();
724 }
725
726 #define MAX_RECHECK_INTERVAL 1000
727
728 void ObjectMonitor::EnterI(JavaThread* current) {
729 assert(current->thread_state() == _thread_blocked, "invariant");
730
731 // Try the lock - TATAS
732 if (TryLock(current) == TryLockResult::Success) {
733 assert(_succ != current, "invariant");
734 assert(owner_raw() == owner_for(current), "invariant");
735 assert(_Responsible != current, "invariant");
736 return;
737 }
738
739 if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
740 // Cancelled the in-progress async deflation by changing owner from
741 // DEFLATER_MARKER to current. As part of the contended enter protocol,
742 // contentions was incremented to a positive value before EnterI()
743 // was called and that prevents the deflater thread from winning the
744 // last part of the 2-part async deflation protocol. After EnterI()
745 // returns to enter(), contentions is decremented because the caller
746 // now owns the monitor. We bump contentions an extra time here to
747 // prevent the deflater thread from winning the last part of the
748 // 2-part async deflation protocol after the regular decrement
749 // occurs in enter(). The deflater thread will decrement contentions
750 // after it recognizes that the async deflation was cancelled.
751 add_to_contentions(1);
752 assert(_succ != current, "invariant");
753 assert(_Responsible != current, "invariant");
754 return;
755 }
756
757 assert(InitDone, "Unexpectedly not initialized");
758
759 // We try one round of spinning *before* enqueueing current.
760 //
761 // If the _owner is ready but OFFPROC we could use a YieldTo()
762 // operation to donate the remainder of this thread's quantum
763 // to the owner. This has subtle but beneficial affinity
764 // effects.
765
766 if (TrySpin(current)) {
767 assert(owner_raw() == owner_for(current), "invariant");
768 assert(_succ != current, "invariant");
769 assert(_Responsible != current, "invariant");
770 return;
771 }
772
773 // The Spin failed -- Enqueue and park the thread ...
774 assert(_succ != current, "invariant");
775 assert(owner_raw() != owner_for(current), "invariant");
776 assert(_Responsible != current, "invariant");
777
778 // Enqueue "current" on ObjectMonitor's _cxq.
779 //
780 // Node acts as a proxy for current.
// As an aside, if we were to ever rewrite the synchronization code mostly
782 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
783 // Java objects. This would avoid awkward lifecycle and liveness issues,
784 // as well as eliminate a subset of ABA issues.
785 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
786
787 ObjectWaiter node(current);
788 current->_ParkEvent->reset();
789 node._prev = (ObjectWaiter*) 0xBAD;
790 node.TState = ObjectWaiter::TS_CXQ;
791
792 // Push "current" onto the front of the _cxq.
793 // Once on cxq/EntryList, current stays on-queue until it acquires the lock.
794 // Note that spinning tends to reduce the rate at which threads
795 // enqueue and dequeue on EntryList|cxq.
796 ObjectWaiter* nxt;
797 for (;;) {
798 node._next = nxt = _cxq;
799 if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;
800
801 // Interference - the CAS failed because _cxq changed. Just retry.
802 // As an optional optimization we retry the lock.
803 if (TryLock(current) == TryLockResult::Success) {
804 assert(_succ != current, "invariant");
805 assert(owner_raw() == owner_for(current), "invariant");
806 assert(_Responsible != current, "invariant");
807 return;
808 }
809 }
810
811 // Check for cxq|EntryList edge transition to non-null. This indicates
812 // the onset of contention. While contention persists exiting threads
813 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
814 // operations revert to the faster 1-0 mode. This enter operation may interleave
815 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
// arrange for one of the contending threads to use a timed park() operation
// to detect and recover from the race. (Stranding is a form of progress failure
818 // where the monitor is unlocked but all the contending threads remain parked).
819 // That is, at least one of the contended threads will periodically poll _owner.
820 // One of the contending threads will become the designated "Responsible" thread.
821 // The Responsible thread uses a timed park instead of a normal indefinite park
822 // operation -- it periodically wakes and checks for and recovers from potential
823 // strandings admitted by 1-0 exit operations. We need at most one Responsible
824 // thread per-monitor at any given moment. Only threads on cxq|EntryList may
825 // be responsible for a monitor.
833
834 if (nxt == nullptr && _EntryList == nullptr) {
835 // Try to assume the role of responsible thread for the monitor.
836 // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=current }
837 Atomic::replace_if_null(&_Responsible, current);
838 }
839
840 // The lock might have been released while this thread was occupied queueing
841 // itself onto _cxq. To close the race and avoid "stranding" and
842 // progress-liveness failure we must resample-retry _owner before parking.
843 // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
844 // In this case the ST-MEMBAR is accomplished with CAS().
845 //
846 // TODO: Defer all thread state transitions until park-time.
847 // Since state transitions are heavy and inefficient we'd like
848 // to defer the state transitions until absolutely necessary,
849 // and in doing so avoid some transitions ...
850
851 int nWakeups = 0;
852 int recheckInterval = 1;
853 bool do_timed_parked = false;
854
855 ContinuationEntry* ce = current->last_continuation();
856 if (ce != nullptr && ce->is_virtual_thread()) {
857 do_timed_parked = true;
858 }
859
860 for (;;) {
861
862 if (TryLock(current) == TryLockResult::Success) {
863 break;
864 }
865 assert(owner_raw() != owner_for(current), "invariant");
866
867 // park self
868 if (_Responsible == current || do_timed_parked) {
869 current->_ParkEvent->park((jlong) recheckInterval);
870 // Increase the recheckInterval, but clamp the value.
871 recheckInterval *= 8;
872 if (recheckInterval > MAX_RECHECK_INTERVAL) {
873 recheckInterval = MAX_RECHECK_INTERVAL;
874 }
875 } else {
876 current->_ParkEvent->park();
877 }
878
879 if (TryLock(current) == TryLockResult::Success) {
880 break;
881 }
882
883 if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
884 // Cancelled the in-progress async deflation by changing owner from
885 // DEFLATER_MARKER to current. As part of the contended enter protocol,
886 // contentions was incremented to a positive value before EnterI()
887 // was called and that prevents the deflater thread from winning the
888 // last part of the 2-part async deflation protocol. After EnterI()
918 // We can find that we were unpark()ed and redesignated _succ while
919 // we were spinning. That's harmless. If we iterate and call park(),
920 // park() will consume the event and return immediately and we'll
921 // just spin again. This pattern can repeat, leaving _succ to simply
922 // spin on a CPU.
923
924 if (_succ == current) _succ = nullptr;
925
926 // Invariant: after clearing _succ a thread *must* retry _owner before parking.
927 OrderAccess::fence();
928 }
929
930 // Egress :
931 // current has acquired the lock -- Unlink current from the cxq or EntryList.
932 // Normally we'll find current on the EntryList .
933 // From the perspective of the lock owner (this thread), the
934 // EntryList is stable and cxq is prepend-only.
935 // The head of cxq is volatile but the interior is stable.
936 // In addition, current.TState is stable.
937
938 assert(owner_raw() == owner_for(current), "invariant");
939
940 UnlinkAfterAcquire(current, &node);
941 if (_succ == current) _succ = nullptr;
942
943 assert(_succ != current, "invariant");
944 if (_Responsible == current) {
945 _Responsible = nullptr;
946 OrderAccess::fence(); // Dekker pivot-point
947
948 // We may leave threads on cxq|EntryList without a designated
949 // "Responsible" thread. This is benign. When this thread subsequently
950 // exits the monitor it can "see" such preexisting "old" threads --
951 // threads that arrived on the cxq|EntryList before the fence, above --
952 // by LDing cxq|EntryList. Newly arrived threads -- that is, threads
953 // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
954 // non-null and elect a new "Responsible" timer thread.
955 //
956 // This thread executes:
957 // ST Responsible=null; MEMBAR (in enter epilogue - here)
958 // LD cxq|EntryList (in subsequent exit)
974 // STs to monitor meta-data and user-data could reorder with (become
975 // visible after) the ST in exit that drops ownership of the lock.
976 // Some other thread could then acquire the lock, but observe inconsistent
977 // or old monitor meta-data and heap data. That violates the JMM.
978 // To that end, the 1-0 exit() operation must have at least STST|LDST
979 // "release" barrier semantics. Specifically, there must be at least a
980 // STST|LDST barrier in exit() before the ST of null into _owner that drops
981 // the lock. The barrier ensures that changes to monitor meta-data and data
982 // protected by the lock will be visible before we release the lock, and
983 // therefore before some other thread (CPU) has a chance to acquire the lock.
984 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
985 //
986 // Critically, any prior STs to _succ or EntryList must be visible before
987 // the ST of null into _owner in the *subsequent* (following) corresponding
988 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
989 // execute a serializing instruction.
990
991 return;
992 }
993
// Called for a virtual thread whose continuation has just been frozen
// successfully (see the freeze_ok path in enter()) while contending for this
// monitor. Makes some last-ditch attempts to acquire the monitor; if one
// succeeds the preemption is cancelled and the vthread keeps running.
// Otherwise an ObjectWaiter proxy node for the vthread is pushed on _cxq so
// the vthread can be selected as successor later, and the caller proceeds
// with the preemption.
// Returns true if the monitor was acquired (or the in-progress async
// deflation was cancelled), false if the vthread was left queued and will be
// preempted.
bool ObjectMonitor::HandlePreemptedVThread(JavaThread* current) {
  // Either because we acquire the lock below or because we will preempt the
  // vthread clear the _current_pending_monitor field from the current JavaThread.
  current->set_current_pending_monitor(nullptr);

  // Try once more after freezing the continuation.
  if (TryLock(current) == TryLockResult::Success) {
    assert(owner_raw() == owner_for(current), "invariant");
    assert(_succ != current, "invariant");
    assert(_Responsible != current, "invariant");
    current->set_preemption_cancelled(true);
    // Undo the contended-enter contentions increment; we own the monitor now.
    add_to_contentions(-1);
    return true;
  }

  if (try_set_owner_from(DEFLATER_MARKER, current) == DEFLATER_MARKER) {
    // Cancelled the in-progress async deflation by changing owner from
    // DEFLATER_MARKER to current. As part of the contended enter protocol,
    // contentions was incremented to a positive value before this call to
    // HandlePreemptedVThread(). We avoid decrementing contentions to
    // prevent the deflater thread from winning the last part of the
    // 2-part async deflation protocol. The deflater thread will decrement
    // contentions after it recognizes that the async deflation was cancelled.
    assert(_succ != current, "invariant");
    assert(_Responsible != current, "invariant");
    current->set_preemption_cancelled(true);
    return true;
  }

  // We will park the vthread: flip its state RUNNING -> BLOCKING.
  oop vthread = current->vthread();
  assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
  java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);

  // Proxy node representing the vthread on the monitor's queues. We own it
  // until it is successfully published on _cxq.
  ObjectWaiter* node = new ObjectWaiter(vthread);
  node->_prev = (ObjectWaiter*) 0xBAD;  // diagnostic poison value
  node->TState = ObjectWaiter::TS_CXQ;

  // Push node associated with vthread onto the front of the _cxq.
  ObjectWaiter* nxt;
  for (;;) {
    node->_next = nxt = _cxq;
    if (Atomic::cmpxchg(&_cxq, nxt, node) == nxt) break;

    // Interference - the CAS failed because _cxq changed. Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock(current) == TryLockResult::Success) {
      assert(owner_raw() == owner_for(current), "invariant");
      assert(_succ != current, "invariant");
      assert(_Responsible != current, "invariant");
      current->set_preemption_cancelled(true);
      java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::RUNNING);
      add_to_contentions(-1);
      // Node was never published, so it is still exclusively ours to free.
      delete node;
      return true;
    }
  }

  // We have to try once more since owner could have exited monitor and checked
  // _cxq before we added the node to the queue.
  if (TryLock(current) == TryLockResult::Success) {
    assert(owner_raw() == owner_for(current), "invariant");
    assert(_Responsible != current, "invariant");
    current->set_preemption_cancelled(true);
    java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::RUNNING);
    // The node is on _cxq/_EntryList now, so it must be unlinked before freeing.
    UnlinkAfterAcquire(current, node, vthread);
    delete node;
    // For vthreads _succ holds the thread id cast to a JavaThread*.
    if (_succ == (JavaThread*)java_lang_Thread::thread_id(vthread)) _succ = nullptr;
    add_to_contentions(-1);
    return true;
  }

  if (nxt == nullptr && _EntryList == nullptr) {
    // The C2 unlock() fast path first checks if _cxq and _EntryList are empty and
    // if they are it just clears the _owner field. Since we always run the risk of
    // having that check happening before we added the node to _cxq and the release
    // of the monitor happening after the last TryLock attempt we need to do something
    // to avoid stranding. We set the _Responsible field which results in a timed-wait.
    if (Atomic::replace_if_null(&_Responsible, (JavaThread*)java_lang_Thread::thread_id(vthread))) {
      java_lang_VirtualThread::set_recheckInterval(vthread, 1);
    }
  }

  return false;
}
1078
1079 // ReenterI() is a specialized inline form of the latter half of the
1080 // contended slow-path from EnterI(). We use ReenterI() only for
1081 // monitor reentry in wait().
1082 //
1083 // In the future we should reconcile EnterI() and ReenterI().
1084
1085 void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
1086 assert(current != nullptr, "invariant");
1087 assert(currentNode != nullptr, "invariant");
1088 assert(currentNode->_thread == current, "invariant");
1089 assert(_waiters > 0, "invariant");
1090 assert(object()->mark() == markWord::encode(this), "invariant");
1091
1092 assert(current->thread_state() != _thread_blocked, "invariant");
1093
1094 int nWakeups = 0;
1095 for (;;) {
1096 ObjectWaiter::TStates v = currentNode->TState;
1097 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1098 assert(owner_raw() != owner_for(current), "invariant");
1099
1100 if (TrySpin(current)) {
1101 break;
1102 }
1103
1104 {
1105 OSThreadContendState osts(current->osthread());
1106
1107 assert(current->thread_state() == _thread_in_vm, "invariant");
1108
1109 {
1110 ClearSuccOnSuspend csos(this);
1111 ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
1112 current->_ParkEvent->park();
1113 }
1114 }
1115
1116 // Try again, but just so we distinguish between futile wakeups and
1117 // successful wakeups. The following test isn't algorithmically
1118 // necessary, but it helps us maintain sensible statistics.
1131 // find that _succ == current.
1132 if (_succ == current) _succ = nullptr;
1133
1134 // Invariant: after clearing _succ a contending thread
1135 // *must* retry _owner before parking.
1136 OrderAccess::fence();
1137
1138 // This PerfData object can be used in parallel with a safepoint.
1139 // See the work around in PerfDataManager::destroy().
1140 OM_PERFDATA_OP(FutileWakeups, inc());
1141 }
1142
1143 // current has acquired the lock -- Unlink current from the cxq or EntryList .
1144 // Normally we'll find current on the EntryList.
1145 // Unlinking from the EntryList is constant-time and atomic-free.
1146 // From the perspective of the lock owner (this thread), the
1147 // EntryList is stable and cxq is prepend-only.
1148 // The head of cxq is volatile but the interior is stable.
1149 // In addition, current.TState is stable.
1150
1151 assert(owner_raw() == owner_for(current), "invariant");
1152 assert(object()->mark() == markWord::encode(this), "invariant");
1153 UnlinkAfterAcquire(current, currentNode);
1154 if (_succ == current) _succ = nullptr;
1155 assert(_succ != current, "invariant");
1156 currentNode->TState = ObjectWaiter::TS_RUN;
1157 OrderAccess::fence(); // see comments at the end of EnterI()
1158 }
1159
// Retry acquiring the monitor on behalf of a virtual thread that was resumed
// after being preempted while blocked on this monitor. On success runs
// VThreadEpilog(); on failure arranges for the vthread to be preempted again
// (set_preempting) and adjusts the timed-park recheck interval used by the
// "Responsible" anti-stranding protocol.
void ObjectMonitor::redo_enter(JavaThread* current) {
  assert(java_lang_VirtualThread::state(current->vthread()) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
  assert(current->is_in_VTMS_transition(), "must be");

  if (TryLock(current) == TryLockResult::Success) {
    VThreadEpilog(current);
    return;
  }

  oop vthread = current->vthread();
  // For vthreads _succ holds the thread id cast to a JavaThread*.
  if (_succ == (JavaThread*)java_lang_Thread::thread_id(vthread)) _succ = nullptr;

  // Invariant: after clearing _succ a thread *must* retry _owner before parking.
  OrderAccess::fence();

  if (TryLock(current) == TryLockResult::Success) {
    assert(owner_raw() == owner_for(current), "invariant");
    VThreadEpilog(current);
    return;
  }

  // Fast preemption. The JT will read this variable on return to the
  // monitorenter_redo stub and will just remove enterSpecial frame
  // from the stack and return to Continuation.run()
  current->set_preempting(true);

  java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::BLOCKING);
  if (_Responsible == (JavaThread*)java_lang_Thread::thread_id(vthread)) {
    // This vthread is the designated "Responsible" thread: back off its
    // recheck interval (stored on the vthread; value stays in [1, 6]).
    int recheckInterval = java_lang_VirtualThread::recheckInterval(vthread);
    assert(recheckInterval >= 1 && recheckInterval <= 6, "invariant");
    if (recheckInterval < 6) {
      recheckInterval++;
      java_lang_VirtualThread::set_recheckInterval(vthread, recheckInterval);
    }
  } else if (java_lang_VirtualThread::recheckInterval(vthread) > 0) {
    // No need to do timed park anymore
    java_lang_VirtualThread::set_recheckInterval(vthread, 0);
  }
}
1199
// Bookkeeping run once a previously-preempted virtual thread has acquired
// this monitor: undoes the contended-enter contentions increment, clears any
// timed-park recheck state, relinquishes the _succ and _Responsible roles if
// this vthread held them, and unlinks and frees the ObjectWaiter proxy node
// that was left on _cxq/_EntryList by HandlePreemptedVThread().
void ObjectMonitor::VThreadEpilog(JavaThread* current) {
  assert(owner_raw() == owner_for(current), "invariant");
  add_to_contentions(-1);

  oop vthread = current->vthread();
  if (java_lang_VirtualThread::recheckInterval(vthread) > 0) {
    java_lang_VirtualThread::set_recheckInterval(vthread, 0);
  }
  // For vthreads _succ and _Responsible hold the thread id cast to a JavaThread*.
  int64_t threadid = java_lang_Thread::thread_id(vthread);
  if (_succ == (JavaThread*)threadid) _succ = nullptr;
  if (_Responsible == (JavaThread*)threadid) {
    _Responsible = nullptr;
    OrderAccess::fence(); // Dekker pivot-point
  }
  ObjectWaiter* node = LookupWaiter(threadid);
  UnlinkAfterAcquire(current, node, vthread);
  delete node;
}
1218
1219 // By convention we unlink a contending thread from EntryList|cxq immediately
1220 // after the thread acquires the lock in ::enter(). Equally, we could defer
1221 // unlinking the thread until ::exit()-time.
1222
1223 void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* currentNode, oop vthread) {
1224 assert(owner_raw() == owner_for(current), "invariant");
1225 assert((currentNode->_thread == current) || (currentNode->_thread == nullptr && currentNode->vthread() == vthread), "invariant");
1226
1227 if (currentNode->TState == ObjectWaiter::TS_ENTER) {
1228 // Normal case: remove current from the DLL EntryList .
1229 // This is a constant-time operation.
1230 ObjectWaiter* nxt = currentNode->_next;
1231 ObjectWaiter* prv = currentNode->_prev;
1232 if (nxt != nullptr) nxt->_prev = prv;
1233 if (prv != nullptr) prv->_next = nxt;
1234 if (currentNode == _EntryList) _EntryList = nxt;
1235 assert(nxt == nullptr || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
1236 assert(prv == nullptr || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
1237 } else {
1238 assert(currentNode->TState == ObjectWaiter::TS_CXQ, "invariant");
1239 // Inopportune interleaving -- current is still on the cxq.
1240 // This usually means the enqueue of self raced an exiting thread.
1241 // Normally we'll find current near the front of the cxq, so
1242 // dequeueing is typically fast. If needbe we can accelerate
1243 // this with some MCS/CHL-like bidirectional list hints and advisory
1244 // back-links so dequeueing from the interior will normally operate
1245 // in constant-time.
1265 q = p;
1266 assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
1267 }
1268 assert(v != currentNode, "invariant");
1269 assert(p == currentNode, "Node not found on cxq");
1270 assert(p != _cxq, "invariant");
1271 assert(q != nullptr, "invariant");
1272 assert(q->_next == p, "invariant");
1273 q->_next = p->_next;
1274 }
1275 }
1276
1277 #ifdef ASSERT
1278 // Diagnostic hygiene ...
1279 currentNode->_prev = (ObjectWaiter*) 0xBAD;
1280 currentNode->_next = (ObjectWaiter*) 0xBAD;
1281 currentNode->TState = ObjectWaiter::TS_RUN;
1282 #endif
1283 }
1284
1285 // Fix this. Save ObjectWaiter* when freezing. Or use hashtable.
1286 ObjectWaiter* ObjectMonitor::LookupWaiter(int64_t threadid) {
1287 ObjectWaiter* p;
1288 for (p = _EntryList; p != nullptr && (!p->is_vthread() || java_lang_Thread::thread_id(p->vthread()) != threadid); p = p->_next) {}
1289 if (p != nullptr) return p;
1290 for (p = _cxq; p != nullptr && (!p->is_vthread() || java_lang_Thread::thread_id(p->vthread()) != threadid); p = p->_next) {}
1291 assert(p != nullptr, "should be on either _cxq or _EntryList");
1292 return p;
1293 }
1294
1295 // -----------------------------------------------------------------------------
1296 // Exit support
1297 //
1298 // exit()
1299 // ~~~~~~
1300 // Note that the collector can't reclaim the objectMonitor or deflate
1301 // the object out from underneath the thread calling ::exit() as the
1302 // thread calling ::exit() never transitions to a stable state.
1303 // This inhibits GC, which in turn inhibits asynchronous (and
1304 // inopportune) reclamation of "this".
1305 //
1306 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
1307 // There's one exception to the claim above, however. EnterI() can call
1308 // exit() to drop a lock if the acquirer has been externally suspended.
1309 // In that case exit() is called with _thread_state == _thread_blocked,
1310 // but the monitor's _contentions field is > 0, which inhibits reclamation.
1311 //
1312 // 1-0 exit
1313 // ~~~~~~~~
1314 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
1334 // exiting thread will notice and unpark the stranded thread, or, (b)
1335 // the timer expires. If the lock is high traffic then the stranding latency
1336 // will be low due to (a). If the lock is low traffic then the odds of
1337 // stranding are lower, although the worst-case stranding latency
1338 // is longer. Critically, we don't want to put excessive load in the
1339 // platform's timer subsystem. We want to minimize both the timer injection
1340 // rate (timers created/sec) as well as the number of timers active at
1341 // any one time. (more precisely, we want to minimize timer-seconds, which is
1342 // the integral of the # of active timers at any instant over time).
1343 // Both impinge on OS scalability. Given that, at most one thread parked on
1344 // a monitor will use a timer.
1345 //
1346 // There is also the risk of a futile wake-up. If we drop the lock
1347 // another thread can reacquire the lock immediately, and we can
1348 // then wake a thread unnecessarily. This is benign, and we've
1349 // structured the code so the windows are short and the frequency
1350 // of such futile wakeups is low.
1351
1352 void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
1353 void* cur = owner_raw(); // snapshot of the _owner field
1354 if (owner_for(current) != cur) {
1355 // Apparent unbalanced locking ...
1356 // Naively we'd like to throw IllegalMonitorStateException.
1357 // As a practical matter we can neither allocate nor throw an
1358 // exception as ::exit() can be called from leaf routines.
1359 // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1360 // Upon deeper reflection, however, in a properly run JVM the only
1361 // way we should encounter this situation is in the presence of
1362 // unbalanced JNI locking. TODO: CheckJNICalls.
1363 // See also: CR4414101
1364 #ifdef ASSERT
1365 LogStreamHandle(Error, monitorinflation) lsh;
1366 lsh.print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
1367 " is exiting an ObjectMonitor it does not own.", p2i(current));
1368 lsh.print_cr("The imbalance is possibly caused by JNI locking.");
1369 print_debug_style_on(&lsh);
1370 assert(false, "Non-balanced monitor enter/exit!");
1371 #endif
1372 return; // in product builds, silently tolerate the unbalanced exit
1373 }
1374
1375 if (_recursions != 0) {
1376 _recursions--; // this is simple recursive enter
1377 return;
1378 }
1379
1380 // Invariant: after setting Responsible=null a thread must execute
1381 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1382 _Responsible = nullptr;
1383
1384 #if INCLUDE_JFR
1385 // get the owner's thread id for the MonitorEnter event
1386 // if it is enabled and the thread isn't suspended
1387 if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1388 _previous_owner_tid = JFR_THREAD_ID(current);
1389 }
1390 #endif
1391
1392 for (;;) {
1393 assert(owner_for(current) == owner_raw(), "invariant");
1394
1395 // Drop the lock.
1396 // release semantics: prior loads and stores from within the critical section
1397 // must not float (reorder) past the following store that drops the lock.
1398 // Uses a storeload to separate release_store(owner) from the
1399 // successor check. The try_set_owner_from() below uses cmpxchg() so
1400 // we get the fence down there.
1401 release_clear_owner(current);
1402 OrderAccess::storeload();
1403
1404 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) { // no waiters, or a successor is already in place
1405 return;
1406 }
1407 // Other threads are blocked trying to acquire the lock.
1408
1409 // Normally the exiting thread is responsible for ensuring succession,
1410 // but if other successors are ready or other entering threads are spinning
1411 // then this thread can simply store null into _owner and exit without
1412 // waking a successor. The existence of spinners or ready successors
1413 // guarantees proper succession (liveness). Responsibility passes to the
1414 // ready or running successors. The exiting thread delegates the duty.
1415 // More precisely, if a successor already exists this thread is absolved
1416 // of the responsibility of waking (unparking) one.
1417 //
1418 // The _succ variable is critical to reducing futile wakeup frequency.
1429 // to drop the lock and then spin briefly to see if a spinner managed
1430 // to acquire the lock. If so, the exiting thread could exit
1431 // immediately without waking a successor, otherwise the exiting
1432 // thread would need to dequeue and wake a successor.
1433 // (Note that we'd need to make the post-drop spin short, but no
1434 // shorter than the worst-case round-trip cache-line migration time.
1435 // The dropped lock needs to become visible to the spinner, and then
1436 // the acquisition of the lock by the spinner must become visible to
1437 // the exiting thread).
1438
1439 // It appears that an heir-presumptive (successor) must be made ready.
1440 // Only the current lock owner can manipulate the EntryList or
1441 // drain _cxq, so we need to reacquire the lock. If we fail
1442 // to reacquire the lock the responsibility for ensuring succession
1443 // falls to the new owner.
1444 //
1445 if (try_set_owner_from(nullptr, current) != nullptr) {
1446 return; // reacquire failed: the new owner now handles succession
1447 }
1448
1449 guarantee(owner_raw() == owner_for(current), "invariant");
1450
1451 ObjectWaiter* w = nullptr;
1452
1453 w = _EntryList; // prefer a thread already on the EntryList
1454 if (w != nullptr) {
1455 // I'd like to write: guarantee (w->_thread != current).
1456 // But in practice an exiting thread may find itself on the EntryList.
1457 // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
1458 // then calls exit(). Exit releases the lock by setting O._owner to null.
1459 // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
1460 // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
1461 // releases the lock "O". T2 resumes immediately after the ST of null into
1462 // _owner, above. T2 notices that the EntryList is populated, so it
1463 // reacquires the lock and then finds itself on the EntryList.
1464 // Given all that, we have to tolerate the circumstance where "w" is
1465 // associated with current.
1466 assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1467 ExitEpilog(current, w);
1468 return;
1469 }
1506 }
1507
1508 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = nullptr
1509 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1510
1511 // See if we can abdicate to a spinner instead of waking a thread.
1512 // A primary goal of the implementation is to reduce the
1513 // context-switch rate.
1514 if (_succ != nullptr) continue; // a successor appeared; retry the drop-lock loop
1515
1516 w = _EntryList;
1517 if (w != nullptr) {
1518 guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1519 ExitEpilog(current, w);
1520 return;
1521 }
1522 }
1523 }
1524
1525 void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) {
1526 assert(owner_raw() == owner_for(current), "invariant");
1527
1528 // Exit protocol:
1529 // 1. ST _succ = wakee
1530 // 2. membar #loadstore|#storestore;
1531 // 3. ST _owner = nullptr
1532 // 4. unpark(wakee)
1533
1534 oop vthread = nullptr;
1535 if (Wakee->_thread != nullptr) {
1536 // Platform thread case
1537 _succ = Wakee->_thread;
1538 } else {
1539 assert(Wakee->vthread() != nullptr, "invariant");
1540 vthread = Wakee->vthread();
1541 _succ = (JavaThread*)java_lang_Thread::thread_id(vthread); // NOTE(review): stores the vthread tid cast to a pointer as a tag value, not a real JavaThread* -- confirm all _succ readers tolerate this
1542 }
1543 ParkEvent * Trigger = Wakee->_event; // grab the event before Wakee may become invalid
1544
1545 // Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again.
1546 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1547 // out-of-scope (non-extant).
1548 Wakee = nullptr;
1549
1550 // Drop the lock.
1551 // Uses a fence to separate release_store(owner) from the LD in unpark().
1552 release_clear_owner(current);
1553 OrderAccess::fence();
1554
1555 DTRACE_MONITOR_PROBE(contended__exit, this, object(), current);
1556
1557 if (vthread == nullptr) {
1558 // Platform thread case
1559 Trigger->unpark();
1560 } else if (java_lang_VirtualThread::set_onWaitingList(vthread, _vthread_cxq_head)) { // vthread was added to the global waiting list; wake the unparker
1561 Trigger->unpark();
1562 }
1563
1564 // Maintain stats and report events to JVMTI
1565 OM_PERFDATA_OP(Parks, inc());
1566 }
1567
1568 // complete_exit exits a lock returning recursion count
1569 // complete_exit requires an inflated monitor
1570 // The _owner field is not always the Thread addr even with an
1571 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1572 // thread due to contention.
1573 intx ObjectMonitor::complete_exit(JavaThread* current) {
1574 assert(InitDone, "Unexpectedly not initialized");
1575
1576 void* cur = owner_raw();
1577 if (owner_for(current) != cur) {
1578 if (LockingMode == LM_LEGACY && is_stack_locker(current)) { // owner recorded as a stack-lock address, not the thread
1579 assert(_recursions == 0, "internal state error");
1580 set_owner_from_BasicLock(current); // Convert from BasicLock* to Thread*.
1581 _recursions = 0;
1582 }
1583 }
1584
1585 guarantee(owner_for(current) == owner_raw(), "complete_exit not owner");
1586 intx save = _recursions; // record the old recursion count
1587 _recursions = 0; // set the recursion level to be 0
1588 exit(current); // exit the monitor
1589 guarantee(owner_raw() != owner_for(current), "invariant");
1590 return save; // caller can use this to re-enter to the same depth later
1591 }
1592
1593 // Checks that the current THREAD owns this monitor and causes an
1594 // immediate return (from the enclosing void method) if it doesn't.
1595 // We don't use the CHECK macro because we want the IMSE to be the
1596 // only exception that is thrown from the call site when false is
1597 // returned. Any other pending exception is ignored.
1598 #define CHECK_OWNER() \
1599 do { \
1600 if (!check_owner(THREAD)) { \
1601 assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1602 return; \
1603 } \
1604 } while (false)
1605
1606 // Returns true when the calling thread owns this ObjectMonitor.
1607 // Otherwise throws IllegalMonitorStateException (IMSE) and returns
1608 // false; any exception already pending on the thread is replaced
1609 // by the IMSE.
1610 bool ObjectMonitor::check_owner(TRAPS) {
1611 JavaThread* current = THREAD;
1612 if (owner_for(current) != owner_raw()) {
1613 // Not the owner -- raise the IMSE and report failure.
1614 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1615 "current thread is not owner", false);
1616 }
1617 return true;
1618 }
1619
1620 static inline bool is_excluded(const Klass* monitor_klass) { // should event posting be suppressed for this monitor's klass?
1621 assert(monitor_klass != nullptr, "invariant");
1622 NOT_JFR_RETURN_(false); // without JFR support no klass is excluded
1623 JFR_ONLY(return vmSymbols::jfr_chunk_rotation_monitor() == monitor_klass->name();) // only JFR's own chunk rotation monitor is excluded
1624 }
1625
1626 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1627 ObjectMonitor* monitor,
1628 uint64_t notifier_tid,
1629 jlong timeout,
1630 bool timedout) {
1631 assert(event != nullptr, "invariant");
1632 assert(monitor != nullptr, "invariant");
1633 const Klass* monitor_klass = monitor->object()->klass();
1665 if (JvmtiExport::should_post_monitor_waited()) {
1666 // Note: 'false' parameter is passed here because the
1667 // wait was not timed out due to thread interrupt.
1668 JvmtiExport::post_monitor_waited(current, this, false);
1669
1670 // In this short circuit of the monitor wait protocol, the
1671 // current thread never drops ownership of the monitor and
1672 // never gets added to the wait queue so the current thread
1673 // cannot be made the successor. This means that the
1674 // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1675 // consume an unpark() meant for the ParkEvent associated with
1676 // this ObjectMonitor.
1677 }
1678 if (event.should_commit()) {
1679 post_monitor_wait_event(&event, this, 0, millis, false);
1680 }
1681 THROW(vmSymbols::java_lang_InterruptedException());
1682 return;
1683 }
1684
1685 ContinuationEntry* ce = current->last_continuation();
1686 if (ce != nullptr && ce->is_virtual_thread()) {
1687 const Klass* monitor_klass = object()->klass();
1688 if (!is_excluded(monitor_klass)) {
1689 ResourceMark rm;
1690 char reason[256];
1691 jio_snprintf(reason, sizeof reason, "Object.wait on object of klass %s", monitor_klass->external_name());
1692 post_virtual_thread_pinned_event(current, reason);
1693 }
1694 }
1695
1696 current->set_current_waiting_monitor(this);
1697
1698 // create a node to be put into the queue
1699 // Critically, after we reset() the event but prior to park(), we must check
1700 // for a pending interrupt.
1701 ObjectWaiter node(current);
1702 node.TState = ObjectWaiter::TS_WAIT;
1703 current->_ParkEvent->reset();
1704 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
1705
1706 // Enter the waiting queue, which is a circular doubly linked list in this case
1707 // but it could be a priority queue or any data structure.
1708 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
1709 // by the owner of the monitor *except* in the case where park()
1710 // returns because of a timeout or interrupt. Contention is exceptionally rare
1711 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1712
1713 Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
1714 AddWaiter(&node);
1715 Thread::SpinRelease(&_WaitSetLock);
1716
1717 _Responsible = nullptr;
1718
1719 intx save = _recursions; // record the old recursion count
1720 _waiters++; // increment the number of waiters
1721 _recursions = 0; // set the recursion level to be 1
1722 exit(current); // exit the monitor
1723 guarantee(owner_raw() != owner_for(current), "invariant");
1724
1725 // The thread is on the WaitSet list - now park() it.
1726 // On MP systems it's conceivable that a brief spin before we park
1727 // could be profitable.
1728 //
1729 // TODO-FIXME: change the following logic to a loop of the form
1730 // while (!timeout && !interrupted && _notified == 0) park()
1731
1732 int ret = OS_OK;
1733 int WasNotified = 0;
1734
1735 // Need to check interrupt state whilst still _thread_in_vm
1736 bool interrupted = interruptible && current->is_interrupted(false);
1737
1738 { // State transition wrappers
1739 OSThread* osthread = current->osthread();
1740 OSThreadWaitState osts(osthread, true);
1741
1742 assert(current->thread_state() == _thread_in_vm, "invariant");
1743
1809 // The ObjectMonitor was notified and the current thread is
1810 // the successor which also means that an unpark() has already
1811 // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1812 // consume the unpark() that was done when the successor was
1813 // set because the same ParkEvent is shared between Java
1814 // monitors and JVM/TI RawMonitors (for now).
1815 //
1816 // We redo the unpark() to ensure forward progress, i.e., we
1817 // don't want all pending threads hanging (parked) with none
1818 // entering the unlocked monitor.
1819 node._event->unpark();
1820 }
1821 }
1822
1823 if (event.should_commit()) {
1824 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1825 }
1826
1827 OrderAccess::fence();
1828
1829 assert(owner_raw() != owner_for(current), "invariant");
1830 ObjectWaiter::TStates v = node.TState;
1831 if (v == ObjectWaiter::TS_RUN) {
1832 enter(current);
1833 } else {
1834 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1835 ReenterI(current, &node);
1836 node.wait_reenter_end(this);
1837 }
1838
1839 // current has reacquired the lock.
1840 // Lifecycle - the node representing current must not appear on any queues.
1841 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1842 // want residual elements associated with this thread left on any lists.
1843 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1844 assert(owner_raw() == owner_for(current), "invariant");
1845 assert(_succ != current, "invariant");
1846 } // OSThreadWaitState()
1847
1848 current->set_current_waiting_monitor(nullptr);
1849
1850 guarantee(_recursions == 0, "invariant");
1851 int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current);
1852 _recursions = save // restore the old recursion count
1853 + relock_count; // increased by the deferred relock count
1854 NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count(relock_count);) // Deopt never entered these counts.
1855 _waiters--; // decrement the number of waiters
1856
1857 // Verify a few postconditions
1858 assert(owner_raw() == owner_for(current), "invariant");
1859 assert(_succ != current, "invariant");
1860 assert(object()->mark() == markWord::encode(this), "invariant");
1861
1862 // check if the notification happened
1863 if (!WasNotified) {
1864 // no, it could be timeout or Thread.interrupt() or both
1865 // check for interrupt event, otherwise it is timeout
1866 if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
1867 THROW(vmSymbols::java_lang_InterruptedException());
1868 }
1869 }
1870
1871 // NOTE: Spurious wake up will be considered as timeout.
1872 // Monitor notify has precedence over thread interrupt.
1873 }
1874
1875
1876 // Consider:
1877 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1878 // then instead of transferring a thread from the WaitSet to the EntryList
2115 }
2116
2117 //
2118 // Consider the following alternative:
2119 // Periodically set _SpinDuration = _SpinLimit and try a long/full
2120 // spin attempt. "Periodically" might mean after a tally of
2121 // the # of failed spin attempts (or iterations) reaches some threshold.
2122 // This takes us into the realm of 1-out-of-N spinning, where we
2123 // hold the duration constant but vary the frequency.
2124
2125 int ctr = _SpinDuration;
2126 if (ctr <= 0) return false;
2127
2128 // We're good to spin ... spin ingress.
2129 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
2130 // when preparing to LD...CAS _owner, etc and the CAS is likely
2131 // to succeed.
2132 if (_succ == nullptr) {
2133 _succ = current;
2134 }
2135 void* prv = nullptr;
2136
2137 // There are three ways to exit the following loop:
2138 // 1. A successful spin where this thread has acquired the lock.
2139 // 2. Spin failure with prejudice
2140 // 3. Spin failure without prejudice
2141
2142 while (--ctr >= 0) {
2143
2144 // Periodic polling -- Check for pending GC
2145 // Threads may spin while they're unsafe.
2146 // We don't want spinning threads to delay the JVM from reaching
2147 // a stop-the-world safepoint or to steal cycles from GC.
2148 // If we detect a pending safepoint we abort in order that
2149 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
2150 // this thread, if safe, doesn't steal cycles from GC.
2151 // This is in keeping with the "no loitering in runtime" rule.
2152 // We periodically check to see if there's a safepoint pending.
2153 if ((ctr & 0xFF) == 0) {
2154 // Can't call SafepointMechanism::should_process() since that
2155 // might update the poll values and we could be in a thread_blocked
2156 // state here which is not allowed so just check the poll.
2157 if (SafepointMechanism::local_poll_armed(current)) {
2158 break;
2159 }
2160 SpinPause();
2161 }
2162
2163 // Probe _owner with TATAS
2164 // If this thread observes the monitor transition or flicker
2165 // from locked to unlocked to locked, then the odds that this
2166 // thread will acquire the lock in this spin attempt go down
2167 // considerably. The same argument applies if the CAS fails
2168 // or if we observe _owner change from one non-null value to
2169 // another non-null value. In such cases we might abort
2170 // the spin without prejudice or apply a "penalty" to the
2171 // spin count-down variable "ctr", reducing it by 100, say.
2172
2173 void* ox = owner_raw();
2174 if (ox == nullptr) {
2175 ox = try_set_owner_from(nullptr, current);
2176 if (ox == nullptr) {
2177 // The CAS succeeded -- this thread acquired ownership
2178 // Take care of some bookkeeping to exit spin state.
2179 if (_succ == current) {
2180 _succ = nullptr;
2181 }
2182
2183 // Increase _SpinDuration :
2184 // The spin was successful (profitable) so we tend toward
2185 // longer spin attempts in the future.
2186 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
2187 // If we acquired the lock early in the spin cycle it
2188 // makes sense to increase _SpinDuration proportionally.
2189 // Note that we don't clamp SpinDuration precisely at SpinLimit.
2190 _SpinDuration = adjust_up(_SpinDuration);
2191 return true;
2192 }
2193
2194 // The CAS failed ... we can take any of the following actions:
2195 // * penalize: ctr -= CASPenalty
2224 OrderAccess::fence();
2225 if (TryLock(current) == TryLockResult::Success) {
2226 return true;
2227 }
2228 }
2229
2230 return false;
2231 }
2232
2233
2234 // -----------------------------------------------------------------------------
2235 // WaitSet management ...
2236
2237 ObjectWaiter::ObjectWaiter(JavaThread* current) { // queue node for one thread waiting on / entering a monitor
2238 _next = nullptr;
2239 _prev = nullptr;
2240 _notified = 0;
2241 _notifier_tid = 0;
2242 TState = TS_RUN;
2243 _thread = current; // nullptr when this waiter represents a virtual thread
2244 _event = _thread != nullptr ? _thread->_ParkEvent : ObjectMonitor::vthread_unparker_ParkEvent(); // vthread waiters share the global unparker event
2245 _active = false;
2246 assert(_event != nullptr, "invariant");
2247 }
2248
2249 ObjectWaiter::ObjectWaiter(oop vthread) : ObjectWaiter((JavaThread*)nullptr) { // waiter for an unmounted virtual thread; _thread stays null
2250 _vthread = OopHandle(JavaThread::thread_oop_storage(), vthread); // keep the vthread oop alive via an OopHandle
2251 }
2252
2253 void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) { // entering blocked-on-monitor state while re-acquiring after wait()
2254 _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(_thread, mon); // remember whether the blocked state was actually activated
2255 }
2256
2257 void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) { // undo wait_reenter_begin() once the monitor has been re-acquired
2258 JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(_thread, _active); // _active tells whether begin() activated the state
2259 }
2260
2261 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
2262 assert(node != nullptr, "should not add null node");
2263 assert(node->_prev == nullptr, "node already in list");
2264 assert(node->_next == nullptr, "node already in list");
2265 // put node at end of queue (circular doubly linked list)
2266 if (_WaitSet == nullptr) {
2267 _WaitSet = node;
2268 node->_prev = node;
2269 node->_next = node;
2270 } else {
2271 ObjectWaiter* head = _WaitSet;
2272 ObjectWaiter* tail = head->_prev;
2348 { \
2349 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
2350 CHECK); \
2351 }
2352 NEWPERFCOUNTER(_sync_Inflations);
2353 NEWPERFCOUNTER(_sync_Deflations);
2354 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2355 NEWPERFCOUNTER(_sync_FutileWakeups);
2356 NEWPERFCOUNTER(_sync_Parks);
2357 NEWPERFCOUNTER(_sync_Notifications);
2358 NEWPERFVARIABLE(_sync_MonExtant);
2359 #undef NEWPERFCOUNTER
2360 #undef NEWPERFVARIABLE
2361 }
2362
2363 _oop_storage = OopStorageSet::create_weak("ObjectSynchronizer Weak", mtSynchronizer);
2364
2365 DEBUG_ONLY(InitDone = true;)
2366 }
2367
2368 void ObjectMonitor::Initialize2() { // second-phase init: set up global virtual-thread unpark state
2369 _vthread_cxq_head = OopHandle(JavaThread::thread_oop_storage(), nullptr); // head of the global vthread waiting list
2370 _vthread_unparker_ParkEvent = ParkEvent::Allocate(nullptr); // event used to wake the vthread unparker -- assumes thread_oop_storage() is already initialized; TODO confirm against caller
2371 }
2372
2373 void ObjectMonitor::print_on(outputStream* st) const { // compact one-line dump of the monitor's core state
2374 // The minimal things to print for markWord printing, more can be added for debugging and logging.
2375 st->print("{contentions=0x%08x,waiters=0x%08x"
2376 ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2377 contentions(), waiters(), recursions(),
2378 p2i(owner()));
2379 }
2380 void ObjectMonitor::print() const { print_on(tty); } // convenience: print to the default tty stream
2381
2382 #ifdef ASSERT
2383 // Print the ObjectMonitor like a debugger would:
2384 //
2385 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2386 // _header = 0x0000000000000001
2387 // _object = 0x000000070ff45fd0
2388 // _pad_buf0 = {
2389 // [0] = '\0'
2390 // ...
2391 // [43] = '\0'
2392 // }
|