18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/lockStack.inline.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/objectMonitor.hpp"
47 #include "runtime/objectMonitor.inline.hpp"
48 #include "runtime/os.inline.hpp"
49 #include "runtime/osThread.hpp"
50 #include "runtime/perfData.hpp"
51 #include "runtime/safepointMechanism.inline.hpp"
52 #include "runtime/safepointVerifiers.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "runtime/stubRoutines.hpp"
55 #include "runtime/synchronizer.hpp"
56 #include "runtime/threads.hpp"
57 #include "runtime/timer.hpp"
58 #include "runtime/trimNativeHeap.hpp"
59 #include "runtime/vframe.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "utilities/align.hpp"
62 #include "utilities/dtrace.hpp"
63 #include "utilities/events.hpp"
64 #include "utilities/globalDefinitions.hpp"
65 #include "utilities/linkedlist.hpp"
66 #include "utilities/preserveException.hpp"
67
68 class ObjectMonitorDeflationLogging;
69
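// MonitorList is the lock-free, singly-linked list of all in-use
// ObjectMonitors. add() publishes a new monitor as the list head via a CAS
// retry loop; _count tracks the current population and _max records a
// high-water mark. Note that _max is updated without synchronization
// against concurrent adds, so it is only an approximation.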
70 void MonitorList::add(ObjectMonitor* m) {
71 ObjectMonitor* head;
72 do {
73 head = Atomic::load(&_head);
74 m->set_next_om(head);
75 } while (Atomic::cmpxchg(&_head, head, m) != head);
76
77 size_t count = Atomic::add(&_count, 1u);
78 if (count > max()) {
79 Atomic::inc(&_max);
80 }
81 }
82
83 size_t MonitorList::count() const {
84 return Atomic::load(&_count);
85 }
86
87 size_t MonitorList::max() const {
88 return Atomic::load(&_max);
89 }
259 static constexpr size_t inflation_lock_count() {
260 return 256;
261 }
262
263 // Static storage for an array of PlatformMutex.
264 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
265
266 static inline PlatformMutex* inflation_lock(size_t index) {
267 return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
268 }
269
270 void ObjectSynchronizer::initialize() {
271 for (size_t i = 0; i < inflation_lock_count(); i++) {
272 ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
273 }
274 // Start the ceiling with the estimate for one thread.
275 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
276
277 // Start the timer for deflations, so it does not trigger immediately.
278 _last_async_deflation_time_ns = os::javaTimeNanos();
279 }
280
281 MonitorList ObjectSynchronizer::_in_use_list;
282 // monitors_used_above_threshold() policy is as follows:
283 //
284 // The ratio of the current _in_use_list count to the ceiling is used
285 // to determine if we are above MonitorUsedDeflationThreshold and need
286 // to do an async monitor deflation cycle. The ceiling is increased by
287 // AvgMonitorsPerThreadEstimate when a thread is added to the system
288 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
289 // removed from the system.
290 //
291 // Note: If the _in_use_list max exceeds the ceiling, then
292 // monitors_used_above_threshold() will use the in_use_list max instead
293 // of the thread count derived ceiling because we have used more
294 // ObjectMonitors than the estimated average.
295 //
296 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
297 // no-progress async monitor deflation cycles in a row, then the ceiling
298 // is adjusted upwards by monitors_used_above_threshold().
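//
// Illustrative example (numbers chosen for exposition): with a ceiling of
// 10,000 monitors and MonitorUsedDeflationThreshold=90,
// monitors_used_above_threshold() computes (count * 100) / ceiling and
// requests an async deflation cycle once that integer percentage exceeds
// 90, i.e. once the in-use count reaches 9,100. If NoAsyncDeflationProgressMax
// consecutive cycles then make no progress, the ceiling is grown by the
// unused fraction plus one: 10,000 + 10,000 * (100 - 90) / 100 + 1 = 11,001.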
332 assert(current->thread_state() == _thread_in_Java, "invariant");
333 NoSafepointVerifier nsv;
334 if (obj == nullptr) return false; // slow-path for invalid obj
335 const markWord mark = obj->mark();
336
337 if (LockingMode == LM_LIGHTWEIGHT) {
338 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
339 // Degenerate notify
340 // fast-locked by caller so by definition the implied waitset is empty.
341 return true;
342 }
343 } else if (LockingMode == LM_LEGACY) {
344 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
345 // Degenerate notify
346 // stack-locked by caller so by definition the implied waitset is empty.
347 return true;
348 }
349 }
350
351 if (mark.has_monitor()) {
352 ObjectMonitor* const mon = mark.monitor();
353 assert(mon->object() == oop(obj), "invariant");
354 if (mon->owner() != current) return false; // slow-path for IMS exception
355
356 if (mon->first_waiter() != nullptr) {
357 // We have one or more waiters. Since this is an inflated monitor
358 // that we own, we can transfer one or more threads from the waitset
359 // to the entrylist here and now, avoiding the slow-path.
360 if (all) {
361 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
362 } else {
363 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
364 }
365 int free_count = 0;
366 do {
367 mon->INotify(current);
368 ++free_count;
369 } while (mon->first_waiter() != nullptr && all);
370 OM_PERFDATA_OP(Notifications, inc(free_count));
371 }
372 return true;
377 }
378
379
380 // The LockNode emitted directly at the synchronization site would have
381 // been too big if it were to have included support for the cases of inflated
382 // recursive enter and exit, so they go here instead.
383 // Note that we can't safely call AsyncPrintJavaStack() from within
384 // quick_enter() as our thread state remains _in_Java.
385
386 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
387 BasicLock * lock) {
388 assert(current->thread_state() == _thread_in_Java, "invariant");
389 NoSafepointVerifier nsv;
390 if (obj == nullptr) return false; // Need to throw NPE
391
392 if (obj->klass()->is_value_based()) {
393 return false;
394 }
395
396 if (LockingMode == LM_LIGHTWEIGHT) {
397 LockStack& lock_stack = current->lock_stack();
398 if (lock_stack.is_full()) {
399 // Always go into runtime if the lock stack is full.
400 return false;
401 }
402 if (lock_stack.try_recursive_enter(obj)) {
403 // Recursive lock successful.
404 current->inc_held_monitor_count();
405 return true;
406 }
407 }
408
409 const markWord mark = obj->mark();
410
411 if (mark.has_monitor()) {
412 ObjectMonitor* const m = mark.monitor();
413 // An async deflation or GC can race us before we manage to make
414 // the ObjectMonitor busy by setting the owner below. If we detect
415 // that race we just bail out to the slow-path here.
416 if (m->object_peek() == nullptr) {
417 return false;
418 }
419 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
420
421 // Lock contention and Transactional Lock Elision (TLE) diagnostics
422 // and observability
423 // Case: light contention possibly amenable to TLE
424 // Case: TLE inimical operations such as nested/recursive synchronization
425
426 if (owner == current) {
427 m->_recursions++;
428 current->inc_held_monitor_count();
429 return true;
430 }
431
432 if (LockingMode != LM_LIGHTWEIGHT) {
433 // This Java Monitor is inflated so obj's header will never be
434 // displaced to this thread's BasicLock. Make the displaced header
435 // non-null so this BasicLock is not seen as recursive nor as
436 // being locked. We do this unconditionally so that this thread's
437 // BasicLock cannot be mis-interpreted by any stack walkers. For
438 // performance reasons, stack walkers generally first check for
439 // stack-locking in the object's header, the second check is for
440 // recursive stack-locking in the displaced header in the BasicLock,
441 // and last are the inflated Java Monitor (ObjectMonitor) checks.
442 lock->set_displaced_header(markWord::unused_mark());
443 }
444
445 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
446 assert(m->_recursions == 0, "invariant");
447 current->inc_held_monitor_count();
448 return true;
449 }
450 }
451
452 // Note that we could inflate in quick_enter.
507 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
508 }
509 }
510
511 static bool useHeavyMonitors() {
512 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
513 return LockingMode == LM_MONITOR;
514 #else
515 return false;
516 #endif
517 }
518
519 // -----------------------------------------------------------------------------
520 // Monitor Enter/Exit
521
522 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
523 // When called with locking_thread != Thread::current() some mechanism must synchronize
524 // the locking_thread with respect to the current thread. Currently only used when
525 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
526 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
527 if (!enter_fast_impl(obj, lock, locking_thread)) {
528 // Inflated ObjectMonitor::enter_for is required
529
530 // An async deflation can race after the inflate_for() call and before
531 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
532 // if we have lost the race to async deflation and we simply try again.
533 while (true) {
534 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
535 if (monitor->enter_for(locking_thread)) {
536 return;
537 }
538 assert(monitor->is_being_async_deflated(), "must be");
539 }
540 }
541 }
542
543 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
544 assert(current == Thread::current(), "must be");
545 if (!enter_fast_impl(obj, lock, current)) {
546 // Inflated ObjectMonitor::enter is required
547
548 // An async deflation can race after the inflate() call and before
549 // enter() can make the ObjectMonitor busy. enter() returns false if
550 // we have lost the race to async deflation and we simply try again.
551 while (true) {
552 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
553 if (monitor->enter(current)) {
554 return;
555 }
556 }
557 }
558 }
559
560 // The interpreter and compiler assembly code tries to lock using the fast path
561 // of this algorithm. Make sure to update that code if the following function is
562 // changed. The implementation is extremely sensitive to race conditions. Be careful.
563 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
564
565 if (obj->klass()->is_value_based()) {
566 handle_sync_on_value_based_class(obj, locking_thread);
567 }
568
569 locking_thread->inc_held_monitor_count();
570
571 if (!useHeavyMonitors()) {
572 if (LockingMode == LM_LIGHTWEIGHT) {
573 // Fast-locking does not use the 'lock' argument.
574 LockStack& lock_stack = locking_thread->lock_stack();
575 if (lock_stack.is_full()) {
576 // We unconditionally make room on the lock stack by inflating
577 // the least recently locked object on the lock stack.
578
579 // About the choice to inflate least recently locked object.
580 // First we must choose to inflate a lock, either some lock on
581 // the lock-stack or the lock that is currently being entered
582 // (which may or may not be on the lock-stack).
583 // Second, the best lock to inflate is a lock which is entered
584 // in a control flow where there are only a very few locks being
585 // used, as the costly part of inflated locking is inflation,
586 // not locking. But this property is entirely program dependent.
587 // Third, inflating the lock currently being entered when it
588 // is not present on the lock-stack will result in a still full
589 // lock-stack. This creates a scenario where every deeper nested
590 // monitorenter must call into the runtime.
591 // The rationale is as follows:
592 // Because we cannot (currently) figure out the second, and want
593 // to avoid the third, we inflate a lock on the lock-stack.
594 // The least recently locked lock is chosen as it is the lock
595 // with the longest critical section.
596
597 log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
598 ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
599 assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
600 p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
601 assert(!lock_stack.is_full(), "must have made room here");
602 }
603
604 markWord mark = obj()->mark_acquire();
605 while (mark.is_unlocked()) {
606 // Retry until a lock state change has been observed. cas_set_mark() may collide with modifications of non-lock bits.
607 // Try to swing into 'fast-locked' state.
608 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
609 const markWord locked_mark = mark.set_fast_locked();
610 const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
611 if (old_mark == mark) {
612 // Successfully fast-locked, push object to lock-stack and return.
613 lock_stack.push(obj());
614 return true;
615 }
616 mark = old_mark;
617 }
618
619 if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
620 // Recursive lock successful.
621 return true;
622 }
623
624 // Failed to fast lock.
625 return false;
626 } else if (LockingMode == LM_LEGACY) {
627 markWord mark = obj->mark();
628 if (mark.is_unlocked()) {
629 // Anticipate successful CAS -- the ST of the displaced mark must
630 // be visible <= the ST performed by the CAS.
631 lock->set_displaced_header(mark);
632 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
633 return true;
634 }
635 } else if (mark.has_locker() &&
636 locking_thread->is_lock_owned((address) mark.locker())) {
637 assert(lock != mark.locker(), "must not re-lock the same lock");
638 assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
639 lock->set_displaced_header(markWord::from_pointer(nullptr));
640 return true;
641 }
642
643 // The object header will never be displaced to this lock,
644 // so it does not matter what the value is, except that it
645 // must be non-zero to avoid looking like a re-entrant lock,
646 // and must not look locked either.
647 lock->set_displaced_header(markWord::unused_mark());
648
649 // Failed to fast lock.
650 return false;
651 }
652 } else if (VerifyHeavyMonitors) {
653 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
654 }
655
656 return false;
657 }
658
659 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
660 current->dec_held_monitor_count();
661
662 if (!useHeavyMonitors()) {
663 markWord mark = object->mark();
664 if (LockingMode == LM_LIGHTWEIGHT) {
665 // Fast-locking does not use the 'lock' argument.
666 LockStack& lock_stack = current->lock_stack();
667 if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
668 // Recursively unlocked.
669 return;
670 }
671
672 if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
673 // This lock is recursive but is not at the top of the lock stack so we're
674 // doing an unbalanced exit. We have to fall thru to inflation below and
675 // let ObjectMonitor::exit() do the unlock.
676 } else {
677 while (mark.is_fast_locked()) {
678 // Retry until a lock state change has been observed. cas_set_mark() may collide with modifications of non-lock bits.
679 const markWord unlocked_mark = mark.set_unlocked();
680 const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
681 if (old_mark == mark) {
682 size_t recursions = lock_stack.remove(object) - 1;
683 assert(recursions == 0, "must not be recursive here");
684 return;
685 }
686 mark = old_mark;
687 }
688 }
689 } else if (LockingMode == LM_LEGACY) {
690 markWord dhw = lock->displaced_header();
691 if (dhw.value() == 0) {
692 // If the displaced header is null, then this exit matches up with
693 // a recursive enter. No real work to do here except for diagnostics.
694 #ifndef PRODUCT
695 if (mark != markWord::INFLATING()) {
696 // Only do diagnostics if we are not racing an inflation. Simply
697 // exiting a recursive enter of a Java Monitor that is being
698 // inflated is safe; see the has_monitor() comment below.
699 assert(!mark.is_unlocked(), "invariant");
700 assert(!mark.has_locker() ||
701 current->is_lock_owned((address)mark.locker()), "invariant");
702 if (mark.has_monitor()) {
703 // The BasicLock's displaced_header is marked as a recursive
704 // enter and we have an inflated Java Monitor (ObjectMonitor).
705 // This is a special case where the Java Monitor was inflated
706 // after this thread entered the stack-lock recursively. When a
707 // Java Monitor is inflated, we cannot safely walk the Java
708 // Monitor owner's stack and update the BasicLocks because a
709 // Java Monitor can be asynchronously inflated by a thread that
710 // does not own the Java Monitor.
711 ObjectMonitor* m = mark.monitor();
712 assert(m->object()->mark() == mark, "invariant");
713 assert(m->is_entered(current), "invariant");
714 }
715 }
716 #endif
717 return;
718 }
719
720 if (mark == markWord::from_pointer(lock)) {
721 // If the object is stack-locked by the current thread, try to
722 // swing the displaced header from the BasicLock back to the mark.
723 assert(dhw.is_neutral(), "invariant");
724 if (object->cas_set_mark(dhw, mark) == mark) {
725 return;
726 }
727 }
728 }
729 } else if (VerifyHeavyMonitors) {
730 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
731 }
735 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
736 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
737 assert(!monitor->is_owner_anonymous(), "must not be");
738 monitor->exit(current);
739 }
740
741 // -----------------------------------------------------------------------------
742 // JNI locks on java objects
743 // NOTE: must use heavy weight monitor to handle jni monitor enter
744 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
745 if (obj->klass()->is_value_based()) {
746 handle_sync_on_value_based_class(obj, current);
747 }
748
749 // the current locking is from JNI instead of Java code
750 current->set_current_pending_monitor_is_from_java(false);
751 // An async deflation can race after the inflate() call and before
752 // enter() can make the ObjectMonitor busy. enter() returns false if
753 // we have lost the race to async deflation and we simply try again.
754 while (true) {
755 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
756 if (monitor->enter(current)) {
757 current->inc_held_monitor_count(1, true);
758 break;
759 }
760 }
761 current->set_current_pending_monitor_is_from_java(true);
762 }
763
764 // NOTE: must use heavy weight monitor to handle jni monitor exit
765 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
766 JavaThread* current = THREAD;
767
768 // The ObjectMonitor* can't be async deflated until ownership is
769 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
770 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
771 // If this thread has locked the object, exit the monitor. We
772 // intentionally do not use CHECK on check_owner because we must exit the
773 // monitor even if an exception was already pending.
774 if (monitor->check_owner(THREAD)) {
775 monitor->exit(current);
776 current->dec_held_monitor_count(1, true);
777 }
778 }
779
780 // -----------------------------------------------------------------------------
781 // Internal VM locks on java objects
782 // standard constructor, allows locking failures
783 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
784 _thread = thread;
785 _thread->check_for_valid_safepoint_state();
786 _obj = obj;
787
788 if (_obj() != nullptr) {
789 ObjectSynchronizer::enter(_obj, &_lock, _thread);
790 }
791 }
792
793 ObjectLocker::~ObjectLocker() {
794 if (_obj() != nullptr) {
795 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
796 }
797 }
798
799
800 // -----------------------------------------------------------------------------
801 // Wait/Notify/NotifyAll
802 // NOTE: must use heavy weight monitor to handle wait()
803 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
804 JavaThread* current = THREAD;
805 if (millis < 0) {
806 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
807 }
808 // The ObjectMonitor* can't be async deflated because the _waiters
809 // field is incremented before ownership is dropped and decremented
810 // after ownership is regained.
811 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
812
813 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
814 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
815
816 // This dummy call is in place to get around dtrace bug 6254741. Once
817 // that's fixed we can uncomment the following line, remove the call
818 // and change this function back into a "void" func.
819 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
820 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
821 return ret_code;
822 }
823
824 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
825 JavaThread* current = THREAD;
826
827 markWord mark = obj->mark();
828 if (LockingMode == LM_LIGHTWEIGHT) {
829 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
830 // Not inflated so there can't be any waiters to notify.
831 return;
832 }
833 } else if (LockingMode == LM_LEGACY) {
834 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
835 // Not inflated so there can't be any waiters to notify.
836 return;
837 }
838 }
839 // The ObjectMonitor* can't be async deflated until ownership is
840 // dropped by the calling thread.
841 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
842 monitor->notify(CHECK);
843 }
844
845 // NOTE: see comment of notify()
846 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
847 JavaThread* current = THREAD;
848
849 markWord mark = obj->mark();
850 if (LockingMode == LM_LIGHTWEIGHT) {
851 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
852 // Not inflated so there can't be any waiters to notify.
853 return;
854 }
855 } else if (LockingMode == LM_LEGACY) {
856 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
857 // Not inflated so there can't be any waiters to notify.
858 return;
859 }
860 }
861 // The ObjectMonitor* can't be async deflated until ownership is
862 // dropped by the calling thread.
863 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
864 monitor->notifyAll(CHECK);
865 }
866
867 // -----------------------------------------------------------------------------
868 // Hash Code handling
869
870 struct SharedGlobals {
871 char _pad_prefix[OM_CACHE_LINE_SIZE];
872 // This is a highly shared mostly-read variable.
873 // To avoid false-sharing it needs to be the sole occupant of a cache line.
874 volatile int stw_random;
875 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
876 // Hot RW variable -- Sequester to avoid false-sharing
877 volatile int hc_sequence;
878 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
879 };
880
881 static SharedGlobals GVars;
882
883 static markWord read_stable_mark(oop obj) {
975 value = 1; // for sensitivity testing
976 } else if (hashCode == 3) {
977 value = ++GVars.hc_sequence;
978 } else if (hashCode == 4) {
979 value = cast_from_oop<intptr_t>(obj);
980 } else {
981 // Marsaglia's xor-shift scheme with thread-specific state
982 // This is probably the best overall implementation -- we'll
983 // likely make this the default in future releases.
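// The update below is one step of Marsaglia's xorshift128 generator:
// t is derived from the oldest state word (_hashStateX) by shift/xor,
// the four state words then rotate (X <- Y <- Z <- W), and the new W
// mixes the old W with t through further shifts and xors.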
984 unsigned t = current->_hashStateX;
985 t ^= (t << 11);
986 current->_hashStateX = current->_hashStateY;
987 current->_hashStateY = current->_hashStateZ;
988 current->_hashStateZ = current->_hashStateW;
989 unsigned v = current->_hashStateW;
990 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
991 current->_hashStateW = v;
992 value = v;
993 }
994
995 value &= markWord::hash_mask;
996 if (value == 0) value = 0xBAD;
997 assert(value != markWord::no_hash, "invariant");
998 return value;
999 }
1000
1001 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
1002
1003 while (true) {
1004 ObjectMonitor* monitor = nullptr;
1005 markWord temp, test;
1006 intptr_t hash;
1007 markWord mark = read_stable_mark(obj);
1008 if (VerifyHeavyMonitors) {
1009 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1010 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1011 }
1012 if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1013 hash = mark.hash();
1014 if (hash != 0) { // if it has a hash, just return it
1015 return hash;
1016 }
1017 hash = get_next_hash(current, obj); // get a new hash
1018 temp = mark.copy_set_hash(hash); // merge the hash into header
1019 // try to install the hash
1020 test = obj->cas_set_mark(temp, mark);
1021 if (test == mark) { // if the hash was installed, return it
1071 // So we have to inflate the stack-lock into an ObjectMonitor
1072 // even if the current thread owns the lock. The BasicLock on
1073 // a thread's stack can be asynchronously read by other threads
1074 // during an inflate() call so any change to that stack memory
1075 // may not propagate to other threads correctly.
1076 }
1077
1078 // Inflate the monitor to set the hash.
1079
1080 // An async deflation can race after the inflate() call and before we
1081 // can update the ObjectMonitor's header with the hash value below.
1082 monitor = inflate(current, obj, inflate_cause_hash_code);
1083 // Load ObjectMonitor's header/dmw field and see if it has a hash.
1084 mark = monitor->header();
1085 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1086 hash = mark.hash();
1087 if (hash == 0) { // if it does not have a hash
1088 hash = get_next_hash(current, obj); // get a new hash
1089 temp = mark.copy_set_hash(hash); // merge the hash into header
1090 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1091 uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1092 test = markWord(v);
1093 if (test != mark) {
1094 // The attempt to update the ObjectMonitor's header/dmw field
1095 // did not work. This can happen if another thread managed to
1096 // merge in the hash just before our cmpxchg().
1097 // If we add any new usages of the header/dmw field, this code
1098 // will need to be updated.
1099 hash = test.hash();
1100 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1101 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1102 }
1103 if (monitor->is_being_async_deflated()) {
1104 // If we detect that async deflation has occurred, then we
1105 // attempt to restore the header/dmw to the object's header
1106 // so that we only retry once if the deflater thread happens
1107 // to be slow.
1108 monitor->install_displaced_markword_in_object(obj);
1109 continue;
1110 }
1111 }
1114 }
1115 }
1116
1117 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1118 Handle h_obj) {
1119 assert(current == JavaThread::current(), "Can only be called on current thread");
1120 oop obj = h_obj();
1121
1122 markWord mark = read_stable_mark(obj);
1123
1124 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1125 // stack-locked case, header points into owner's stack
1126 return current->is_lock_owned((address)mark.locker());
1127 }
1128
1129 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1130 // fast-locking case, see if lock is in current's lock stack
1131 return current->lock_stack().contains(h_obj());
1132 }
1133
1134 if (mark.has_monitor()) {
1135 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1136 // The first stage of async deflation does not affect any field
1137 // used by this comparison so the ObjectMonitor* is usable here.
1138 ObjectMonitor* monitor = mark.monitor();
1139 return monitor->is_entered(current) != 0;
1140 }
1141 // Unlocked case, header in place
1142 assert(mark.is_unlocked(), "sanity check");
1143 return false;
1144 }
1145
1146 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1147 oop obj = h_obj();
1148 markWord mark = read_stable_mark(obj);
1149
1150 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1151 // stack-locked so header points into owner's stack.
1152 // owning_thread_from_monitor_owner() may also return null here:
1153 return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1154 }
1155
1156 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1157 // fast-locked so get owner from the object.
1158 // owning_thread_from_object() may also return null here:
1159 return Threads::owning_thread_from_object(t_list, h_obj());
1160 }
1161
1162 if (mark.has_monitor()) {
1163 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1164 // The first stage of async deflation does not affect any field
1165 // used by this comparison so the ObjectMonitor* is usable here.
1166 ObjectMonitor* monitor = mark.monitor();
1167 assert(monitor != nullptr, "monitor should be non-null");
1168 // owning_thread_from_monitor() may also return null here:
1169 return Threads::owning_thread_from_monitor(t_list, monitor);
1170 }
1171
1172 // Unlocked case, header in place
1173 // Cannot have assertion since this object may have been
1174 // locked by another thread when reaching here.
1175 // assert(mark.is_unlocked(), "sanity check");
1176
1177 return nullptr;
1178 }
1179
1180 // Visitors ...
1181
1182 // Iterate over all ObjectMonitors.
1183 template <typename Function>
1184 void ObjectSynchronizer::monitors_iterate(Function function) {
1185 MonitorList::Iterator iter = _in_use_list.iterator();
1186 while (iter.has_next()) {
1222 }
1223
1224 static bool monitors_used_above_threshold(MonitorList* list) {
1225 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
1226 return false;
1227 }
1228 // Start with ceiling based on a per-thread estimate:
1229 size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1230 size_t old_ceiling = ceiling;
1231 if (ceiling < list->max()) {
1232 // The max used by the system has exceeded the ceiling so use that:
1233 ceiling = list->max();
1234 }
1235 size_t monitors_used = list->count();
1236 if (monitors_used == 0) { // empty list is easy
1237 return false;
1238 }
1239 if (NoAsyncDeflationProgressMax != 0 &&
1240 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1241 double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
1242 size_t new_ceiling = ceiling + (size_t)((double)ceiling * remainder) + 1;
1243 ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
1244 log_info(monitorinflation)("Too many deflations without progress; "
1245 "bumping in_use_list_ceiling from " SIZE_FORMAT
1246 " to " SIZE_FORMAT, old_ceiling, new_ceiling);
1247 _no_progress_cnt = 0;
1248 ceiling = new_ceiling;
1249 }
1250
1251 // Check if our monitor usage is above the threshold:
1252 size_t monitor_usage = (monitors_used * 100LL) / ceiling;
1253 if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
1254 log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
1255 ", monitor_usage=" SIZE_FORMAT ", threshold=%d",
1256 monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
1257 return true;
1258 }
1259
1260 return false;
1261 }
1262
1358
1359 return ret_code;
1360 }
1361
1362 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1363 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1364 }
1365
1366 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1367 const oop obj,
1368 ObjectSynchronizer::InflateCause cause) {
1369 assert(event != nullptr, "invariant");
1370 event->set_monitorClass(obj->klass());
1371 event->set_address((uintptr_t)(void*)obj);
1372 event->set_cause((u1)cause);
1373 event->commit();
1374 }
1375
1376 // Fast path code shared by multiple functions
1377 void ObjectSynchronizer::inflate_helper(oop obj) {
1378 markWord mark = obj->mark_acquire();
1379 if (mark.has_monitor()) {
1380 ObjectMonitor* monitor = mark.monitor();
1381 markWord dmw = monitor->header();
1382 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1383 return;
1384 }
1385 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1386 }
1387
1388 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1389 assert(current == Thread::current(), "must be");
1390 if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1391 return inflate_impl(JavaThread::cast(current), obj, cause);
1392 }
1393 return inflate_impl(nullptr, obj, cause);
1394 }
1395
1396 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1397 assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1398 return inflate_impl(thread, obj, cause);
1399 }
1400
1401 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1402 // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1403 // that the inflating_thread == Thread::current() or is suspended throughout the call by
1404 // some other mechanism.
1405 // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non-
1406 // JavaThread. (As may still be the case from FastHashCode). However, it is only
1407 // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
1408 // is set when called from ObjectSynchronizer::enter from the owning thread,
1409 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1410 EventJavaMonitorInflate event;
1411
1412 for (;;) {
1413 const markWord mark = object->mark_acquire();
1414
1415 // The mark can be in one of the following states:
1416 // * inflated - Just return if using stack-locking.
1417 // If using fast-locking and the ObjectMonitor owner
1418 // is anonymous and the inflating_thread owns the
1419 // object lock, then we make the inflating_thread
1420 // the ObjectMonitor owner and remove the lock from
1421 // the inflating_thread's lock stack.
1422 // * fast-locked - Coerce it to inflated from fast-locked.
1423 // * stack-locked - Coerce it to inflated from stack-locked.
1424 // * INFLATING - Busy wait for conversion from stack-locked to
1425 // inflated.
1426 // * unlocked - Aggressively inflate the object.
1427
1428 // CASE: inflated
1429 if (mark.has_monitor()) {
1430 ObjectMonitor* inf = mark.monitor();
1431 markWord dmw = inf->header();
1432 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1433 if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
1434 inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
1435 inf->set_owner_from_anonymous(inflating_thread);
1436 size_t removed = inflating_thread->lock_stack().remove(object);
1437 inf->set_recursions(removed - 1);
1438 }
1439 return inf;
1440 }
1441
1442 if (LockingMode != LM_LIGHTWEIGHT) {
1443 // New lightweight locking does not use INFLATING.
1444 // CASE: inflation in progress - inflating over a stack-lock.
1445 // Some other thread is converting from stack-locked to inflated.
1446 // Only that thread can complete inflation -- other threads must wait.
1447 // The INFLATING value is transient.
1448 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1449 // We could always eliminate polling by parking the thread on some auxiliary list.
1450 if (mark == markWord::INFLATING()) {
1451 read_stable_mark(object);
1452 continue;
1453 }
1454 }
1455
1456 // CASE: fast-locked
1457 // Could be fast-locked either by the inflating_thread or by some other thread.
1458 //
1459 // Note that we allocate the ObjectMonitor speculatively, _before_
1460 // attempting to set the object's mark to the new ObjectMonitor. If
1461 // the inflating_thread owns the monitor, then we set the ObjectMonitor's
1462 // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
1463 // to anonymous. If we lose the race to set the object's mark to the
1464 // new ObjectMonitor, then we just delete it and loop around again.
1465 //
1466 LogStreamHandle(Trace, monitorinflation) lsh;
1467 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1468 ObjectMonitor* monitor = new ObjectMonitor(object);
1469 monitor->set_header(mark.set_unlocked());
1470 bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
1471 if (own) {
1472 // Owned by inflating_thread.
1473 monitor->set_owner_from(nullptr, inflating_thread);
1474 } else {
1475 // Owned by somebody else.
1476 monitor->set_owner_anonymous();
1477 }
1478 markWord monitor_mark = markWord::encode(monitor);
1479 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
1480 if (old_mark == mark) {
1481 // Success! Return inflated monitor.
1482 if (own) {
1483 size_t removed = inflating_thread->lock_stack().remove(object);
1484 monitor->set_recursions(removed - 1);
1485 }
1486 // Once the ObjectMonitor is configured and object is associated
1487 // with the ObjectMonitor, it is safe to allow async deflation:
1488 _in_use_list.add(monitor);
1489
1490 // Hopefully the performance counters are allocated on distinct
1491 // cache lines to avoid false sharing on MP systems ...
1492 OM_PERFDATA_OP(Inflations, inc());
1493 if (log_is_enabled(Trace, monitorinflation)) {
1494 ResourceMark rm;
1495 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1496 INTPTR_FORMAT ", type='%s'", p2i(object),
1497 object->mark().value(), object->klass()->external_name());
1498 }
1499 if (event.should_commit()) {
1500 post_monitor_inflate_event(&event, object, cause);
1501 }
1502 return monitor;
1503 } else {
1504 delete monitor;
1505 continue; // Interference -- just retry
1506 }
1507 }
1508
1509 // CASE: stack-locked
1510 // Could be stack-locked either by current or by some other thread.
1511 //
1512 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1513 // to install INFLATING into the mark word. We originally installed INFLATING,
1514 // allocated the ObjectMonitor, and then finally STed the address of the
1515 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1516 // the interval in which INFLATING appeared in the mark, thus increasing
1517 // the odds of inflation contention. If we lose the race to set INFLATING,
1518 // then we just delete the ObjectMonitor and loop around again.
1519 //
1520 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1521 assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
1522 ObjectMonitor* m = new ObjectMonitor(object);
1523 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1524 // We do this before the CAS in order to minimize the length of time
1525 // in which INFLATING appears in the mark.
1526
1527 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1528 if (cmp != mark) {
1529 delete m;
1530 continue; // Interference -- just retry
1531 }
1532
1533 // We've successfully installed INFLATING (0) into the mark-word.
1534 // This is the only case where 0 will appear in a mark-word.
1535 // Only the singular thread that successfully swings the mark-word
1536 // to 0 can perform (or more precisely, complete) inflation.
1537 //
1538 // Why do we CAS a 0 into the mark-word instead of just CASing the
1539 // mark-word from the stack-locked value directly to the new inflated state?
1540 // Consider what happens when a thread unlocks a stack-locked object.
1541 // It attempts to use CAS to swing the displaced header value from the
1633 OM_PERFDATA_OP(Inflations, inc());
1634 if (log_is_enabled(Trace, monitorinflation)) {
1635 ResourceMark rm;
1636 lsh.print_cr("inflate(unlocked): object=" INTPTR_FORMAT ", mark="
1637 INTPTR_FORMAT ", type='%s'", p2i(object),
1638 object->mark().value(), object->klass()->external_name());
1639 }
1640 if (event.should_commit()) {
1641 post_monitor_inflate_event(&event, object, cause);
1642 }
1643 return m;
1644 }
1645 }
1646
1647 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1648 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1649 //
1650 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1651 MonitorList::Iterator iter = _in_use_list.iterator();
1652 size_t deflated_count = 0;
1653
1654 while (iter.has_next()) {
1655 if (deflated_count >= (size_t)MonitorDeflationMax) {
1656 break;
1657 }
1658 ObjectMonitor* mid = iter.next();
1659 if (mid->deflate_monitor()) {
1660 deflated_count++;
1661 }
1662
1663 // Must check for a safepoint/handshake and honor it.
1664 safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1665 }
1666
1667 return deflated_count;
1668 }
1669
1670 class HandshakeForDeflation : public HandshakeClosure {
1671 public:
1672 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1673
1674 void do_thread(Thread* thread) {
1675 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1676 INTPTR_FORMAT, p2i(thread));
1677 }
1678 };
1679
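// VM operation used together with the deflation handshake below: it brings
// concurrent GC threads to a rendezvous via safepoint_synchronize_begin()
// and safepoint_synchronize_end() without requiring a safepoint, so GC
// threads that read mark words and look through to ObjectMonitors do not
// end up referencing a monitor that is about to be deleted.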
1680 class VM_RendezvousGCThreads : public VM_Operation {
1681 public:
1682 bool evaluate_at_safepoint() const override { return false; }
1683 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1684 void doit() override {
1685 Universe::heap()->safepoint_synchronize_begin();
1686 Universe::heap()->safepoint_synchronize_end();
1687 };
1688 };
1689
1690 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1691 ObjectMonitorDeflationSafepointer* safepointer) {
1692 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1693 size_t deleted_count = 0;
1694 for (ObjectMonitor* monitor: *delete_list) {
1695 delete monitor;
1696 deleted_count++;
1803 // The async deflation request has been processed.
1804 _last_async_deflation_time_ns = os::javaTimeNanos();
1805 set_is_async_deflation_requested(false);
1806
1807 ObjectMonitorDeflationLogging log;
1808 ObjectMonitorDeflationSafepointer safepointer(current, &log);
1809
1810 log.begin();
1811
1812 // Deflate some idle ObjectMonitors.
1813 size_t deflated_count = deflate_monitor_list(&safepointer);
1814
1815 // Unlink the deflated ObjectMonitors from the in-use list.
1816 size_t unlinked_count = 0;
1817 size_t deleted_count = 0;
1818 if (deflated_count > 0) {
1819 ResourceMark rm(current);
1820 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1821 unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
1822
1823 log.before_handshake(unlinked_count);
1824
1825 // A JavaThread needs to handshake in order to safely free the
1826 // ObjectMonitors that were deflated in this cycle.
1827 HandshakeForDeflation hfd_hc;
1828 Handshake::execute(&hfd_hc);
1829 // Also, we sync and desync GC threads around the handshake, so that they can
1830 // safely read the mark-word and look-through to the object-monitor, without
1831 // being afraid that the object-monitor is going away.
1832 VM_RendezvousGCThreads sync_gc;
1833 VMThread::execute(&sync_gc);
1834
1835 log.after_handshake();
1836
1837 // After the handshake, safely free the ObjectMonitors that were
1838 // deflated and unlinked in this cycle.
1839
1840 // Delete the unlinked ObjectMonitors.
1841 deleted_count = delete_monitors(&delete_list, &safepointer);
1842 assert(unlinked_count == deleted_count, "must be");
2011 }
2012
2013 size_t ck_in_use_max = _in_use_list.max();
2014 if (l_in_use_max == ck_in_use_max) {
2015 out->print_cr("in_use_max=" SIZE_FORMAT " equals ck_in_use_max="
2016 SIZE_FORMAT, l_in_use_max, ck_in_use_max);
2017 } else {
2018 out->print_cr("WARNING: in_use_max=" SIZE_FORMAT " is not equal to "
2019 "ck_in_use_max=" SIZE_FORMAT, l_in_use_max, ck_in_use_max);
2020 }
2021 }
2022
2023 // Check an in-use monitor entry; log any errors.
2024 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
2025 int* error_cnt_p) {
2026 if (n->owner_is_DEFLATER_MARKER()) {
2027 // This could happen when monitor deflation blocks for a safepoint.
2028 return;
2029 }
2030
2031 if (n->header().value() == 0) {
2032 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
2033 "have non-null _header field.", p2i(n));
2034 *error_cnt_p = *error_cnt_p + 1;
2035 }
2036 const oop obj = n->object_peek();
2037 if (obj != nullptr) {
2038 const markWord mark = obj->mark();
2039 if (!mark.has_monitor()) {
2040 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2041 "object does not think it has a monitor: obj="
2042 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
2043 p2i(obj), mark.value());
2044 *error_cnt_p = *error_cnt_p + 1;
2045 }
2046 ObjectMonitor* const obj_mon = mark.monitor();
2047 if (n != obj_mon) {
2048 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2049 "object does not refer to the same monitor: obj="
2050 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2051 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2052 *error_cnt_p = *error_cnt_p + 1;
2053 }
2054 }
2055 }
2056
2057 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
2058 // flags indicate why the entry is in-use, 'object' and 'object type'
2059 // indicate the associated object and its type.
2060 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
2061 if (_in_use_list.count() > 0) {
2062 stringStream ss;
2063 out->print_cr("In-use monitor info:");
2064 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2065 out->print_cr("%18s %s %18s %18s",
2066 "monitor", "BHL", "object", "object type");
2067 out->print_cr("================== === ================== ==================");
2068
2069 auto is_interesting = [&](ObjectMonitor* monitor) {
2070 return log_all || monitor->has_owner() || monitor->is_busy();
2071 };
2072
2073 monitors_iterate([&](ObjectMonitor* monitor) {
2074 if (is_interesting(monitor)) {
2075 const oop obj = monitor->object_peek();
2076 const markWord mark = monitor->header();
2077 ResourceMark rm;
2078 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
2079 monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr,
2080 p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
2081 if (monitor->is_busy()) {
2082 out->print(" (%s)", monitor->is_busy_to_string(&ss));
2083 ss.reset();
2084 }
2085 out->cr();
2086 }
2087 });
2088 }
2089
2090 out->flush();
2091 }
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/basicLock.inline.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/globals.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/handshake.hpp"
43 #include "runtime/interfaceSupport.inline.hpp"
44 #include "runtime/javaThread.hpp"
45 #include "runtime/lightweightSynchronizer.hpp"
46 #include "runtime/lockStack.inline.hpp"
47 #include "runtime/mutexLocker.hpp"
48 #include "runtime/objectMonitor.hpp"
49 #include "runtime/objectMonitor.inline.hpp"
50 #include "runtime/os.inline.hpp"
51 #include "runtime/osThread.hpp"
52 #include "runtime/perfData.hpp"
53 #include "runtime/safepointMechanism.inline.hpp"
54 #include "runtime/safepointVerifiers.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/stubRoutines.hpp"
57 #include "runtime/synchronizer.hpp"
58 #include "runtime/threads.hpp"
59 #include "runtime/timer.hpp"
60 #include "runtime/trimNativeHeap.hpp"
61 #include "runtime/vframe.hpp"
62 #include "runtime/vmThread.hpp"
63 #include "utilities/align.hpp"
64 #include "utilities/dtrace.hpp"
65 #include "utilities/events.hpp"
66 #include "utilities/globalDefinitions.hpp"
67 #include "utilities/linkedlist.hpp"
68 #include "utilities/preserveException.hpp"
69
70 class ObjectMonitorDeflationLogging;
71
72 ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
73 assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking uses table");
74 return mark.monitor();
75 }
76
77 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
78 if (LockingMode != LM_LIGHTWEIGHT) {
79 return read_monitor(mark);
80 }
81 return LightweightSynchronizer::read_monitor(current, obj);
82 }
83
84 void MonitorList::add(ObjectMonitor* m) {
85 ObjectMonitor* head;
86 do {
87 head = Atomic::load(&_head);
88 m->set_next_om(head);
89 } while (Atomic::cmpxchg(&_head, head, m) != head);
90
91 size_t count = Atomic::add(&_count, 1u);
92 if (count > max()) {
93 Atomic::inc(&_max);
94 }
95 }
96
97 size_t MonitorList::count() const {
98 return Atomic::load(&_count);
99 }
100
101 size_t MonitorList::max() const {
102 return Atomic::load(&_max);
103 }
273 static constexpr size_t inflation_lock_count() {
274 return 256;
275 }
276
277 // Static storage for an array of PlatformMutex.
278 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
279
280 static inline PlatformMutex* inflation_lock(size_t index) {
281 return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
282 }
283
284 void ObjectSynchronizer::initialize() {
285 for (size_t i = 0; i < inflation_lock_count(); i++) {
286 ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
287 }
288 // Start the ceiling with the estimate for one thread.
289 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
290
291 // Start the timer for deflations, so it does not trigger immediately.
292 _last_async_deflation_time_ns = os::javaTimeNanos();
293
294 if (LockingMode == LM_LIGHTWEIGHT) {
295 LightweightSynchronizer::initialize();
296 }
297 }
298
299 MonitorList ObjectSynchronizer::_in_use_list;
300 // monitors_used_above_threshold() policy is as follows:
301 //
302 // The ratio of the current _in_use_list count to the ceiling is used
303 // to determine if we are above MonitorUsedDeflationThreshold and need
304 // to do an async monitor deflation cycle. The ceiling is increased by
305 // AvgMonitorsPerThreadEstimate when a thread is added to the system
306 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
307 // removed from the system.
308 //
309 // Note: If the _in_use_list max exceeds the ceiling, then
310 // monitors_used_above_threshold() will use the in_use_list max instead
311 // of the thread count derived ceiling because we have used more
312 // ObjectMonitors than the estimated average.
313 //
314 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
315 // no-progress async monitor deflation cycles in a row, then the ceiling
316 // is adjusted upwards by monitors_used_above_threshold().
350 assert(current->thread_state() == _thread_in_Java, "invariant");
351 NoSafepointVerifier nsv;
352 if (obj == nullptr) return false; // slow-path for invalid obj
353 const markWord mark = obj->mark();
354
355 if (LockingMode == LM_LIGHTWEIGHT) {
356 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
357 // Degenerate notify
358 // fast-locked by caller so by definition the implied waitset is empty.
359 return true;
360 }
361 } else if (LockingMode == LM_LEGACY) {
362 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
363 // Degenerate notify
364 // stack-locked by caller so by definition the implied waitset is empty.
365 return true;
366 }
367 }
368
369 if (mark.has_monitor()) {
370 ObjectMonitor* const mon = read_monitor(current, obj, mark);
371 if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
372 // Racing with inflation/deflation go slow path
373 return false;
374 }
375 assert(mon->object() == oop(obj), "invariant");
376 if (mon->owner() != current) return false; // slow-path for IMS exception
377
378 if (mon->first_waiter() != nullptr) {
379 // We have one or more waiters. Since this is an inflated monitor
380 // that we own, we can transfer one or more threads from the waitset
381 // to the entrylist here and now, avoiding the slow-path.
382 if (all) {
383 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
384 } else {
385 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
386 }
387 int free_count = 0;
388 do {
389 mon->INotify(current);
390 ++free_count;
391 } while (mon->first_waiter() != nullptr && all);
392 OM_PERFDATA_OP(Notifications, inc(free_count));
393 }
394 return true;
399 }
400
401
402 // The LockNode emitted directly at the synchronization site would have
403 // been too big if it were to have included support for the cases of inflated
404 // recursive enter and exit, so they go here instead.
405 // Note that we can't safely call AsyncPrintJavaStack() from within
406 // quick_enter() as our thread state remains _in_Java.
407
408 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
409 BasicLock * lock) {
410 assert(current->thread_state() == _thread_in_Java, "invariant");
411 NoSafepointVerifier nsv;
412 if (obj == nullptr) return false; // Need to throw NPE
413
414 if (obj->klass()->is_value_based()) {
415 return false;
416 }
417
418 if (LockingMode == LM_LIGHTWEIGHT) {
419 return LightweightSynchronizer::quick_enter(obj, current, lock);
420 }
421
422 const markWord mark = obj->mark();
423
424 if (mark.has_monitor()) {
425 ObjectMonitor* const m = ObjectSynchronizer::read_monitor(mark);
426 // An async deflation or GC can race us before we manage to make
427 // the ObjectMonitor busy by setting the owner below. If we detect
428 // that race we just bail out to the slow-path here.
429 if (m->object_peek() == nullptr) {
430 return false;
431 }
432 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
433
434 // Lock contention and Transactional Lock Elision (TLE) diagnostics
435 // and observability
436 // Case: light contention possibly amenable to TLE
437 // Case: TLE inimical operations such as nested/recursive synchronization
438
439 if (owner == current) {
440 m->_recursions++;
441 current->inc_held_monitor_count();
442 return true;
443 }
444
445 if (LockingMode == LM_LEGACY) {
446 // This Java Monitor is inflated so obj's header will never be
447 // displaced to this thread's BasicLock. Make the displaced header
448 // non-null so this BasicLock is not seen as recursive nor as
449 // being locked. We do this unconditionally so that this thread's
450 // BasicLock cannot be mis-interpreted by any stack walkers. For
451 // performance reasons, stack walkers generally first check for
452 // stack-locking in the object's header, the second check is for
453 // recursive stack-locking in the displaced header in the BasicLock,
454 // and last are the inflated Java Monitor (ObjectMonitor) checks.
455 lock->set_displaced_header(markWord::unused_mark());
456 }
457
458 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
459 assert(m->_recursions == 0, "invariant");
460 current->inc_held_monitor_count();
461 return true;
462 }
463 }
464
465 // Note that we could inflate in quick_enter.
520 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
521 }
522 }
523
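    // Only on the platforms listed below does this return true for LM_MONITOR
    // (forcing every lock straight to an inflated ObjectMonitor); on all other
    // platforms it returns false.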
524 static bool useHeavyMonitors() {
525 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
526 return LockingMode == LM_MONITOR;
527 #else
528 return false;
529 #endif
530 }
531
532 // -----------------------------------------------------------------------------
533 // Monitor Enter/Exit
534
535 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
536 // When called with locking_thread != Thread::current() some mechanism must synchronize
537 // the locking_thread with respect to the current thread. Currently only used when
538 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
539 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
540
541 if (LockingMode == LM_LIGHTWEIGHT) {
542 return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
543 }
544
545 if (!enter_fast_impl(obj, lock, locking_thread)) {
546 // Inflated ObjectMonitor::enter_for is required
547
548 // An async deflation can race after the inflate_for() call and before
549 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
550     // if we have lost the race to async deflation; in that case we simply try again.
551 while (true) {
552 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
553 if (monitor->enter_for(locking_thread)) {
554 return;
555 }
556 assert(monitor->is_being_async_deflated(), "must be");
557 }
558 }
559 }
560
561 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
562 assert(current == Thread::current(), "must be");
563
564 if (LockingMode == LM_LIGHTWEIGHT) {
565 return LightweightSynchronizer::enter(obj, lock, current);
566 }
567
568 if (!enter_fast_impl(obj, lock, current)) {
569 // Inflated ObjectMonitor::enter is required
570
571 // An async deflation can race after the inflate() call and before
572 // enter() can make the ObjectMonitor busy. enter() returns false if
573     // we have lost the race to async deflation; in that case we simply try again.
574 while (true) {
575 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
576 if (monitor->enter(current)) {
577 return;
578 }
579 }
580 }
581 }
582
583 // The interpreter and compiler assembly code tries to lock using the fast path
584 // of this algorithm. Make sure to update that code if the following function is
585 // changed. The implementation is extremely sensitive to race conditions. Be careful.
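    // Returns true if the lock was acquired on the fast path; a false return means
    // the caller must inflate to an ObjectMonitor and enter it (see enter() and
    // enter_for() above).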
586 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
587 assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
588
589 if (obj->klass()->is_value_based()) {
590 handle_sync_on_value_based_class(obj, locking_thread);
591 }
592
593 locking_thread->inc_held_monitor_count();
594
595 if (!useHeavyMonitors()) {
596 if (LockingMode == LM_LEGACY) {
597 markWord mark = obj->mark();
598 if (mark.is_unlocked()) {
599 // Anticipate successful CAS -- the ST of the displaced mark must
600 // be visible <= the ST performed by the CAS.
601 lock->set_displaced_header(mark);
602 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
603 return true;
604 }
605 } else if (mark.has_locker() &&
606 locking_thread->is_lock_owned((address) mark.locker())) {
607 assert(lock != mark.locker(), "must not re-lock the same lock");
608 assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
609 lock->set_displaced_header(markWord::from_pointer(nullptr));
610 return true;
611 }
612
613 // The object header will never be displaced to this lock,
614 // so it does not matter what the value is, except that it
615 // must be non-zero to avoid looking like a re-entrant lock,
616 // and must not look locked either.
617 lock->set_displaced_header(markWord::unused_mark());
618
619 // Failed to fast lock.
620 return false;
621 }
622 } else if (VerifyHeavyMonitors) {
623 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
624 }
625
626 return false;
627 }
628
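    // Fast-path monitor exit. For LM_LEGACY we try to undo the stack-lock by
    // CASing the displaced header back into the object's mark word; if the object
    // was not stack-locked by this thread (or the CAS loses a race), we fall
    // through to the inflated ObjectMonitor::exit() path at the end.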
629 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
630 current->dec_held_monitor_count();
631
632 if (LockingMode == LM_LIGHTWEIGHT) {
633 return LightweightSynchronizer::exit(object, current);
634 }
635
636 if (!useHeavyMonitors()) {
637 markWord mark = object->mark();
638 if (LockingMode == LM_LEGACY) {
639 markWord dhw = lock->displaced_header();
640 if (dhw.value() == 0) {
641 // If the displaced header is null, then this exit matches up with
642 // a recursive enter. No real work to do here except for diagnostics.
643 #ifndef PRODUCT
644 if (mark != markWord::INFLATING()) {
645 // Only do diagnostics if we are not racing an inflation. Simply
646 // exiting a recursive enter of a Java Monitor that is being
647 // inflated is safe; see the has_monitor() comment below.
648 assert(!mark.is_unlocked(), "invariant");
649 assert(!mark.has_locker() ||
650 current->is_lock_owned((address)mark.locker()), "invariant");
651 if (mark.has_monitor()) {
652 // The BasicLock's displaced_header is marked as a recursive
653 // enter and we have an inflated Java Monitor (ObjectMonitor).
654 // This is a special case where the Java Monitor was inflated
655 // after this thread entered the stack-lock recursively. When a
656 // Java Monitor is inflated, we cannot safely walk the Java
657 // Monitor owner's stack and update the BasicLocks because a
658 // Java Monitor can be asynchronously inflated by a thread that
659 // does not own the Java Monitor.
660 ObjectMonitor* m = read_monitor(mark);
661 assert(m->object()->mark() == mark, "invariant");
662 assert(m->is_entered(current), "invariant");
663 }
664 }
665 #endif
666 return;
667 }
668
669 if (mark == markWord::from_pointer(lock)) {
670 // If the object is stack-locked by the current thread, try to
671 // swing the displaced header from the BasicLock back to the mark.
672 assert(dhw.is_neutral(), "invariant");
673 if (object->cas_set_mark(dhw, mark) == mark) {
674 return;
675 }
676 }
677 }
678 } else if (VerifyHeavyMonitors) {
679 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
680 }
684 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
685 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
686 assert(!monitor->is_owner_anonymous(), "must not be");
687 monitor->exit(current);
688 }
689
690 // -----------------------------------------------------------------------------
691 // JNI locks on java objects
692 // NOTE: must use a heavyweight monitor to handle JNI monitor enter
693 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
694 if (obj->klass()->is_value_based()) {
695 handle_sync_on_value_based_class(obj, current);
696 }
697
698 // the current locking is from JNI instead of Java code
699 current->set_current_pending_monitor_is_from_java(false);
700 // An async deflation can race after the inflate() call and before
701 // enter() can make the ObjectMonitor busy. enter() returns false if
702   // we have lost the race to async deflation; in that case we simply try again.
703 while (true) {
704 ObjectMonitor* monitor;
705 bool entered;
706 if (LockingMode == LM_LIGHTWEIGHT) {
707 entered = LightweightSynchronizer::inflate_and_enter(obj(), current, current, inflate_cause_jni_enter) != nullptr;
708 } else {
709 monitor = inflate(current, obj(), inflate_cause_jni_enter);
710 entered = monitor->enter(current);
711 }
712
713 if (entered) {
714 current->inc_held_monitor_count(1, true);
715 break;
716 }
717 }
718 current->set_current_pending_monitor_is_from_java(true);
719 }
720
721 // NOTE: must use a heavyweight monitor to handle JNI monitor exit
722 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
723 JavaThread* current = THREAD;
724
725 ObjectMonitor* monitor;
726 if (LockingMode == LM_LIGHTWEIGHT) {
727 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
728 } else {
729 // The ObjectMonitor* can't be async deflated until ownership is
730 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
731 monitor = inflate(current, obj, inflate_cause_jni_exit);
732 }
733 // If this thread has locked the object, exit the monitor. We
734 // intentionally do not use CHECK on check_owner because we must exit the
735 // monitor even if an exception was already pending.
736 if (monitor->check_owner(THREAD)) {
737 monitor->exit(current);
738 current->dec_held_monitor_count(1, true);
739 }
740 }
741
742 // -----------------------------------------------------------------------------
743 // Internal VM locks on java objects
744 // standard constructor, allows locking failures
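    // ObjectLocker is a RAII helper: the constructor enters the monitor and the
    // destructor exits it. Illustrative usage from VM code:
    //   { ObjectLocker ol(h_obj, current); /* guarded VM code */ }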
745 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
746 _thread = thread;
747 _thread->check_for_valid_safepoint_state();
748 _obj = obj;
749
750 if (_obj() != nullptr) {
751 ObjectSynchronizer::enter(_obj, &_lock, _thread);
752 }
753 }
754
755 ObjectLocker::~ObjectLocker() {
756 if (_obj() != nullptr) {
757 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
758 }
759 }
760
761
762 // -----------------------------------------------------------------------------
763 // Wait/Notify/NotifyAll
764 // NOTE: must use a heavyweight monitor to handle wait()
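    // Only an inflated ObjectMonitor has the wait set needed to park a waiting
    // thread and later move it to the entry list, so wait() always inflates first.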
765
766 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
767 JavaThread* current = THREAD;
768 if (millis < 0) {
769 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
770 }
771
772 ObjectMonitor* monitor;
773 if (LockingMode == LM_LIGHTWEIGHT) {
774 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
775 } else {
776 // The ObjectMonitor* can't be async deflated because the _waiters
777 // field is incremented before ownership is dropped and decremented
778 // after ownership is regained.
779 monitor = inflate(current, obj(), inflate_cause_wait);
780 }
781
782 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
783 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
784
785 // This dummy call is in place to get around dtrace bug 6254741. Once
786 // that's fixed we can uncomment the following line, remove the call
787 // and change this function back into a "void" func.
788 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
789 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
790 return ret_code;
791 }
792
793 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
794 JavaThread* current = THREAD;
795
796 markWord mark = obj->mark();
797 if (LockingMode == LM_LIGHTWEIGHT) {
798 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
799 // Not inflated so there can't be any waiters to notify.
800 return;
801 }
802 } else if (LockingMode == LM_LEGACY) {
803 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
804 // Not inflated so there can't be any waiters to notify.
805 return;
806 }
807 }
808
809 ObjectMonitor* monitor;
810 if (LockingMode == LM_LIGHTWEIGHT) {
811 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
812 } else {
813 // The ObjectMonitor* can't be async deflated until ownership is
814 // dropped by the calling thread.
815 monitor = inflate(current, obj(), inflate_cause_notify);
816 }
817 monitor->notify(CHECK);
818 }
819
820 // NOTE: see comment of notify()
821 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
822 JavaThread* current = THREAD;
823
824 markWord mark = obj->mark();
825 if (LockingMode == LM_LIGHTWEIGHT) {
826 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
827 // Not inflated so there can't be any waiters to notify.
828 return;
829 }
830 } else if (LockingMode == LM_LEGACY) {
831 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
832 // Not inflated so there can't be any waiters to notify.
833 return;
834 }
835 }
836
837 ObjectMonitor* monitor;
838 if (LockingMode == LM_LIGHTWEIGHT) {
839 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
840 } else {
841 // The ObjectMonitor* can't be async deflated until ownership is
842 // dropped by the calling thread.
843 monitor = inflate(current, obj(), inflate_cause_notify);
844 }
845 monitor->notifyAll(CHECK);
846 }
847
848 // -----------------------------------------------------------------------------
849 // Hash Code handling
850
851 struct SharedGlobals {
852 char _pad_prefix[OM_CACHE_LINE_SIZE];
853 // This is a highly shared mostly-read variable.
854 // To avoid false-sharing it needs to be the sole occupant of a cache line.
855 volatile int stw_random;
856 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
857 // Hot RW variable -- Sequester to avoid false-sharing
858 volatile int hc_sequence;
859 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
860 };
861
862 static SharedGlobals GVars;
863
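    // Returns a mark word that is not in the transient INFLATING state; callers
    // (e.g. FastHashCode() and inflate_impl() below) use it to wait out an
    // in-progress stack-lock inflation before acting on the mark.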
864 static markWord read_stable_mark(oop obj) {
956 value = 1; // for sensitivity testing
957 } else if (hashCode == 3) {
958 value = ++GVars.hc_sequence;
959 } else if (hashCode == 4) {
960 value = cast_from_oop<intptr_t>(obj);
961 } else {
962 // Marsaglia's xor-shift scheme with thread-specific state
963 // This is probably the best overall implementation -- we'll
964 // likely make this the default in future releases.
965 unsigned t = current->_hashStateX;
966 t ^= (t << 11);
967 current->_hashStateX = current->_hashStateY;
968 current->_hashStateY = current->_hashStateZ;
969 current->_hashStateZ = current->_hashStateW;
970 unsigned v = current->_hashStateW;
971 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
972 current->_hashStateW = v;
973 value = v;
974 }
975
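      // Constrain the value to the hash bits available in the mark word. A value
      // of 0 is reserved to mean "no hash installed" (markWord::no_hash), so remap
      // it to a non-zero constant.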
976 value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
977 if (value == 0) value = 0xBAD;
978 assert(value != markWord::no_hash, "invariant");
979 return value;
980 }
981
982 intptr_t ObjectSynchronizer::get_next_hash(Thread* current, oop obj) {
983 // CLEANUP[Axel]: hack for LightweightSynchronizer being in different translation unit
984 return ::get_next_hash(current, obj);
985 }
986
987 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
988 if (LockingMode == LM_LIGHTWEIGHT) {
989 return LightweightSynchronizer::FastHashCode(current, obj);
990 }
991
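      // Retry loop: installing or reading the hash can race with stack-lock
      // inflation or with async deflation; on such races we loop and try again.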
992 while (true) {
993 ObjectMonitor* monitor = nullptr;
994 markWord temp, test;
995 intptr_t hash;
996 markWord mark = read_stable_mark(obj);
997 if (VerifyHeavyMonitors) {
998 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
999 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1000 }
1001 if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1002 hash = mark.hash();
1003 if (hash != 0) { // if it has a hash, just return it
1004 return hash;
1005 }
1006 hash = get_next_hash(current, obj); // get a new hash
1007 temp = mark.copy_set_hash(hash); // merge the hash into header
1008 // try to install the hash
1009 test = obj->cas_set_mark(temp, mark);
1010 if (test == mark) { // if the hash was installed, return it
1060 // So we have to inflate the stack-lock into an ObjectMonitor
1061 // even if the current thread owns the lock. The BasicLock on
1062 // a thread's stack can be asynchronously read by other threads
1063 // during an inflate() call so any change to that stack memory
1064 // may not propagate to other threads correctly.
1065 }
1066
1067 // Inflate the monitor to set the hash.
1068
1069 // An async deflation can race after the inflate() call and before we
1070 // can update the ObjectMonitor's header with the hash value below.
1071 monitor = inflate(current, obj, inflate_cause_hash_code);
1072 // Load ObjectMonitor's header/dmw field and see if it has a hash.
1073 mark = monitor->header();
1074 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1075 hash = mark.hash();
1076 if (hash == 0) { // if it does not have a hash
1077 hash = get_next_hash(current, obj); // get a new hash
1078       temp = mark.copy_set_hash(hash); // merge the hash into header
1079 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1080 uintptr_t v = Atomic::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
1081 test = markWord(v);
1082 if (test != mark) {
1083 // The attempt to update the ObjectMonitor's header/dmw field
1084 // did not work. This can happen if another thread managed to
1085 // merge in the hash just before our cmpxchg().
1086 // If we add any new usages of the header/dmw field, this code
1087 // will need to be updated.
1088 hash = test.hash();
1089 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1090 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1091 }
1092 if (monitor->is_being_async_deflated()) {
1093 // If we detect that async deflation has occurred, then we
1094 // attempt to restore the header/dmw to the object's header
1095 // so that we only retry once if the deflater thread happens
1096 // to be slow.
1097 monitor->install_displaced_markword_in_object(obj);
1098 continue;
1099 }
1100 }
1103 }
1104 }
1105
1106 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1107 Handle h_obj) {
1108 assert(current == JavaThread::current(), "Can only be called on current thread");
1109 oop obj = h_obj();
1110
1111 markWord mark = read_stable_mark(obj);
1112
1113 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1114 // stack-locked case, header points into owner's stack
1115 return current->is_lock_owned((address)mark.locker());
1116 }
1117
1118 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1119 // fast-locking case, see if lock is in current's lock stack
1120 return current->lock_stack().contains(h_obj());
1121 }
1122
1123 while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1124 ObjectMonitor* monitor = LightweightSynchronizer::read_monitor(current, obj);
1125 if (monitor != nullptr) {
1126 return monitor->is_entered(current) != 0;
1127 }
1128 // Racing with inflation/deflation, retry
1129 mark = obj->mark_acquire();
1130
1131 if (mark.is_fast_locked()) {
1132 // Some other thread fast_locked, current could not have held the lock
1133 return false;
1134 }
1135 }
1136
1137 if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
1138 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1139 // The first stage of async deflation does not affect any field
1140 // used by this comparison so the ObjectMonitor* is usable here.
1141 ObjectMonitor* monitor = read_monitor(mark);
1142 return monitor->is_entered(current) != 0;
1143 }
1144 // Unlocked case, header in place
1145 assert(mark.is_unlocked(), "sanity check");
1146 return false;
1147 }
1148
1149 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList* t_list, Handle h_obj) {
1150 oop obj = h_obj();
1151 markWord mark = read_stable_mark(obj);
1152
1153 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1154 // stack-locked so header points into owner's stack.
1155 // owning_thread_from_monitor_owner() may also return null here:
1156 return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1157 }
1158
1159 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1160 // fast-locked so get owner from the object.
1161 // owning_thread_from_object() may also return null here:
1162 return Threads::owning_thread_from_object(t_list, h_obj());
1163 }
1164
1165 while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1166 ObjectMonitor* monitor = LightweightSynchronizer::read_monitor(Thread::current(), obj);
1167 if (monitor != nullptr) {
1168 return Threads::owning_thread_from_monitor(t_list, monitor);
1169 }
1170 // Racing with inflation/deflation, retry
1171 mark = obj->mark_acquire();
1172
1173 if (mark.is_fast_locked()) {
1174 // Some other thread fast_locked
1175 return Threads::owning_thread_from_object(t_list, h_obj());
1176 }
1177 }
1178
1179 if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
1180 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1181 // The first stage of async deflation does not affect any field
1182 // used by this comparison so the ObjectMonitor* is usable here.
1183 ObjectMonitor* monitor = read_monitor(mark);
1184 assert(monitor != nullptr, "monitor should be non-null");
1185 // owning_thread_from_monitor() may also return null here:
1186 return Threads::owning_thread_from_monitor(t_list, monitor);
1187 }
1188
1189 // Unlocked case, header in place
1190 // Cannot have assertion since this object may have been
1191 // locked by another thread when reaching here.
1192 // assert(mark.is_unlocked(), "sanity check");
1193
1194 return nullptr;
1195 }
1196
1197 // Visitors ...
1198
1199 // Iterate over all ObjectMonitors.
1200 template <typename Function>
1201 void ObjectSynchronizer::monitors_iterate(Function function) {
1202 MonitorList::Iterator iter = _in_use_list.iterator();
1203 while (iter.has_next()) {
1239 }
1240
1241 static bool monitors_used_above_threshold(MonitorList* list) {
1242 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
1243 return false;
1244 }
1245 // Start with ceiling based on a per-thread estimate:
1246 size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1247 size_t old_ceiling = ceiling;
1248 if (ceiling < list->max()) {
1249 // The max used by the system has exceeded the ceiling so use that:
1250 ceiling = list->max();
1251 }
1252 size_t monitors_used = list->count();
1253 if (monitors_used == 0) { // empty list is easy
1254 return false;
1255 }
1256 if (NoAsyncDeflationProgressMax != 0 &&
1257 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1258 double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
1259 size_t new_ceiling = ceiling / remainder + 1;
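         // Illustrative arithmetic: with MonitorUsedDeflationThreshold=90, the
         // remainder is 0.10, so the new ceiling is roughly 10x the old one, which
         // pushes the computed usage percentage back below the threshold.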
1260 ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
1261 log_info(monitorinflation)("Too many deflations without progress; "
1262 "bumping in_use_list_ceiling from " SIZE_FORMAT
1263 " to " SIZE_FORMAT, old_ceiling, new_ceiling);
1264 _no_progress_cnt = 0;
1265 ceiling = new_ceiling;
1266 }
1267
1268 // Check if our monitor usage is above the threshold:
1269 size_t monitor_usage = (monitors_used * 100LL) / ceiling;
1270 if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
1271 log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
1272 ", monitor_usage=" SIZE_FORMAT ", threshold=%d",
1273 monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
1274 return true;
1275 }
1276
1277 return false;
1278 }
1279
1375
1376 return ret_code;
1377 }
1378
1379 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1380 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1381 }
1382
1383 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1384 const oop obj,
1385 ObjectSynchronizer::InflateCause cause) {
1386 assert(event != nullptr, "invariant");
1387 event->set_monitorClass(obj->klass());
1388 event->set_address((uintptr_t)(void*)obj);
1389 event->set_cause((u1)cause);
1390 event->commit();
1391 }
1392
1393 // Fast path code shared by multiple functions
1394 void ObjectSynchronizer::inflate_helper(oop obj) {
1395 if (LockingMode == LM_LIGHTWEIGHT) {
1396 return;
1397 }
1398 markWord mark = obj->mark_acquire();
1399 if (mark.has_monitor()) {
1400 ObjectMonitor* monitor = read_monitor(mark);
1401 markWord dmw = monitor->header();
1402 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1403 return;
1404 }
1405 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1406 }
1407
1408 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1409 assert(current == Thread::current(), "must be");
1410 assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate");
1411 return inflate_impl(obj, cause);
1412 }
1413
1414 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1415 assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1416 assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
1417 return inflate_impl(obj, cause);
1418 }
1419
1420 ObjectMonitor* ObjectSynchronizer::inflate_impl(oop object, const InflateCause cause) {
1421 assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
1422 EventJavaMonitorInflate event;
1423
1424 for (;;) {
1425 const markWord mark = object->mark_acquire();
1426
1427 // The mark can be in one of the following states:
1428 // * inflated - Just return it.
1429 // * stack-locked - Coerce it to inflated from stack-locked.
1430 // * INFLATING - Busy wait for conversion from stack-locked to
1431 // inflated.
1432 // * unlocked - Aggressively inflate the object.
1433
1434 // CASE: inflated
1435 if (mark.has_monitor()) {
1436 ObjectMonitor* inf = mark.monitor();
1437 markWord dmw = inf->header();
1438 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1439 return inf;
1440 }
1441
1442 // CASE: inflation in progress - inflating over a stack-lock.
1443 // Some other thread is converting from stack-locked to inflated.
1444 // Only that thread can complete inflation -- other threads must wait.
1445 // The INFLATING value is transient.
1446 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1447 // We could always eliminate polling by parking the thread on some auxiliary list.
1448 if (mark == markWord::INFLATING()) {
1449 read_stable_mark(object);
1450 continue;
1451 }
1452
1453 // CASE: stack-locked
1454 // Could be stack-locked either by current or by some other thread.
1455 //
1456 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1457 // to install INFLATING into the mark word. We originally installed INFLATING,
1458 // allocated the ObjectMonitor, and then finally STed the address of the
1459 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1460 // the interval in which INFLATING appeared in the mark, thus increasing
1461 // the odds of inflation contention. If we lose the race to set INFLATING,
1462 // then we just delete the ObjectMonitor and loop around again.
1463 //
1464 LogStreamHandle(Trace, monitorinflation) lsh;
1465 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1466 ObjectMonitor* m = new ObjectMonitor(object);
1467 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1468 // We do this before the CAS in order to minimize the length of time
1469 // in which INFLATING appears in the mark.
1470
1471 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1472 if (cmp != mark) {
1473 delete m;
1474 continue; // Interference -- just retry
1475 }
1476
1477 // We've successfully installed INFLATING (0) into the mark-word.
1478 // This is the only case where 0 will appear in a mark-word.
1479 // Only the singular thread that successfully swings the mark-word
1480 // to 0 can perform (or more precisely, complete) inflation.
1481 //
1482 // Why do we CAS a 0 into the mark-word instead of just CASing the
1483 // mark-word from the stack-locked value directly to the new inflated state?
1484 // Consider what happens when a thread unlocks a stack-locked object.
1485 // It attempts to use CAS to swing the displaced header value from the
1577 OM_PERFDATA_OP(Inflations, inc());
1578 if (log_is_enabled(Trace, monitorinflation)) {
1579 ResourceMark rm;
1580 lsh.print_cr("inflate(unlocked): object=" INTPTR_FORMAT ", mark="
1581 INTPTR_FORMAT ", type='%s'", p2i(object),
1582 object->mark().value(), object->klass()->external_name());
1583 }
1584 if (event.should_commit()) {
1585 post_monitor_inflate_event(&event, object, cause);
1586 }
1587 return m;
1588 }
1589 }
1590
1591 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1592 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1593 //
1594 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1595 MonitorList::Iterator iter = _in_use_list.iterator();
1596 size_t deflated_count = 0;
1597 Thread* current = Thread::current();
1598
1599 while (iter.has_next()) {
1600 if (deflated_count >= (size_t)MonitorDeflationMax) {
1601 break;
1602 }
1603 ObjectMonitor* mid = iter.next();
1604 if (mid->deflate_monitor(current)) {
1605 deflated_count++;
1606 }
1607
1608 // Must check for a safepoint/handshake and honor it.
1609 safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1610 }
1611
1612 return deflated_count;
1613 }
1614
1615 class HandshakeForDeflation : public HandshakeClosure {
1616 public:
1617 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1618
1619 void do_thread(Thread* thread) {
1620 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1621 INTPTR_FORMAT, p2i(thread));
1622 if (thread->is_Java_thread()) {
1623 // Clear OM cache
1624 JavaThread* jt = JavaThread::cast(thread);
1625 jt->om_clear_monitor_cache();
1626 }
1627 }
1628 };
1629
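     // A VM operation that deliberately does not run at a safepoint: doit() only
     // cycles the GC threads through safepoint_synchronize_begin()/end() as a
     // rendezvous around the monitor-deflation handshake below.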
1630 class VM_RendezvousGCThreads : public VM_Operation {
1631 public:
1632 bool evaluate_at_safepoint() const override { return false; }
1633 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1634 void doit() override {
1635 Universe::heap()->safepoint_synchronize_begin();
1636 Universe::heap()->safepoint_synchronize_end();
1637 };
1638 };
1639
1640 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1641 ObjectMonitorDeflationSafepointer* safepointer) {
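       // Pause periodic native heap trimming while monitors are deleted in bulk.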
1642 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1643 size_t deleted_count = 0;
1644 for (ObjectMonitor* monitor: *delete_list) {
1645 delete monitor;
1646 deleted_count++;
1753 // The async deflation request has been processed.
1754 _last_async_deflation_time_ns = os::javaTimeNanos();
1755 set_is_async_deflation_requested(false);
1756
1757 ObjectMonitorDeflationLogging log;
1758 ObjectMonitorDeflationSafepointer safepointer(current, &log);
1759
1760 log.begin();
1761
1762 // Deflate some idle ObjectMonitors.
1763 size_t deflated_count = deflate_monitor_list(&safepointer);
1764
1765 // Unlink the deflated ObjectMonitors from the in-use list.
1766 size_t unlinked_count = 0;
1767 size_t deleted_count = 0;
1768 if (deflated_count > 0) {
1769 ResourceMark rm(current);
1770 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1771 unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
1772
1773 #ifdef ASSERT
1774 if (LockingMode == LM_LIGHTWEIGHT) {
1775 for (ObjectMonitor* monitor : delete_list) {
1776 assert(!LightweightSynchronizer::contains_monitor(current, monitor), "Should have been removed");
1777 }
1778 }
1779 #endif
1780
1781 log.before_handshake(unlinked_count);
1782
1783 // A JavaThread needs to handshake in order to safely free the
1784 // ObjectMonitors that were deflated in this cycle.
1785 HandshakeForDeflation hfd_hc;
1786 Handshake::execute(&hfd_hc);
1787     // Also, we sync and desync GC threads around the handshake, so that they can
1788     // safely read the mark-word and look-through to the object-monitor without
1789     // the risk of the object-monitor being freed while they do so.
1790 VM_RendezvousGCThreads sync_gc;
1791 VMThread::execute(&sync_gc);
1792
1793 log.after_handshake();
1794
1795 // After the handshake, safely free the ObjectMonitors that were
1796 // deflated and unlinked in this cycle.
1797
1798 // Delete the unlinked ObjectMonitors.
1799 deleted_count = delete_monitors(&delete_list, &safepointer);
1800 assert(unlinked_count == deleted_count, "must be");
1969 }
1970
1971 size_t ck_in_use_max = _in_use_list.max();
1972 if (l_in_use_max == ck_in_use_max) {
1973 out->print_cr("in_use_max=" SIZE_FORMAT " equals ck_in_use_max="
1974 SIZE_FORMAT, l_in_use_max, ck_in_use_max);
1975 } else {
1976 out->print_cr("WARNING: in_use_max=" SIZE_FORMAT " is not equal to "
1977 "ck_in_use_max=" SIZE_FORMAT, l_in_use_max, ck_in_use_max);
1978 }
1979 }
1980
1981 // Check an in-use monitor entry; log any errors.
1982 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
1983 int* error_cnt_p) {
1984 if (n->owner_is_DEFLATER_MARKER()) {
1985 // This could happen when monitor deflation blocks for a safepoint.
1986 return;
1987 }
1988
1990 if (n->metadata() == 0) {
1991 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
1992 "have non-null _metadata (header/hash) field.", p2i(n));
1993 *error_cnt_p = *error_cnt_p + 1;
1994 }
1995
1996 const oop obj = n->object_peek();
1997 if (obj == nullptr) {
1998 return;
1999 }
2000
2001 const markWord mark = obj->mark();
2002 if (!mark.has_monitor()) {
2003 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2004 "object does not think it has a monitor: obj="
2005 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
2006 p2i(obj), mark.value());
2007 *error_cnt_p = *error_cnt_p + 1;
2008 return;
2009 }
2010
2011 ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
2012 if (n != obj_mon) {
2013 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2014 "object does not refer to the same monitor: obj="
2015 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2016 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2017 *error_cnt_p = *error_cnt_p + 1;
2018 }
2019 }
2020
2021 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
2022 // flags indicate why the entry is in-use, 'object' and 'object type'
2023 // indicate the associated object and its type.
2024 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
2025 if (_in_use_list.count() > 0) {
2026 stringStream ss;
2027 out->print_cr("In-use monitor info:");
2028 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2029 out->print_cr("%18s %s %18s %18s",
2030 "monitor", "BHL", "object", "object type");
2031 out->print_cr("================== === ================== ==================");
2032
2033 auto is_interesting = [&](ObjectMonitor* monitor) {
2034 return log_all || monitor->has_owner() || monitor->is_busy();
2035 };
2036
2037 monitors_iterate([&](ObjectMonitor* monitor) {
2038 if (is_interesting(monitor)) {
2039 const oop obj = monitor->object_peek();
2040 const intptr_t hash = LockingMode == LM_LIGHTWEIGHT ? monitor->hash() : monitor->header().hash();
2041 ResourceMark rm;
2042 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
2043 monitor->is_busy(), hash != 0, monitor->owner() != nullptr,
2044 p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
2045 if (monitor->is_busy()) {
2046 out->print(" (%s)", monitor->is_busy_to_string(&ss));
2047 ss.reset();
2048 }
2049 out->cr();
2050 }
2051 });
2052 }
2053
2054 out->flush();
2055 }