 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"

class ObjectMonitorDeflationLogging;

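// Lock-free prepend to the global in-use list: a classic Treiber-stack push.
// A sketch of the usage pattern (hypothetical caller, for illustration only):
//
//   ObjectMonitor* m = new ObjectMonitor(obj);
//   _in_use_list.add(m);   // safe against concurrent add() calls
//
// The CAS loop below retries until no other thread has swapped in a new head
// between the load and the cmpxchg.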
void MonitorList::add(ObjectMonitor* m) {
  ObjectMonitor* head;
  do {
    head = Atomic::load(&_head);
    m->set_next_om(head);
  } while (Atomic::cmpxchg(&_head, head, m) != head);
}

// ...
static constexpr size_t inflation_lock_count() {
  return 256;
}

// Static storage for an array of PlatformMutex.
alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];

static inline PlatformMutex* inflation_lock(size_t index) {
  return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
}

void ObjectSynchronizer::initialize() {
  for (size_t i = 0; i < inflation_lock_count(); i++) {
    ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
  }
  // Start the ceiling with the estimate for one thread.
  set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);

  // Start the timer for deflations, so it does not trigger immediately.
  _last_async_deflation_time_ns = os::javaTimeNanos();
}

MonitorList ObjectSynchronizer::_in_use_list;
// monitors_used_above_threshold() policy is as follows:
//
// The ratio of the current _in_use_list count to the ceiling is used
// to determine if we are above MonitorUsedDeflationThreshold and need
// to do an async monitor deflation cycle. The ceiling is increased by
// AvgMonitorsPerThreadEstimate when a thread is added to the system
// and is decreased by AvgMonitorsPerThreadEstimate when a thread is
// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread count derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
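//
// A worked example of the policy arithmetic (illustrative numbers only):
// with 100 threads and AvgMonitorsPerThreadEstimate=1024 the ceiling is
// 102400. If the in-use count is 94208, then
//
//   monitor_usage = (94208 * 100) / 102400 = 92
//
// so with MonitorUsedDeflationThreshold=90 (the default) an async
// deflation cycle is requested.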

// ...

bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
      // Degenerate notify
      // fast-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Degenerate notify
      // stack-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == oop(obj), "invariant");
    if (mon->owner() != current) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != nullptr) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, current);
      }
      int free_count = 0;
      do {
        mon->INotify(current);
        ++free_count;
      } while (mon->first_waiter() != nullptr && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
                                     BasicLock* lock) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // Need to throw NPE

  if (obj->klass()->is_value_based()) {
    return false;
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    LockStack& lock_stack = current->lock_stack();
    if (lock_stack.is_full()) {
      // Always go into runtime if the lock stack is full.
      return false;
    }
    if (lock_stack.try_recursive_enter(obj)) {
      // Recursive lock successful.
      current->inc_held_monitor_count();
      return true;
    }
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }
    JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == current) {
      m->_recursions++;
      current->inc_held_monitor_count();
      return true;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-null so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // stack-locking in the object's header, the second check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());
    }

    if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
      assert(m->_recursions == 0, "invariant");
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;  // revert to slow-path
}

// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  // ...
    vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
    if (locking_thread->has_last_Java_frame()) {
      LogStream info_stream(vblog.info());
      locking_thread->print_active_stack_on(&info_stream);
    } else {
      vblog.info("Cannot find the last Java frame");
    }

    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}

static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit

void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must synchronize
  // the locking_thread with respect to the current thread. Currently only used when
  // deoptimizing and re-locking locks. See Deoptimization::relock_objects
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  if (!enter_fast_impl(obj, lock, locking_thread)) {
    // Inflated ObjectMonitor::enter_for is required

    // An async deflation can race after the inflate_for() call and before
    // enter_for() can make the ObjectMonitor busy. enter_for() returns false
    // if we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
      if (monitor->enter_for(locking_thread)) {
        return;
      }
      assert(monitor->is_being_async_deflated(), "must be");
    }
  }
}

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(current == Thread::current(), "must be");
  if (!enter_fast_impl(obj, lock, current)) {
    // Inflated ObjectMonitor::enter is required

    // An async deflation can race after the inflate() call and before
    // enter() can make the ObjectMonitor busy. enter() returns false if
    // we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
      if (monitor->enter(current)) {
        return;
      }
    }
  }
}

// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.is_full()) {
        // We unconditionally make room on the lock stack by inflating
        // the least recently locked object on the lock stack.

        // About the choice to inflate the least recently locked object:
        // First, we must choose some lock to inflate, either a lock on
        // the lock-stack or the lock that is currently being entered
        // (which may or may not be on the lock-stack).
        // Second, the best lock to inflate is one that is entered in a
        // control flow where only a few locks are used, as the costly
        // part of inflated locking is the inflation itself, not the
        // locking; but this property is entirely program dependent.
        // Third, inflating the lock currently being entered, when it is
        // not present on the lock-stack, leaves the lock-stack still
        // full, creating a scenario where every deeper nested
        // monitorenter must call into the runtime.
        // The rationale is therefore as follows:
        // Because we cannot (currently) determine the second, and want
        // to avoid the third, we inflate a lock on the lock-stack.
        // The least recently locked lock is chosen as it is the lock
        // with the longest critical section.
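        //
        // Illustrative scenario (a sketch; the actual lock-stack capacity
        // is an implementation detail): with a capacity of 8 entries, a
        // call chain that fast-locks 8 distinct objects fills the stack.
        // On the 9th monitorenter we inflate the bottom (oldest) entry,
        // freeing a slot so the new lock, and further nested ones, can
        // stay fast-locked.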

        log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
        ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
        assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
               p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
        assert(!lock_stack.is_full(), "must have made room here");
      }

      markWord mark = obj()->mark_acquire();
      while (mark.is_unlocked()) {
        // Retry until a lock state change has been observed: cas_set_mark() may collide with non-lock-bit modifications.
        // Try to swing into the 'fast-locked' state.
        assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
        const markWord locked_mark = mark.set_fast_locked();
        const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
        if (old_mark == mark) {
          // Successfully fast-locked, push object to lock-stack and return.
          lock_stack.push(obj());
          return true;
        }
        mark = old_mark;
      }

      if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
        // Recursive lock successful.
        return true;
      }

      // Failed to fast lock.
      return false;
    } else if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_unlocked()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address)mark.locker())) {
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return true;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  current->dec_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = current->lock_stack();
      if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
        // Recursively unlocked.
        return;
      }

      if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
        // This lock is recursive but is not at the top of the lock stack so we're
        // doing an unbalanced exit. We have to fall thru to inflation below and
        // let ObjectMonitor::exit() do the unlock.
      } else {
        while (mark.is_fast_locked()) {
          // Retry until a lock state change has been observed: cas_set_mark() may collide with non-lock-bit modifications.
          const markWord unlocked_mark = mark.set_unlocked();
          const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
          if (old_mark == mark) {
            size_t recursions = lock_stack.remove(object) - 1;
            assert(recursions == 0, "must not be recursive here");
            return;
          }
          mark = old_mark;
        }
      }
    } else if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_unlocked(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // Java Monitor can be asynchronously inflated by a thread that
            // does not own the Java Monitor.
            ObjectMonitor* m = mark.monitor();
            assert(m->object()->mark() == mark, "invariant");
            assert(m->is_entered(current), "invariant");
          }
        }
#endif
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  assert(!monitor->is_owner_anonymous(), "must not be");
  monitor->exit(current);
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
    if (monitor->enter(current)) {
      current->inc_held_monitor_count(1, true);
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
    current->dec_held_monitor_count(1, true);
  }
}
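
// Illustrative mapping (a sketch, not code from this file): the JNI
// MonitorEnter/MonitorExit entry points route to the two functions above,
// so a native caller such as
//
//   (*env)->MonitorEnter(env, obj);   // -> ObjectSynchronizer::jni_enter()
//   /* ... critical section ... */
//   (*env)->MonitorExit(env, obj);    // -> ObjectSynchronizer::jni_exit()
//
// always uses an inflated monitor, never the stack- or fast-locking paths.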

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_obj() != nullptr) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}
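
// Illustrative usage (a sketch): ObjectLocker is the RAII helper that
// VM-internal code uses to synchronize on a Java object, e.g.
//
//   {
//     ObjectLocker ol(h_obj, THREAD);  // constructor enters the monitor
//     // ... operate on the object while it is locked ...
//   }                                  // destructor exits the monitor
//
// so the lock is released on every path out of the scope, including when
// an exception is pending.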


// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD);  // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  monitor->notify(CHECK);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  monitor->notifyAll(CHECK);
}

// -----------------------------------------------------------------------------
// Hash Code handling

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};
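
// A sketch of how the layout above separates the two hot words (illustrative;
// the real padding is produced by the DEFINE_PAD_MINUS_SIZE macro):
//
//   static_assert(offsetof(SharedGlobals, hc_sequence) -
//                 offsetof(SharedGlobals, stw_random) >= OM_CACHE_LINE_SIZE,
//                 "stw_random and hc_sequence must not share a cache line");
//
// Keeping each variable alone on its cache line prevents a writer of
// hc_sequence from invalidating the line that readers of stw_random hit.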

static SharedGlobals GVars;

static markWord read_stable_mark(oop obj) {
  // ...
  }
}

// hashCode() generation:
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* current, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP systems we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;  // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = current->_hashStateX;
    t ^= (t << 11);
    current->_hashStateX = current->_hashStateY;
    current->_hashStateY = current->_hashStateZ;
    current->_hashStateZ = current->_hashStateW;
    unsigned v = current->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    current->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
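
// A minimal standalone sketch of the thread-local Marsaglia xorshift used in
// the default branch above (assuming four 32-bit state words seeded non-zero,
// as _hashStateX.._hashStateW are at thread creation):
//
//   unsigned x = 123456789, y = 362436069, z = 521288629, w = 88675123;
//   unsigned next() {
//     unsigned t = x ^ (x << 11);
//     x = y; y = z; z = w;
//     return w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//   }
//
// Each thread owns its state, so hash generation stays free of coherency
// traffic, unlike the hashCode==0 (global os::random()) variant.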

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {

  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);
    if (VerifyHeavyMonitors) {
      assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
      guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
    }
    if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
      hash = mark.hash();
      if (hash != 0) {                     // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {                  // if the hash was installed, return it
      // ...
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.

    // There's no need to inflate if the mark has already got a monitor.
    // NOTE: an async deflation can race after we get the monitor and
    // before we can update the ObjectMonitor's header with the hash
    // value below.
    monitor = mark.has_monitor() ? mark.monitor() : inflate(current, obj, inflate_cause_hash_code);
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                       // if it does not have a hash
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked case, header points into owner's stack
    return current->is_lock_owned((address)mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  if (mark.has_monitor()) {
    // Inflated monitor so header points to ObjectMonitor (tagged pointer).
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(current) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_unlocked(), "sanity check");
  return false;
}

JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList* t_list, Handle h_obj) {
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked so header points into owner's stack.
    // owning_thread_from_monitor_owner() may also return null here:
    return Threads::owning_thread_from_monitor_owner(t_list, (address)mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locked so get owner from the object.
    // owning_thread_from_object() may also return null here:
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  if (mark.has_monitor()) {
    // Inflated monitor so header points to ObjectMonitor (tagged pointer).
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != nullptr, "monitor should be non-null");
    // owning_thread_from_monitor() may also return null here:
    return Threads::owning_thread_from_monitor(t_list, monitor);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_unlocked(), "sanity check");

  return nullptr;
}

// Visitors ...

// Iterate over all ObjectMonitors.
template <typename Function>
void ObjectSynchronizer::monitors_iterate(Function function) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* monitor = iter.next();
    function(monitor);
  }
  // ...
}

static bool monitors_used_above_threshold(MonitorList* list) {
  if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
    return false;
  }
  // Start with ceiling based on a per-thread estimate:
  size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
  size_t old_ceiling = ceiling;
  if (ceiling < list->max()) {
    // The max used by the system has exceeded the ceiling so use that:
    ceiling = list->max();
  }
  size_t monitors_used = list->count();
  if (monitors_used == 0) {  // empty list is easy
    return false;
  }
  if (NoAsyncDeflationProgressMax != 0 &&
      _no_progress_cnt >= NoAsyncDeflationProgressMax) {
    double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
    size_t new_ceiling = ceiling + (size_t)((double)ceiling * remainder) + 1;
    ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
    log_info(monitorinflation)("Too many deflations without progress; "
                               "bumping in_use_list_ceiling from " SIZE_FORMAT
                               " to " SIZE_FORMAT, old_ceiling, new_ceiling);
    _no_progress_cnt = 0;
    ceiling = new_ceiling;
  }

  // Check if our monitor usage is above the threshold:
  size_t monitor_usage = (monitors_used * 100LL) / ceiling;
  if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
    log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
                               ", monitor_usage=" SIZE_FORMAT ", threshold=%d",
                               monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
    return true;
  }

  return false;
}
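
// A worked example of the no-progress ceiling bump above (illustrative
// numbers only): with MonitorUsedDeflationThreshold=90 the remainder is
// 0.10, so a ceiling of 10000 becomes
//
//   new_ceiling = 10000 + (size_t)(10000 * 0.10) + 1 = 11001
//
// i.e. each no-progress streak raises the trigger point by roughly the
// headroom fraction, backing off further deflation attempts.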

// ...
  return ret_code;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark_acquire();
  if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    markWord dmw = monitor->header();
    assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
    return;
  }
  (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
  assert(current == Thread::current(), "must be");
  if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
    return inflate_impl(JavaThread::cast(current), obj, cause);
  }
  return inflate_impl(nullptr, obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  return inflate_impl(thread, obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
  // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that the inflating_thread == Thread::current() or is suspended throughout the call by
  // some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the inflating_thread owns the
    //                   object lock, then we make the inflating_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the inflating_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  stack-locked - Coerce it to inflated from stack-locked.
    // *  INFLATING    - Busy wait for conversion from stack-locked to
    //                   inflated.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
          inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(inflating_thread);
        size_t removed = inflating_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // New lightweight locking does not use INFLATING.
      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markWord::INFLATING()) {
        read_stable_mark(object);
        continue;
      }
    }

    // CASE: fast-locked
    // Could be fast-locked either by the inflating_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the inflating_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    LogStreamHandle(Trace, monitorinflation) lsh;
    if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
      if (own) {
        // Owned by inflating_thread.
        monitor->set_owner_from(nullptr, inflating_thread);
      } else {
        // Owned by somebody else.
        monitor->set_owner_anonymous();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = inflating_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        _in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm;
          lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                       INTPTR_FORMAT ", type='%s'", p2i(object),
                       object->mark().value(), object->klass()->external_name());
        }
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: stack-locked
    // Could be stack-locked either by current or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the ObjectMonitor, and then finally STed the address of the
    // ObjectMonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention. If we lose the race to set INFLATING,
    // then we just delete the ObjectMonitor and loop around again.
    //
    if (LockingMode == LM_LEGACY && mark.has_locker()) {
      assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
      ObjectMonitor* m = new ObjectMonitor(object);
      // Optimistically prepare the ObjectMonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        delete m;
        continue;  // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // ...

    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      lsh.print_cr("inflate(unlocked): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  size_t deflated_count = 0;

  while (iter.has_next()) {
    if (deflated_count >= (size_t)MonitorDeflationMax) {
      break;
    }
    ObjectMonitor* mid = iter.next();
    if (mid->deflate_monitor()) {
      deflated_count++;
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
  }

  return deflated_count;
}

class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};

class VM_RendezvousGCThreads : public VM_Operation {
 public:
  bool evaluate_at_safepoint() const override { return false; }
  VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
  void doit() override {
    Universe::heap()->safepoint_synchronize_begin();
    Universe::heap()->safepoint_synchronize_end();
  }
};

static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
                              ObjectMonitorDeflationSafepointer* safepointer) {
  NativeHeapTrimmer::SuspendMark sm("monitor deletion");
  size_t deleted_count = 0;
  for (ObjectMonitor* monitor : *delete_list) {
    delete monitor;
    deleted_count++;
    // ...
  }
  return deleted_count;
}

// ...

  // The async deflation request has been processed.
  _last_async_deflation_time_ns = os::javaTimeNanos();
  set_is_async_deflation_requested(false);

  ObjectMonitorDeflationLogging log;
  ObjectMonitorDeflationSafepointer safepointer(current, &log);

  log.begin();

  // Deflate some idle ObjectMonitors.
  size_t deflated_count = deflate_monitor_list(&safepointer);

  // Unlink the deflated ObjectMonitors from the in-use list.
  size_t unlinked_count = 0;
  size_t deleted_count = 0;
  if (deflated_count > 0) {
    ResourceMark rm(current);
    GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
    unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);

    log.before_handshake(unlinked_count);

    // A JavaThread needs to handshake in order to safely free the
    // ObjectMonitors that were deflated in this cycle.
    HandshakeForDeflation hfd_hc;
    Handshake::execute(&hfd_hc);
    // Also, we sync and desync GC threads around the handshake, so that they can
    // safely read the mark-word and look-through to the object-monitor, without
    // being afraid that the object-monitor is going away.
    VM_RendezvousGCThreads sync_gc;
    VMThread::execute(&sync_gc);

    log.after_handshake();

    // After the handshake, safely free the ObjectMonitors that were
    // deflated and unlinked in this cycle.

    // Delete the unlinked ObjectMonitors.
    deleted_count = delete_monitors(&delete_list, &safepointer);
    assert(unlinked_count == deleted_count, "must be");
  // ...
  }

  size_t ck_in_use_max = _in_use_list.max();
  if (l_in_use_max == ck_in_use_max) {
    out->print_cr("in_use_max=" SIZE_FORMAT " equals ck_in_use_max="
                  SIZE_FORMAT, l_in_use_max, ck_in_use_max);
  } else {
    out->print_cr("WARNING: in_use_max=" SIZE_FORMAT " is not equal to "
                  "ck_in_use_max=" SIZE_FORMAT, l_in_use_max, ck_in_use_max);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
                                          int* error_cnt_p) {
  if (n->owner_is_DEFLATER_MARKER()) {
    // This could happen when monitor deflation blocks for a safepoint.
    return;
  }

  if (n->header().value() == 0) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
                  "have non-null _header field.", p2i(n));
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = n->object_peek();
  if (obj != nullptr) {
    const markWord mark = obj->mark();
    if (!mark.has_monitor()) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                    "object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
      *error_cnt_p = *error_cnt_p + 1;
    }
    ObjectMonitor* const obj_mon = mark.monitor();
    if (n != obj_mon) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                    "object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
}

// Log details about ObjectMonitors on the in_use_list. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
  if (_in_use_list.count() > 0) {
    stringStream ss;
    out->print_cr("In-use monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");

    auto is_interesting = [&](ObjectMonitor* monitor) {
      return log_all || monitor->has_owner() || monitor->is_busy();
    };

    monitors_iterate([&](ObjectMonitor* monitor) {
      if (is_interesting(monitor)) {
        const oop obj = monitor->object_peek();
        const markWord mark = monitor->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
                   monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr,
                   p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
        if (monitor->is_busy()) {
          out->print(" (%s)", monitor->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();
      }
    });
  }

  out->flush();
}
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/basicLock.inline.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/globals.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/handshake.hpp"
43 #include "runtime/interfaceSupport.inline.hpp"
44 #include "runtime/javaThread.hpp"
45 #include "runtime/lightweightSynchronizer.hpp"
46 #include "runtime/lockStack.inline.hpp"
47 #include "runtime/mutexLocker.hpp"
48 #include "runtime/objectMonitor.hpp"
49 #include "runtime/objectMonitor.inline.hpp"
50 #include "runtime/os.inline.hpp"
51 #include "runtime/osThread.hpp"
52 #include "runtime/perfData.hpp"
53 #include "runtime/safepointMechanism.inline.hpp"
54 #include "runtime/safepointVerifiers.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/stubRoutines.hpp"
57 #include "runtime/synchronizer.inline.hpp"
58 #include "runtime/threads.hpp"
59 #include "runtime/timer.hpp"
60 #include "runtime/trimNativeHeap.hpp"
61 #include "runtime/vframe.hpp"
62 #include "runtime/vmThread.hpp"
63 #include "utilities/align.hpp"
64 #include "utilities/dtrace.hpp"
65 #include "utilities/events.hpp"
66 #include "utilities/globalDefinitions.hpp"
67 #include "utilities/linkedlist.hpp"
68 #include "utilities/preserveException.hpp"
69
70 class ObjectMonitorDeflationLogging;
71
72 void MonitorList::add(ObjectMonitor* m) {
73 ObjectMonitor* head;
74 do {
75 head = Atomic::load(&_head);
76 m->set_next_om(head);
77 } while (Atomic::cmpxchg(&_head, head, m) != head);
261 static constexpr size_t inflation_lock_count() {
262 return 256;
263 }
264
265 // Static storage for an array of PlatformMutex.
266 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
267
268 static inline PlatformMutex* inflation_lock(size_t index) {
269 return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
270 }
271
272 void ObjectSynchronizer::initialize() {
273 for (size_t i = 0; i < inflation_lock_count(); i++) {
274 ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
275 }
276 // Start the ceiling with the estimate for one thread.
277 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
278
279 // Start the timer for deflations, so it does not trigger immediately.
280 _last_async_deflation_time_ns = os::javaTimeNanos();
281
282 if (LockingMode == LM_LIGHTWEIGHT) {
283 LightweightSynchronizer::initialize();
284 }
285 }
286
287 MonitorList ObjectSynchronizer::_in_use_list;
288 // monitors_used_above_threshold() policy is as follows:
289 //
290 // The ratio of the current _in_use_list count to the ceiling is used
291 // to determine if we are above MonitorUsedDeflationThreshold and need
292 // to do an async monitor deflation cycle. The ceiling is increased by
293 // AvgMonitorsPerThreadEstimate when a thread is added to the system
294 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
295 // removed from the system.
296 //
297 // Note: If the _in_use_list max exceeds the ceiling, then
298 // monitors_used_above_threshold() will use the in_use_list max instead
299 // of the thread count derived ceiling because we have used more
300 // ObjectMonitors than the estimated average.
301 //
302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
303 // no-progress async monitor deflation cycles in a row, then the ceiling
304 // is adjusted upwards by monitors_used_above_threshold().
338 assert(current->thread_state() == _thread_in_Java, "invariant");
339 NoSafepointVerifier nsv;
340 if (obj == nullptr) return false; // slow-path for invalid obj
341 const markWord mark = obj->mark();
342
343 if (LockingMode == LM_LIGHTWEIGHT) {
344 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
345 // Degenerate notify
346 // fast-locked by caller so by definition the implied waitset is empty.
347 return true;
348 }
349 } else if (LockingMode == LM_LEGACY) {
350 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
351 // Degenerate notify
352 // stack-locked by caller so by definition the implied waitset is empty.
353 return true;
354 }
355 }
356
357 if (mark.has_monitor()) {
358 ObjectMonitor* const mon = read_monitor(current, obj, mark);
359 if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
360 // Racing with inflation/deflation go slow path
361 return false;
362 }
363 assert(mon->object() == oop(obj), "invariant");
364 if (mon->owner() != current) return false; // slow-path for IMS exception
365
366 if (mon->first_waiter() != nullptr) {
367 // We have one or more waiters. Since this is an inflated monitor
368 // that we own, we can transfer one or more threads from the waitset
369 // to the entrylist here and now, avoiding the slow-path.
370 if (all) {
371 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
372 } else {
373 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
374 }
375 int free_count = 0;
376 do {
377 mon->INotify(current);
378 ++free_count;
379 } while (mon->first_waiter() != nullptr && all);
380 OM_PERFDATA_OP(Notifications, inc(free_count));
381 }
382 return true;
383 }
384
385 // other IMS exception states take the slow-path
386 return false;
387 }
388
389 static bool useHeavyMonitors() {
390 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
391 return LockingMode == LM_MONITOR;
392 #else
393 return false;
394 #endif
395 }
396
397 // The LockNode emitted directly at the synchronization site would have
398 // been too big if it were to have included support for the cases of inflated
399 // recursive enter and exit, so they go here instead.
400 // Note that we can't safely call AsyncPrintJavaStack() from within
401 // quick_enter() as our thread state remains _in_Java.
402
403 bool ObjectSynchronizer::quick_enter_legacy(oop obj, JavaThread* current,
404 BasicLock * lock) {
405 assert(current->thread_state() == _thread_in_Java, "invariant");
406
407 if (useHeavyMonitors()) {
408 return false; // Slow path
409 }
410
411 if (LockingMode == LM_LIGHTWEIGHT) {
412 return LightweightSynchronizer::quick_enter(obj, current, lock);
413 }
414
415 assert(LockingMode == LM_LEGACY, "legacy mode below");
416
417 const markWord mark = obj->mark();
418
419 if (mark.has_monitor()) {
420
421 ObjectMonitor* const m = read_monitor(mark);
422 // An async deflation or GC can race us before we manage to make
423 // the ObjectMonitor busy by setting the owner below. If we detect
424 // that race we just bail out to the slow-path here.
425 if (m->object_peek() == nullptr) {
426 return false;
427 }
428 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
429
430 // Lock contention and Transactional Lock Elision (TLE) diagnostics
431 // and observability
432 // Case: light contention possibly amenable to TLE
433 // Case: TLE inimical operations such as nested/recursive synchronization
434
435 if (owner == current) {
436 m->_recursions++;
437 current->inc_held_monitor_count();
438 return true;
439 }
440
441 // This Java Monitor is inflated so obj's header will never be
442 // displaced to this thread's BasicLock. Make the displaced header
443 // non-null so this BasicLock is not seen as recursive nor as
444 // being locked. We do this unconditionally so that this thread's
445 // BasicLock cannot be mis-interpreted by any stack walkers. For
446 // performance reasons, stack walkers generally first check for
447 // stack-locking in the object's header, the second check is for
448 // recursive stack-locking in the displaced header in the BasicLock,
449 // and last are the inflated Java Monitor (ObjectMonitor) checks.
450 lock->set_displaced_header(markWord::unused_mark());
451
452 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
453 assert(m->_recursions == 0, "invariant");
454 current->inc_held_monitor_count();
455 return true;
456 }
457 }
458
459 // Note that we could inflate in quick_enter.
460 // This is likely a useful optimization.
461 // Critically, in quick_enter() we must not:
462 // -- block indefinitely, or
463 // -- reach a safepoint
464
465 return false; // revert to slow-path
466 }
467
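// Illustrative summary of the quick_enter_legacy() fast-path outcomes above
// (a decision table for the reader, not additional logic):
//
//   owner == current              -> _recursions++; success (recursive enter)
//   owner == nullptr && CAS wins  -> current now owns the monitor (first enter)
//   anything else                 -> return false and take the slow path
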
468 // Handle notifications when synchronizing on value based classes
469 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
470 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
498 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
499 if (locking_thread->has_last_Java_frame()) {
500 LogStream info_stream(vblog.info());
501 locking_thread->print_active_stack_on(&info_stream);
502 } else {
503 vblog.info("Cannot find the last Java frame");
504 }
505
506 EventSyncOnValueBasedClass event;
507 if (event.should_commit()) {
508 event.set_valueBasedClass(obj->klass());
509 event.commit();
510 }
511 }
512
513 if (bcp_was_adjusted) {
514 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
515 }
516 }
517
518 // -----------------------------------------------------------------------------
519 // Monitor Enter/Exit
520
521 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
522 // When called with locking_thread != Thread::current() some mechanism must synchronize
523 // the locking_thread with respect to the current thread. Currently only used when
524 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
525 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
526
527 if (LockingMode == LM_LIGHTWEIGHT) {
528 return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
529 }
530
531 if (!enter_fast_impl(obj, lock, locking_thread)) {
532 // Inflated ObjectMonitor::enter_for is required
533
534 // An async deflation can race after the inflate_for() call and before
535 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
536 // if we have lost the race to async deflation and we simply try again.
537 while (true) {
538 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
539 if (monitor->enter_for(locking_thread)) {
540 return;
541 }
542 assert(monitor->is_being_async_deflated(), "must be");
543 }
544 }
545 }
546
547 void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
548 if (!enter_fast_impl(obj, lock, current)) {
549 // Inflated ObjectMonitor::enter is required
550
551 // An async deflation can race after the inflate() call and before
552 // enter() can make the ObjectMonitor busy. enter() returns false if
553 // we have lost the race to async deflation and we simply try again.
554 while (true) {
555 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
556 if (monitor->enter(current)) {
557 return;
558 }
559 }
560 }
561 }
562
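// The retry idiom used by enter_for() and enter_legacy() above, reduced to
// its essential shape (a minimal sketch; acquire_monitor() is a hypothetical
// stand-in for inflate()/inflate_for()):
//
//   for (;;) {
//     ObjectMonitor* m = acquire_monitor(obj);
//     if (m->enter(current)) break; // false => lost the race to async
//   }                               // deflation, so loop and inflate again
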
563 // The interpreter and compiler assembly code tries to lock using the fast path
564 // of this algorithm. Make sure to update that code if the following function is
565 // changed. The implementation is extremely sensitive to race conditions. Be careful.
566 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
567 assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
568
569 if (obj->klass()->is_value_based()) {
570 handle_sync_on_value_based_class(obj, locking_thread);
571 }
572
573 locking_thread->inc_held_monitor_count();
574
575 if (!useHeavyMonitors()) {
576 if (LockingMode == LM_LEGACY) {
577 markWord mark = obj->mark();
578 if (mark.is_unlocked()) {
579 // Anticipate successful CAS -- the ST of the displaced mark must
580 // be visible <= the ST performed by the CAS.
581 lock->set_displaced_header(mark);
582 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
583 return true;
584 }
585 } else if (mark.has_locker() &&
586 locking_thread->is_lock_owned((address) mark.locker())) {
587 assert(lock != mark.locker(), "must not re-lock the same lock");
588 assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
589 lock->set_displaced_header(markWord::from_pointer(nullptr));
590 return true;
591 }
592
593 // The object header will never be displaced to this lock,
594 // so it does not matter what the value is, except that it
595 // must be non-zero to avoid looking like a re-entrant lock,
596 // and must not look locked either.
597 lock->set_displaced_header(markWord::unused_mark());
598
599 // Failed to fast lock.
600 return false;
601 }
602 } else if (VerifyHeavyMonitors) {
603 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
604 }
605
606 return false;
607 }
608
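// The LM_LEGACY stack-lock fast path above, reduced to its essential markWord
// transition (an illustrative sketch; tag bits per markWord.hpp):
//
//   markWord mark = obj->mark();       // ...|01 => unlocked, neutral header
//   lock->set_displaced_header(mark);  // save the header in the BasicLock
//   obj->cas_set_mark(markWord::from_pointer(lock), mark);
//   // on success the header now points into the owner's stack: tag bits ...|00
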
609 void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
610 assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
611
612 if (!useHeavyMonitors()) {
613 markWord mark = object->mark();
614 if (LockingMode == LM_LEGACY) {
615 markWord dhw = lock->displaced_header();
616 if (dhw.value() == 0) {
617 // If the displaced header is null, then this exit matches up with
618 // a recursive enter. No real work to do here except for diagnostics.
619 #ifndef PRODUCT
620 if (mark != markWord::INFLATING()) {
621 // Only do diagnostics if we are not racing an inflation. Simply
622 // exiting a recursive enter of a Java Monitor that is being
623 // inflated is safe; see the has_monitor() comment below.
624 assert(!mark.is_unlocked(), "invariant");
625 assert(!mark.has_locker() ||
626 current->is_lock_owned((address)mark.locker()), "invariant");
627 if (mark.has_monitor()) {
628 // The BasicLock's displaced_header is marked as a recursive
629 // enter and we have an inflated Java Monitor (ObjectMonitor).
630 // This is a special case where the Java Monitor was inflated
631 // after this thread entered the stack-lock recursively. When a
632 // Java Monitor is inflated, we cannot safely walk the Java
633 // Monitor owner's stack and update the BasicLocks because a
634 // Java Monitor can be asynchronously inflated by a thread that
635 // does not own the Java Monitor.
636 ObjectMonitor* m = read_monitor(mark);
637 assert(m->object()->mark() == mark, "invariant");
638 assert(m->is_entered(current), "invariant");
639 }
640 }
641 #endif
642 return;
643 }
644
645 if (mark == markWord::from_pointer(lock)) {
646 // If the object is stack-locked by the current thread, try to
647 // swing the displaced header from the BasicLock back to the mark.
648 assert(dhw.is_neutral(), "invariant");
649 if (object->cas_set_mark(dhw, mark) == mark) {
650 return;
651 }
652 }
653 }
654 } else if (VerifyHeavyMonitors) {
655 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
656 }
660 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
661 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
662 assert(!monitor->is_owner_anonymous(), "must not be");
663 monitor->exit(current);
664 }
665
666 // -----------------------------------------------------------------------------
667 // JNI locks on java objects
668 // NOTE: must use heavy weight monitor to handle jni monitor enter
669 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
670 if (obj->klass()->is_value_based()) {
671 handle_sync_on_value_based_class(obj, current);
672 }
673
674 // the current locking is from JNI instead of Java code
675 current->set_current_pending_monitor_is_from_java(false);
676 // An async deflation can race after the inflate() call and before
677 // enter() can make the ObjectMonitor busy. enter() returns false if
678 // we have lost the race to async deflation and we simply try again.
679 while (true) {
680 ObjectMonitor* monitor;
681 bool entered;
682 if (LockingMode == LM_LIGHTWEIGHT) {
683 entered = LightweightSynchronizer::inflate_and_enter(obj(), current, current, inflate_cause_jni_enter) != nullptr;
684 } else {
685 monitor = inflate(current, obj(), inflate_cause_jni_enter);
686 entered = monitor->enter(current);
687 }
688
689 if (entered) {
690 current->inc_held_monitor_count(1, true);
691 break;
692 }
693 }
694 current->set_current_pending_monitor_is_from_java(true);
695 }
696
697 // NOTE: must use heavy weight monitor to handle jni monitor exit
698 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
699 JavaThread* current = THREAD;
700
701 ObjectMonitor* monitor;
702 if (LockingMode == LM_LIGHTWEIGHT) {
703 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
704 } else {
705 // The ObjectMonitor* can't be async deflated until ownership is
706 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
707 monitor = inflate(current, obj, inflate_cause_jni_exit);
708 }
709 // If this thread has locked the object, exit the monitor. We
710 // intentionally do not use CHECK on check_owner because we must exit the
711 // monitor even if an exception was already pending.
712 if (monitor->check_owner(THREAD)) {
713 monitor->exit(current);
714 current->dec_held_monitor_count(1, true);
715 }
716 }
717
718 // -----------------------------------------------------------------------------
719 // Internal VM locks on java objects
720 // standard constructor, allows locking failures
721 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
722 _thread = thread;
723 _thread->check_for_valid_safepoint_state();
724 _obj = obj;
725
726 if (_obj() != nullptr) {
727 ObjectSynchronizer::enter(_obj, &_lock, _thread);
728 }
729 }
730
731 ObjectLocker::~ObjectLocker() {
732 if (_obj() != nullptr) {
733 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
734 }
735 }
736
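// Typical ObjectLocker usage (an illustrative sketch): the constructor enters
// the monitor and the destructor exits it, giving VM-internal code RAII-style
// locking of a Java object:
//
//   {
//     ObjectLocker ol(h_obj, THREAD);  // ObjectSynchronizer::enter
//     // ... operate on h_obj while it is locked ...
//   }                                  // scope exit => ObjectSynchronizer::exit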
737
738 // -----------------------------------------------------------------------------
739 // Wait/Notify/NotifyAll
740 // NOTE: must use heavy weight monitor to handle wait()
741
742 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
743 JavaThread* current = THREAD;
744 if (millis < 0) {
745 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
746 }
747
748 ObjectMonitor* monitor;
749 if (LockingMode == LM_LIGHTWEIGHT) {
750 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
751 } else {
752 // The ObjectMonitor* can't be async deflated because the _waiters
753 // field is incremented before ownership is dropped and decremented
754 // after ownership is regained.
755 monitor = inflate(current, obj(), inflate_cause_wait);
756 }
757
758 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
759 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
760
761 // This dummy call is in place to get around dtrace bug 6254741. Once
762 // that's fixed we can uncomment the following line, remove the call
763 // and change this function back into a "void" func.
764 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
765 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
766 return ret_code;
767 }
768
769 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
770 JavaThread* current = THREAD;
771
772 markWord mark = obj->mark();
773 if (LockingMode == LM_LIGHTWEIGHT) {
774 if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
775 // Not inflated so there can't be any waiters to notify.
776 return;
777 }
778 } else if (LockingMode == LM_LEGACY) {
779 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
780 // Not inflated so there can't be any waiters to notify.
781 return;
782 }
783 }
784
785 ObjectMonitor* monitor;
786 if (LockingMode == LM_LIGHTWEIGHT) {
787 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
788 } else {
789 // The ObjectMonitor* can't be async deflated until ownership is
790 // dropped by the calling thread.
791 monitor = inflate(current, obj(), inflate_cause_notify);
792 }
793 monitor->notify(CHECK);
794 }
795
796 // NOTE: see comment of notify()
797 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
798 JavaThread* current = THREAD;
799
800 markWord mark = obj->mark();
801 if (LockingMode == LM_LIGHTWEIGHT) {
802 if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
803 // Not inflated so there can't be any waiters to notify.
804 return;
805 }
806 } else if (LockingMode == LM_LEGACY) {
807 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
808 // Not inflated so there can't be any waiters to notify.
809 return;
810 }
811 }
812
813 ObjectMonitor* monitor;
814 if (LockingMode == LM_LIGHTWEIGHT) {
815 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
816 } else {
817 // The ObjectMonitor* can't be async deflated until ownership is
818 // dropped by the calling thread.
819 monitor = inflate(current, obj(), inflate_cause_notify);
820 }
821 monitor->notifyAll(CHECK);
822 }
823
824 // -----------------------------------------------------------------------------
825 // Hash Code handling
826
827 struct SharedGlobals {
828 char _pad_prefix[OM_CACHE_LINE_SIZE];
829 // This is a highly shared mostly-read variable.
830 // To avoid false-sharing it needs to be the sole occupant of a cache line.
831 volatile int stw_random;
832 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
833 // Hot RW variable -- Sequester to avoid false-sharing
834 volatile int hc_sequence;
835 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
836 };
837
838 static SharedGlobals GVars;
839
840 static markWord read_stable_mark(oop obj) {
898 }
899 }
900
901 // hashCode() generation:
902 //
903 // Possibilities:
904 // * MD5Digest of {obj,stw_random}
905 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
906 // * A DES- or AES-style SBox[] mechanism
907 // * One of the Phi-based schemes, such as:
908 // 2654435761 = 2^32 * Phi (golden ratio)
909 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
910 // * A variation of Marsaglia's shift-xor RNG scheme.
911 // * (obj ^ stw_random) is appealing, but can result
912 // in undesirable regularity in the hashCode values of adjacent objects
913 // (objects allocated back-to-back, in particular). This could potentially
914 // result in hashtable collisions and reduced hashtable efficiency.
915 // There are simple ways to "diffuse" the middle address bits over the
916 // generated hashCode values:
917
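// For example, the shift-xor scheme selected by hashCode == 1 below diffuses
// the middle address bits like this (an illustrative extract of that branch):
//
//   intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
//   intptr_t hash      = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
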
918 static intptr_t get_next_hash(Thread* current, oop obj) {
919 intptr_t value = 0;
920 if (hashCode == 0) {
921 // This form uses global Park-Miller RNG.
922 // On MP system we'll have lots of RW access to a global, so the
923 // mechanism induces lots of coherency traffic.
924 value = os::random();
925 } else if (hashCode == 1) {
926 // This variation has the property of being stable (idempotent)
927 // between STW operations. This can be useful in some of the 1-0
928 // synchronization schemes.
929 intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
930 value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
931 } else if (hashCode == 2) {
932 value = 1; // for sensitivity testing
933 } else if (hashCode == 3) {
934 value = ++GVars.hc_sequence;
935 } else if (hashCode == 4) {
936 value = cast_from_oop<intptr_t>(obj);
937 } else {
938 // Marsaglia's xor-shift scheme with thread-specific state
939 // This is probably the best overall implementation -- we'll
940 // likely make this the default in future releases.
941 unsigned t = current->_hashStateX;
942 t ^= (t << 11);
943 current->_hashStateX = current->_hashStateY;
944 current->_hashStateY = current->_hashStateZ;
945 current->_hashStateZ = current->_hashStateW;
946 unsigned v = current->_hashStateW;
947 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
948 current->_hashStateW = v;
949 value = v;
950 }
951
952 value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
953 if (value == 0) value = 0xBAD;
954 assert(value != markWord::no_hash, "invariant");
955 return value;
956 }
957
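// The default branch above is Marsaglia's xorshift RNG. In stand-alone form
// (a sketch with arbitrary non-zero seeds; the real per-thread state lives in
// Thread::_hashStateX.._hashStateW):
//
//   unsigned x = 123456789, y = 362436069, z = 521288629, w = 88675123;
//   unsigned t = x ^ (x << 11);
//   x = y; y = z; z = w;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));  // w is the next pseudo-random value
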
958 static intptr_t install_hash_code(Thread* current, oop obj) {
959 assert(UseObjectMonitorTable && LockingMode == LM_LIGHTWEIGHT, "must be");
960
961 markWord mark = obj->mark_acquire();
962 for (;;) {
963 intptr_t hash = mark.hash();
964 if (hash != 0) {
965 return hash;
966 }
967
968 hash = get_next_hash(current, obj);
969 const markWord old_mark = mark;
970 const markWord new_mark = old_mark.copy_set_hash(hash);
971
972 mark = obj->cas_set_mark(new_mark, old_mark);
973 if (old_mark == mark) {
974 return hash;
975 }
976 }
977 }
978
979 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
980 // Since the monitor isn't kept in the object header, the hash can simply be installed.
981 if (UseObjectMonitorTable) {
982 return install_hash_code(current, obj);
983 }
984
985 while (true) {
986 ObjectMonitor* monitor = nullptr;
987 markWord temp, test;
988 intptr_t hash;
989 markWord mark = read_stable_mark(obj);
990 if (VerifyHeavyMonitors) {
991 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
992 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
993 }
994 if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
995 hash = mark.hash();
996 if (hash != 0) { // if it has a hash, just return it
997 return hash;
998 }
999 hash = get_next_hash(current, obj); // get a new hash
1000 temp = mark.copy_set_hash(hash); // merge the hash into header
1001 // try to install the hash
1002 test = obj->cas_set_mark(temp, mark);
1003 if (test == mark) { // if the hash was installed, return it
1057 // a thread's stack can be asynchronously read by other threads
1058 // during an inflate() call so any change to that stack memory
1059 // may not propagate to other threads correctly.
1060 }
1061
1062 // Inflate the monitor to set the hash.
1063
1064 // There's no need to inflate if the mark has already got a monitor.
1065 // NOTE: an async deflation can race after we get the monitor and
1066 // before we can update the ObjectMonitor's header with the hash
1067 // value below.
1068 monitor = mark.has_monitor() ? mark.monitor() : inflate(current, obj, inflate_cause_hash_code);
1069 // Load ObjectMonitor's header/dmw field and see if it has a hash.
1070 mark = monitor->header();
1071 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1072 hash = mark.hash();
1073 if (hash == 0) { // if it does not have a hash
1074 hash = get_next_hash(current, obj); // get a new hash
1075 temp = mark.copy_set_hash(hash); // merge the hash into header
1076 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1077 uintptr_t v = Atomic::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
1078 test = markWord(v);
1079 if (test != mark) {
1080 // The attempt to update the ObjectMonitor's header/dmw field
1081 // did not work. This can happen if another thread managed to
1082 // merge in the hash just before our cmpxchg().
1083 // If we add any new usages of the header/dmw field, this code
1084 // will need to be updated.
1085 hash = test.hash();
1086 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1087 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1088 }
1089 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
1090 // If we detect that async deflation has occurred, then we
1091 // attempt to restore the header/dmw to the object's header
1092 // so that we only retry once if the deflater thread happens
1093 // to be slow.
1094 monitor->install_displaced_markword_in_object(obj);
1095 continue;
1096 }
1097 }
1098 // We finally get the hash.
1099 return hash;
1100 }
1101 }
1102
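// Once installed, the identity hash never changes for the lifetime of the
// object, so callers may cache the result (illustrative):
//
//   intptr_t h1 = ObjectSynchronizer::FastHashCode(current, obj);
//   intptr_t h2 = ObjectSynchronizer::FastHashCode(current, obj);
//   assert(h1 == h2, "identity hash must be stable");
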
1103 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1104 Handle h_obj) {
1105 assert(current == JavaThread::current(), "Can only be called on current thread");
1106 oop obj = h_obj();
1107
1108 markWord mark = read_stable_mark(obj);
1109
1110 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1111 // stack-locked case, header points into owner's stack
1112 return current->is_lock_owned((address)mark.locker());
1113 }
1114
1115 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1116 // fast-locking case, see if lock is in current's lock stack
1117 return current->lock_stack().contains(h_obj());
1118 }
1119
1120 while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1121 ObjectMonitor* monitor = read_monitor(current, obj, mark);
1122 if (monitor != nullptr) {
1123 return monitor->is_entered(current) != 0;
1124 }
1125 // Racing with inflation/deflation, retry
1126 mark = obj->mark_acquire();
1127
1128 if (mark.is_fast_locked()) {
1129 // Some other thread fast-locked the object, so current cannot hold the lock
1130 return false;
1131 }
1132 }
1133
1134 if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
1135 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1136 // The first stage of async deflation does not affect any field
1137 // used by this comparison so the ObjectMonitor* is usable here.
1138 ObjectMonitor* monitor = read_monitor(mark);
1139 return monitor->is_entered(current) != 0;
1140 }
1141 // Unlocked case, header in place
1142 assert(mark.is_unlocked(), "sanity check");
1143 return false;
1144 }
1145
1146 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1147 oop obj = h_obj();
1148 markWord mark = read_stable_mark(obj);
1149
1150 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1151 // stack-locked so header points into owner's stack.
1152 // owning_thread_from_monitor_owner() may also return null here:
1153 return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1154 }
1155
1156 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1157 // fast-locked so get owner from the object.
1158 // owning_thread_from_object() may also return null here:
1159 return Threads::owning_thread_from_object(t_list, h_obj());
1160 }
1161
1162 while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1163 ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
1164 if (monitor != nullptr) {
1165 return Threads::owning_thread_from_monitor(t_list, monitor);
1166 }
1167 // Racing with inflation/deflation, retry
1168 mark = obj->mark_acquire();
1169
1170 if (mark.is_fast_locked()) {
1171 // Some other thread fast-locked the object
1172 return Threads::owning_thread_from_object(t_list, h_obj());
1173 }
1174 }
1175
1176 if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
1177 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1178 // The first stage of async deflation does not affect any field
1179 // used by this comparison so the ObjectMonitor* is usable here.
1180 ObjectMonitor* monitor = read_monitor(mark);
1181 assert(monitor != nullptr, "monitor should be non-null");
1182 // owning_thread_from_monitor() may also return null here:
1183 return Threads::owning_thread_from_monitor(t_list, monitor);
1184 }
1185
1186 // Unlocked case, header in place
1187 // Cannot have assertion since this object may have been
1188 // locked by another thread when reaching here.
1189 // assert(mark.is_unlocked(), "sanity check");
1190
1191 return nullptr;
1192 }
1193
1194 // Visitors ...
1195
1196 // Iterate over all ObjectMonitors.
1197 template <typename Function>
1198 void ObjectSynchronizer::monitors_iterate(Function function) {
1199 MonitorList::Iterator iter = _in_use_list.iterator();
1200 while (iter.has_next()) {
1236 }
1237
1238 static bool monitors_used_above_threshold(MonitorList* list) {
1239 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
1240 return false;
1241 }
1242 // Start with ceiling based on a per-thread estimate:
1243 size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1244 size_t old_ceiling = ceiling;
1245 if (ceiling < list->max()) {
1246 // The max used by the system has exceeded the ceiling so use that:
1247 ceiling = list->max();
1248 }
1249 size_t monitors_used = list->count();
1250 if (monitors_used == 0) { // empty list is easy
1251 return false;
1252 }
1253 if (NoAsyncDeflationProgressMax != 0 &&
1254 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1255 double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
1256 size_t new_ceiling = ceiling / remainder + 1;
1257 ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
1258 log_info(monitorinflation)("Too many deflations without progress; "
1259 "bumping in_use_list_ceiling from " SIZE_FORMAT
1260 " to " SIZE_FORMAT, old_ceiling, new_ceiling);
1261 _no_progress_cnt = 0;
1262 ceiling = new_ceiling;
1263 }
1264
1265 // Check if our monitor usage is above the threshold:
1266 size_t monitor_usage = (monitors_used * 100LL) / ceiling;
1267 if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
1268 log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
1269 ", monitor_usage=" SIZE_FORMAT ", threshold=%d",
1270 monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
1271 return true;
1272 }
1273
1274 return false;
1275 }
1276
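// Worked example of the ceiling bump and threshold check above (illustrative
// numbers): with MonitorUsedDeflationThreshold=90 and ceiling=10000,
//
//   remainder     = (100.0 - 90) / 100.0  = 0.1
//   new_ceiling   = 10000 / 0.1 + 1       = 100001
//
// and with monitors_used=9500 against the original ceiling,
//
//   monitor_usage = (9500 * 100) / 10000  = 95 > 90  => above threshold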
1372
1373 return ret_code;
1374 }
1375
1376 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1377 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1378 }
1379
1380 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1381 const oop obj,
1382 ObjectSynchronizer::InflateCause cause) {
1383 assert(event != nullptr, "invariant");
1384 event->set_monitorClass(obj->klass());
1385 event->set_address((uintptr_t)(void*)obj);
1386 event->set_cause((u1)cause);
1387 event->commit();
1388 }
1389
1390 // Fast path code shared by multiple functions
1391 void ObjectSynchronizer::inflate_helper(oop obj) {
1392 assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1393 markWord mark = obj->mark_acquire();
1394 if (mark.has_monitor()) {
1395 ObjectMonitor* monitor = read_monitor(mark);
1396 markWord dmw = monitor->header();
1397 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1398 return;
1399 }
1400 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1401 }
1402
1403 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1404 assert(current == Thread::current(), "must be");
1405 assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1406 return inflate_impl(obj, cause);
1407 }
1408
1409 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1410 assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1411 assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
1412 return inflate_impl(obj, cause);
1413 }
1414
1415 ObjectMonitor* ObjectSynchronizer::inflate_impl(oop object, const InflateCause cause) {
1416 assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
1417 EventJavaMonitorInflate event;
1418
1419 for (;;) {
1420 const markWord mark = object->mark_acquire();
1421
1422 // The mark can be in one of the following states:
1423 // * inflated - Just return it.
1424 // * stack-locked - Coerce it to inflated from stack-locked.
1425 // * INFLATING - Busy wait for conversion from stack-locked to
1426 // inflated.
1427 // * unlocked - Aggressively inflate the object.
1428
1429 // CASE: inflated
1430 if (mark.has_monitor()) {
1431 ObjectMonitor* inf = mark.monitor();
1432 markWord dmw = inf->header();
1433 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1434 return inf;
1435 }
1436
1437 // CASE: inflation in progress - inflating over a stack-lock.
1438 // Some other thread is converting from stack-locked to inflated.
1439 // Only that thread can complete inflation -- other threads must wait.
1440 // The INFLATING value is transient.
1441 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1442 // We could always eliminate polling by parking the thread on some auxiliary list.
1443 if (mark == markWord::INFLATING()) {
1444 read_stable_mark(object);
1445 continue;
1446 }
1447
1448 // CASE: stack-locked
1449 // Could be stack-locked either by current or by some other thread.
1450 //
1451 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1452 // to install INFLATING into the mark word. We originally installed INFLATING,
1453 // allocated the ObjectMonitor, and then finally STed the address of the
1454 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1455 // the interval in which INFLATING appeared in the mark, thus increasing
1456 // the odds of inflation contention. If we lose the race to set INFLATING,
1457 // then we just delete the ObjectMonitor and loop around again.
1458 //
1459 LogStreamHandle(Trace, monitorinflation) lsh;
1460 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1461 ObjectMonitor* m = new ObjectMonitor(object);
1462 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1463 // We do this before the CAS in order to minimize the length of time
1464 // in which INFLATING appears in the mark.
1465
1466 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1467 if (cmp != mark) {
1468 delete m;
1469 continue; // Interference -- just retry
1470 }
1471
1472 // We've successfully installed INFLATING (0) into the mark-word.
1473 // This is the only case where 0 will appear in a mark-word.
1474 // Only the singular thread that successfully swings the mark-word
1475 // to 0 can perform (or more precisely, complete) inflation.
1476 //
1477 // Why do we CAS a 0 into the mark-word instead of just CASing the
1478 // mark-word from the stack-locked value directly to the new inflated state?
1479 // Consider what happens when a thread unlocks a stack-locked object.
1480 // It attempts to use CAS to swing the displaced header value from the
1572 OM_PERFDATA_OP(Inflations, inc());
1573 if (log_is_enabled(Trace, monitorinflation)) {
1574 ResourceMark rm;
1575 lsh.print_cr("inflate(unlocked): object=" INTPTR_FORMAT ", mark="
1576 INTPTR_FORMAT ", type='%s'", p2i(object),
1577 object->mark().value(), object->klass()->external_name());
1578 }
1579 if (event.should_commit()) {
1580 post_monitor_inflate_event(&event, object, cause);
1581 }
1582 return m;
1583 }
1584 }
1585
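// Timeline of LM_LEGACY inflation over a stack-lock, as implemented above
// (an illustrative sketch of the mark-word values):
//
//   <stack ptr>|00  --CAS-->  0 (INFLATING)  --release ST-->  <monitor ptr>|10
//
// Any thread that observes the transient 0 value must wait in
// read_stable_mark() until the inflating thread publishes the ObjectMonitor*.
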
1586 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1587 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1588 //
1589 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1590 MonitorList::Iterator iter = _in_use_list.iterator();
1591 size_t deflated_count = 0;
1592 Thread* current = Thread::current();
1593
1594 while (iter.has_next()) {
1595 if (deflated_count >= (size_t)MonitorDeflationMax) {
1596 break;
1597 }
1598 ObjectMonitor* mid = iter.next();
1599 if (mid->deflate_monitor(current)) {
1600 deflated_count++;
1601 }
1602
1603 // Must check for a safepoint/handshake and honor it.
1604 safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1605 }
1606
1607 return deflated_count;
1608 }
1609
1610 class HandshakeForDeflation : public HandshakeClosure {
1611 public:
1612 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1613
1614 void do_thread(Thread* thread) {
1615 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1616 INTPTR_FORMAT, p2i(thread));
1617 if (thread->is_Java_thread()) {
1618 // Clear OM cache
1619 JavaThread* jt = JavaThread::cast(thread);
1620 jt->om_clear_monitor_cache();
1621 }
1622 }
1623 };
1624
1625 class VM_RendezvousGCThreads : public VM_Operation {
1626 public:
1627 bool evaluate_at_safepoint() const override { return false; }
1628 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1629 void doit() override {
1630 Universe::heap()->safepoint_synchronize_begin();
1631 Universe::heap()->safepoint_synchronize_end();
1632 };
1633 };
1634
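// Usage sketch (see deflate_idle_monitors() below): after deflating and
// unlinking, the deflater thread executes
//
//   HandshakeForDeflation hfd_hc;
//   Handshake::execute(&hfd_hc);  // Java threads drop cached ObjectMonitor*s
//   VM_RendezvousGCThreads sync_gc;
//   VMThread::execute(&sync_gc);  // GC threads pass a safepoint-sync point
//
// so that no thread can still hold a stale ObjectMonitor* when the deflated
// monitors are finally deleted.
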
1635 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1636 ObjectMonitorDeflationSafepointer* safepointer) {
1637 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1638 size_t deleted_count = 0;
1639 for (ObjectMonitor* monitor: *delete_list) {
1640 delete monitor;
1641 deleted_count++;
1748 // The async deflation request has been processed.
1749 _last_async_deflation_time_ns = os::javaTimeNanos();
1750 set_is_async_deflation_requested(false);
1751
1752 ObjectMonitorDeflationLogging log;
1753 ObjectMonitorDeflationSafepointer safepointer(current, &log);
1754
1755 log.begin();
1756
1757 // Deflate some idle ObjectMonitors.
1758 size_t deflated_count = deflate_monitor_list(&safepointer);
1759
1760 // Unlink the deflated ObjectMonitors from the in-use list.
1761 size_t unlinked_count = 0;
1762 size_t deleted_count = 0;
1763 if (deflated_count > 0) {
1764 ResourceMark rm(current);
1765 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1766 unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
1767
1768 #ifdef ASSERT
1769 if (UseObjectMonitorTable) {
1770 for (ObjectMonitor* monitor : delete_list) {
1771 assert(!LightweightSynchronizer::contains_monitor(current, monitor), "Should have been removed");
1772 }
1773 }
1774 #endif
1775
1776 log.before_handshake(unlinked_count);
1777
1778 // A JavaThread needs to handshake in order to safely free the
1779 // ObjectMonitors that were deflated in this cycle.
1780 HandshakeForDeflation hfd_hc;
1781 Handshake::execute(&hfd_hc);
1782 // Also, we sync and desync GC threads around the handshake, so that they can
1783 // safely read the mark-word and look through to the object-monitor without
1784 // the object-monitor being freed out from under them.
1785 VM_RendezvousGCThreads sync_gc;
1786 VMThread::execute(&sync_gc);
1787
1788 log.after_handshake();
1789
1790 // After the handshake, safely free the ObjectMonitors that were
1791 // deflated and unlinked in this cycle.
1792
1793 // Delete the unlinked ObjectMonitors.
1794 deleted_count = delete_monitors(&delete_list, &safepointer);
1795 assert(unlinked_count == deleted_count, "must be");
1964 }
1965
1966 size_t ck_in_use_max = _in_use_list.max();
1967 if (l_in_use_max == ck_in_use_max) {
1968 out->print_cr("in_use_max=" SIZE_FORMAT " equals ck_in_use_max="
1969 SIZE_FORMAT, l_in_use_max, ck_in_use_max);
1970 } else {
1971 out->print_cr("WARNING: in_use_max=" SIZE_FORMAT " is not equal to "
1972 "ck_in_use_max=" SIZE_FORMAT, l_in_use_max, ck_in_use_max);
1973 }
1974 }
1975
1976 // Check an in-use monitor entry; log any errors.
1977 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
1978 int* error_cnt_p) {
1979 if (n->owner_is_DEFLATER_MARKER()) {
1980 // This could happen when monitor deflation blocks for a safepoint.
1981 return;
1982 }
1983
1985 if (n->metadata() == 0) {
1986 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
1987 "have non-null _metadata (header/hash) field.", p2i(n));
1988 *error_cnt_p = *error_cnt_p + 1;
1989 }
1990
1991 const oop obj = n->object_peek();
1992 if (obj == nullptr) {
1993 return;
1994 }
1995
1996 const markWord mark = obj->mark();
1997 if (!mark.has_monitor()) {
1998 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1999 "object does not think it has a monitor: obj="
2000 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
2001 p2i(obj), mark.value());
2002 *error_cnt_p = *error_cnt_p + 1;
2003 return;
2004 }
2005
2006 ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
2007 if (n != obj_mon) {
2008 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2009 "object does not refer to the same monitor: obj="
2010 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2011 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2012 *error_cnt_p = *error_cnt_p + 1;
2013 }
2014 }
2015
2016 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
2017 // flags indicate why the entry is in-use, 'object' and 'object type'
2018 // indicate the associated object and its type.
2019 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
2020 if (_in_use_list.count() > 0) {
2021 stringStream ss;
2022 out->print_cr("In-use monitor info:");
2023 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2024 out->print_cr("%18s %s %18s %18s",
2025 "monitor", "BHL", "object", "object type");
2026 out->print_cr("================== === ================== ==================");
2027
2028 auto is_interesting = [&](ObjectMonitor* monitor) {
2029 return log_all || monitor->has_owner() || monitor->is_busy();
2030 };
2031
2032 monitors_iterate([&](ObjectMonitor* monitor) {
2033 if (is_interesting(monitor)) {
2034 const oop obj = monitor->object_peek();
2035 const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
2036 ResourceMark rm;
2037 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
2038 monitor->is_busy(), hash != 0, monitor->owner() != nullptr,
2039 p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
2040 if (monitor->is_busy()) {
2041 out->print(" (%s)", monitor->is_busy_to_string(&ss));
2042 ss.reset();
2043 }
2044 out->cr();
2045 }
2046 });
2047 }
2048
2049 out->flush();
2050 }