// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread count derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize()
// which is called after cmd line options are processed.
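//
// In outline, the policy described in the notes above amounts to something
// like the following (a sketch only; the real logic lives in
// monitors_used_above_threshold(), which is not part of this excerpt, and
// "thread_count" is a stand-in name):
//
//   size_t ceiling = MAX2(_in_use_list_ceiling,
//                         (size_t)AvgMonitorsPerThreadEstimate * thread_count);
//   ceiling = MAX2(ceiling, _in_use_list.max());  // first Note above
//   // after NoAsyncDeflationProgressMax no-progress cycles in a row, the
//   // ceiling is bumped upwards before the threshold comparison (second Note)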
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;

// These checks are required by wait, notify and exit so that we avoid
// inflating a monitor only to find out that this inline type object
// cannot be locked.
#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
    JavaThread* THREAD = current;  \
    ResourceMark rm(THREAD);       \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
    JavaThread* THREAD = current;  \
    ResourceMark rm(THREAD);       \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
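
// Note: the _0 variant is for use in functions that return a value:
// THROW_MSG_0 expands with a trailing "return 0" after the exception has
// been raised, while THROW_MSG expands with a plain "return".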

// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
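// A sketch of that hypothetical fused primitive (illustrative only; no such
// entry point exists -- the name and signature are invented here):
//
//   JRT_ENTRY(void, notify_and_exit(oopDesc* obj, BasicLock* lock, JavaThread* current))
//     ObjectSynchronizer::notify(Handle(current, obj), CHECK);
//     ObjectSynchronizer::exit(obj, lock, current);
//   JRT_END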

bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
  const markWord mark = obj->mark();

  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
      // Degenerate notify
      // fast-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Degenerate notify
      // stack-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = read_monitor(current, obj, mark);
    if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
      // Racing with inflation/deflation; take the slow-path.
      // ...

  // other IMS exception states take the slow-path
  return false;
}

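// Heavy-monitor-only mode: true when LockingMode is LM_MONITOR on the
// platforms listed below; the remaining platforms always answer false here,
// so their quick paths stay eligible regardless of LockingMode.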
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}

// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");

  if (useHeavyMonitors()) {
    return false;  // Slow path
  }

  assert(LockingMode == LM_LEGACY, "legacy mode below");

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {

    ObjectMonitor* const m = read_monitor(mark);
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // ...

// ...

    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit

void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must synchronize
  // the locking_thread with respect to the current thread. Currently only used when
  // deoptimizing and re-locking locks. See Deoptimization::relock_objects
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");

  if (LockingMode == LM_LIGHTWEIGHT) {
    return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
  }

  if (!enter_fast_impl(obj, lock, locking_thread)) {
    // Inflated ObjectMonitor::enter_for is required

    // An async deflation can race after the inflate_for() call and before
    // enter_for() can make the ObjectMonitor busy. enter_for() returns false
    // if we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
      if (monitor->enter_for(locking_thread)) {
        return;
      }
      assert(monitor->is_being_async_deflated(), "must be");
    }
  }
}

void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "This method should never be called on an instance of an inline class");
  if (!enter_fast_impl(obj, lock, current)) {
    // Inflated ObjectMonitor::enter is required

    // An async deflation can race after the inflate() call and before
    // enter() can make the ObjectMonitor busy. enter() returns false if
    // we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
      if (monitor->enter(current)) {
        return;
      }
    }
  }
}

// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  guarantee(!EnableValhalla || !obj->klass()->is_inline_klass(), "Attempt to inflate inline type");
  assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_unlocked()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address) mark.locker())) {
        // ...
      }

      // ...
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}

void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
  assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (EnableValhalla && mark.is_inline_type()) {
      return;
    }
    if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_unlocked(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // ...
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  assert(!monitor->has_anonymous_owner(), "must not be");
  monitor->exit(current);
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  JavaThread* THREAD = current;
  // Top native frames in the stack will not be seen if we attempt
  // preemption, since we start walking from the last Java anchor.
  NoPreemptMark npm(current);

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  if (EnableValhalla && obj->klass()->is_inline_klass()) {
    ResourceMark rm(THREAD);
    const char* desc = "Cannot synchronize on an instance of value class ";
    const char* className = obj->klass()->external_name();
    size_t msglen = strlen(desc) + strlen(className) + 1;
    char* message = NEW_RESOURCE_ARRAY(char, msglen);
    assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
    jio_snprintf(message, msglen, "%s%s", desc, className);
    THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor;
    bool entered;
    if (LockingMode == LM_LIGHTWEIGHT) {
      entered = LightweightSynchronizer::inflate_and_enter(obj(), inflate_cause_jni_enter, current, current) != nullptr;
    } else {
      monitor = inflate(current, obj(), inflate_cause_jni_enter);
      entered = monitor->enter(current);
    }

    if (entered) {
      current->inc_held_monitor_count(1, true);
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
  } else {
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped inside exit() and the ObjectMonitor* must be !is_busy().
    monitor = inflate(current, obj, inflate_cause_jni_exit);
  }
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
    current->dec_held_monitor_count(1, true);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// ...
  _obj = obj;

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_obj() != nullptr) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}
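
// Typical ObjectLocker use from VM code (an illustrative sketch; the
// constructor's full signature sits in a portion of this file elided above):
//
//   {
//     ObjectLocker ol(h_obj, current);  // constructor enters the monitor
//     // ... operate on h_obj while it is locked ...
//   }                                   // destructor exits the monitor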


// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()

int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE_0(obj);
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
  } else {
    // The ObjectMonitor* can't be async deflated because the _waiters
    // field is incremented before ownership is dropped and decremented
    // after ownership is regained.
    monitor = inflate(current, obj(), inflate_cause_wait);
  }

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD);  // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
  } else {
    monitor = inflate(THREAD, obj(), inflate_cause_wait);
  }
  monitor->wait(millis, false, THREAD);
}


void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  } else {
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped by the calling thread.
    monitor = inflate(current, obj(), inflate_cause_notify);
  }
  monitor->notify(CHECK);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  } else {
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped by the calling thread.
    // ...

// ...

  markWord mark = obj->mark_acquire();
  for (;;) {
    intptr_t hash = mark.hash();
    if (hash != 0) {
      return hash;
    }

    hash = get_next_hash(current, obj);
    const markWord old_mark = mark;
    const markWord new_mark = old_mark.copy_set_hash(hash);

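    // Attempt to install the hash: cas_set_mark() returns the mark it
    // witnessed, so on failure some other thread changed the header
    // (possibly installing its own hash) and we loop to re-check
    // mark.hash() above.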
    mark = obj->cas_set_mark(new_mark, old_mark);
    if (old_mark == mark) {
      return hash;
    }
  }
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  if (EnableValhalla && obj->klass()->is_inline_klass()) {
    // VM should be calling bootstrap method
    ShouldNotReachHere();
  }
  if (UseObjectMonitorTable) {
    // Since the monitor isn't in the object header, the hash can simply be
    // installed in the object header.
    return install_hash_code(current, obj);
  }

  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);
    if (VerifyHeavyMonitors) {
      assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
      guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
    }
    if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
      hash = mark.hash();
      if (hash != 0) {  // if it has a hash, just return it
        return hash;
      }
      // ...
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  if (EnableValhalla && h_obj->mark().is_inline_type()) {
    return false;
  }
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked case, header points into owner's stack
    return current->is_lock_owned((address)mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

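  // The monitor can be deflated concurrently with this query, in which case
  // read_monitor() returns nullptr and we try again.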
  while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      return monitor->is_entered(current) != 0;
    }
    // ...

// ...
    markWord dmw = monitor->header();
    assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
    return;
  }
  (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
  assert(current == Thread::current(), "must be");
  assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
  return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
  return inflate_impl(thread, obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
  if (EnableValhalla) {
    guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
  }
  // The JavaThread* locking_thread requires that locking_thread == Thread::current() or
  // that it is suspended throughout the call by some other mechanism.
  // The thread might be nullptr when called from a non-JavaThread. (As may still be
  // the case from FastHashCode). However it is only important for correctness that the
  // thread is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous and the
    //                   locking_thread owns the object lock, then we
    //                   make the locking_thread the ObjectMonitor owner.
    // *  stack-locked - Coerce it to inflated from stack-locked.
    // *  INFLATING    - Busy wait for conversion from stack-locked to
    //                   inflated.
    // *  unlocked     - Aggressively inflate the object.