// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread-count-derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize(),
// which is called after command line options are processed.
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;
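
// A back-of-the-envelope sketch of the ceiling policy described above
// (illustrative only; the exact logic lives in monitors_used_above_threshold()
// and is seeded from the AvgMonitorsPerThreadEstimate flag):
//
//   ceiling ~= max(thread_count * AvgMonitorsPerThreadEstimate,
//                  _in_use_list max) + any no-progress adjustments
//
// e.g. 100 threads with AvgMonitorsPerThreadEstimate=1024 give a ceiling of
// 102400 until the in-use list max or no-progress adjustments push it higher.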

// These checks are required for wait, notify and exit to avoid inflating the
// monitor only to find out that this inline type object cannot be locked.
// Both macros expect a JavaThread* named "current" to be in scope.
#define CHECK_THROW_NOSYNC_IMSE(obj)                      \
  if (EnableValhalla && (obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current;                         \
    ResourceMark rm(THREAD);                              \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_NOSYNC_IMSE_0(obj)                    \
  if (EnableValhalla && (obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current;                         \
    ResourceMark rm(THREAD);                              \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

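// Usage sketch (mirrors the real call sites below, e.g. jni_exit() and
// wait(); "some_monitor_op" is a hypothetical name):
//
//   void ObjectSynchronizer::some_monitor_op(oop obj, TRAPS) {
//     JavaThread* current = THREAD;
//     CHECK_THROW_NOSYNC_IMSE(obj);  // throws IMSE and returns for inline types
//     // ... the monitor operation proper ...
//   }
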
// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

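// A sketch of the caller-side contract (hypothetical entry point, patterned
// after the compiled-code runtime callers such as OptoRuntime's notify stub):
//
//   void runtime_notify_entry(oopDesc* obj, JavaThread* current) {
//     if (ObjectSynchronizer::quick_notify(obj, current, /* all= */ false)) {
//       return;  // satisfied on the fast path, still _thread_in_Java
//     }
//     // otherwise: transition thread state and run the slow-path notify
//   }
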
bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
  const markWord mark = obj->mark();

  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
      // Degenerate notify
      // fast-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Degenerate notify
      // stack-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = read_monitor(current, obj, mark);
    if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
      // Racing with inflation/deflation; go slow-path.
      return false;
    }
    // ... [fast-path notify of waiters on the owned, inflated monitor elided]
    return true;
  }

  // other IMS exception states take the slow-path
  return false;
}

static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}

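// For reference: LockingMode is selected with -XX:LockingMode=<n>, where
// 0 is LM_MONITOR (always use inflated ObjectMonitors), 1 is LM_LEGACY
// (stack-locking), and 2 is LM_LIGHTWEIGHT (fast-locking via the lock stack).
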
// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");

  if (useHeavyMonitors()) {
    return false;  // Slow path
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    return LightweightSynchronizer::quick_enter(obj, lock, current);
  }

  assert(LockingMode == LM_LEGACY, "legacy mode below");

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = read_monitor(mark);
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      // ... [remainder of quick_enter_legacy() elided, along with the head of
      //      handle_sync_on_value_based_class(), whose tail continues below]
    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit

void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must synchronize
  // the locking_thread with respect to the current thread. Currently only used when
  // deoptimizing and re-locking locks. See Deoptimization::relock_objects
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");

  if (LockingMode == LM_LIGHTWEIGHT) {
    return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
  }

  if (!enter_fast_impl(obj, lock, locking_thread)) {
    // Inflated ObjectMonitor::enter_for is required

    // An async deflation can race after the inflate_for() call and before
    // enter_for() can make the ObjectMonitor busy. enter_for() returns false
    // if we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
      if (monitor->enter_for(locking_thread)) {
        return;
      }
      assert(monitor->is_being_async_deflated(), "must be");
    }
  }
}

void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "This method should never be called on an instance of an inline class");
  if (!enter_fast_impl(obj, lock, current)) {
    // Inflated ObjectMonitor::enter is required

    // An async deflation can race after the inflate() call and before
    // enter() can make the ObjectMonitor busy. enter() returns false if
    // we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
      if (monitor->enter(current)) {
        return;
      }
    }
  }
}

// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  guarantee(!EnableValhalla || !obj->klass()->is_inline_klass(), "Attempt to inflate inline type");
  assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_unlocked()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address)mark.locker())) {
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return true;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}

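// A note on the stack-lock encoding used above (descriptive sketch): when an
// object is stack-locked, its mark word points at the owner's BasicLock, and
// the displaced header saved in that BasicLock holds the original mark word
// so it can be restored on exit:
//
//   markWord mark = obj->mark();
//   if (mark.has_locker()) {
//     BasicLock* holder = mark.locker();              // in the owner's frame
//     markWord saved = holder->displaced_header();    // displaced mark word
//   }
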
void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
  assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (EnableValhalla && mark.is_inline_type()) {
      return;
    }
    if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_unlocked(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // Java Monitor can be asynchronously inflated by a thread that
            // does not own the Java Monitor.
            ObjectMonitor* m = mark.monitor();
            assert(m->object()->mark() == mark, "invariant");
            assert(m->is_entered(current), "invariant");
          }
        }
#endif
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  assert(!monitor->is_owner_anonymous(), "must not be");
  monitor->exit(current);
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavyweight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  JavaThread* THREAD = current;
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  if (EnableValhalla && obj->klass()->is_inline_klass()) {
    ResourceMark rm(THREAD);
    const char* desc = "Cannot synchronize on an instance of value class ";
    const char* className = obj->klass()->external_name();
    size_t msglen = strlen(desc) + strlen(className) + 1;
    char* message = NEW_RESOURCE_ARRAY(char, msglen);
    assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
    jio_snprintf(message, msglen, "%s%s", desc, className);
    THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor;
    bool entered;
    if (LockingMode == LM_LIGHTWEIGHT) {
      entered = LightweightSynchronizer::inflate_and_enter(obj(), inflate_cause_jni_enter, current, current) != nullptr;
    } else {
      monitor = inflate(current, obj(), inflate_cause_jni_enter);
      entered = monitor->enter(current);
    }

    if (entered) {
      current->inc_held_monitor_count(1, true);
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

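// For context, a hedged sketch of the JNI-side trigger (not code from this
// file): jni_enter() is reached through the JNI MonitorEnter function:
//
//   jint rc = env->MonitorEnter(obj);   // C++ JNI binding
//   // rc != JNI_OK on failure; with Valhalla, entering an instance of a
//   // value class raises java.lang.IdentityException instead.
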
// NOTE: must use heavyweight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
  } else {
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped inside exit() and the ObjectMonitor* must be !is_busy().
    monitor = inflate(current, obj, inflate_cause_jni_exit);
  }
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
    current->dec_held_monitor_count(1, true);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_obj() != nullptr) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}
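
// Typical use (an illustrative sketch, with "current" a JavaThread* in scope):
//
//   {
//     ObjectLocker ol(h_obj, current);   // enters the lock (may inflate)
//     // ... VM work guarded by the object's monitor ...
//   }                                    // destructor exits the lock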

// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavyweight monitor to handle wait()

int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE_0(obj);
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
  } else {
    // The ObjectMonitor* can't be async deflated because the _waiters
    // field is incremented before ownership is dropped and decremented
    // after ownership is regained.
    monitor = inflate(current, obj(), inflate_cause_wait);
  }

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD);  // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
  } else {
    monitor = inflate(THREAD, obj(), inflate_cause_wait);
  }
  monitor->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  } else {
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped by the calling thread.
    monitor = inflate(current, obj(), inflate_cause_notify);
  }
  monitor->notify(CHECK);
}

// NOTE: see comment at notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }

  ObjectMonitor* monitor;
  if (LockingMode == LM_LIGHTWEIGHT) {
    monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  } else {
    // The ObjectMonitor* can't be async deflated until ownership is
    // dropped by the calling thread.
    // ... [tail of notifyall() elided, along with hash-code generation
    //      (get_next_hash) and related helpers]

// (Helper header reconstructed from the FastHashCode() call site below.)
static intptr_t install_hash_code(Thread* current, oop obj) {
  markWord mark = obj->mark_acquire();
  for (;;) {
    intptr_t hash = mark.hash();
    if (hash != 0) {
      return hash;
    }

    hash = get_next_hash(current, obj);
    const markWord old_mark = mark;
    const markWord new_mark = old_mark.copy_set_hash(hash);

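    // Publish the chosen hash with a CAS on the mark word. If another thread
    // raced us and installed a hash first, the CAS fails, the re-read mark
    // now carries that hash, and the loop returns it -- so every thread
    // observes the same identity hash.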
    mark = obj->cas_set_mark(new_mark, old_mark);
    if (old_mark == mark) {
      return hash;
    }
  }
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  if (EnableValhalla && obj->klass()->is_inline_klass()) {
    // VM should be calling bootstrap method
    ShouldNotReachHere();
  }
  if (UseObjectMonitorTable) {
    // Since the monitor isn't in the object header, the hash can simply be
    // installed in the object header.
    return install_hash_code(current, obj);
  }

  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);
    if (VerifyHeavyMonitors) {
      assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
      guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
    }
    if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
      hash = mark.hash();
      if (hash != 0) {  // if it has a hash, just return it
        return hash;
      }
      // ... [hash installation for this case, the stack-locked case, and the
      //      inflated-monitor header CAS elided]
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }

    // We finally get the hash.
    return hash;
  }
}

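// For orientation: FastHashCode() is the slow path behind identity hashing
// (Object.hashCode()/System.identityHashCode()). A simplified sketch of how
// the JVM entry point reaches it (null handling elided):
//
//   JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle))
//     oop obj = JNIHandles::resolve_non_null(handle);
//     return checked_cast<jint>(ObjectSynchronizer::FastHashCode(THREAD, obj));
//   JVM_END
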
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  if (EnableValhalla && h_obj->mark().is_inline_type()) {
    return false;
  }
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked case, header points into owner's stack
    return current->is_lock_owned((address)mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      return monitor->is_entered(current) != 0;
    }
    // ... [retry on the inflation/deflation race elided, along with the
    //      LM_LEGACY/LM_MONITOR cases and intervening audit helpers]

    // (tail of an inflation helper; its opening lines fall in the elided span above)
    markWord dmw = monitor->header();
    assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
    return;
  }
  (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
  assert(current == Thread::current(), "must be");
  assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
  return inflate_impl(obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
  return inflate_impl(obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_impl(oop object, const InflateCause cause) {
  if (EnableValhalla) {
    guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
  }
  assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // * inflated     - Just return it.
    // * stack-locked - Coerce it to inflated from stack-locked.
    // * INFLATING    - Busy wait for conversion from stack-locked to
    //                  inflated.
    // * unlocked     - Aggressively inflate the object.

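    // (For reference: in the legacy scheme the low two bits of the mark word
    // distinguish these states -- 01 unlocked/neutral, 00 stack-locked,
    // 10 inflated monitor -- and INFLATING() is the all-zero mark word.)
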
    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      return inf;
    }
    // ... [remaining inflation cases elided]