< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page

 289 // removed from the system.
 290 //
 291 // Note: If the _in_use_list max exceeds the ceiling, then
 292 // monitors_used_above_threshold() will use the in_use_list max instead
 293 // of the thread count derived ceiling because we have used more
 294 // ObjectMonitors than the estimated average.
 295 //
 296 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 297 // no-progress async monitor deflation cycles in a row, then the ceiling
 298 // is adjusted upwards by monitors_used_above_threshold().
 299 //
 300 // Start the ceiling with the estimate for one thread in initialize()
 301 // which is called after cmd line options are processed.
 302 static size_t _in_use_list_ceiling = 0;
// Flag raised to request an async deflation pass; volatile because it is
// presumably read/written by multiple threads without a lock — confirm callers.
 303 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// One-shot flag for the final monitor audit. NOTE(review): the exact trigger
// is outside this view — verify against the users of this field.
 304 bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the most recent async deflation cycle.
 305 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Count of consecutive no-progress deflation cycles (see ceiling note above).
 306 static uintx _no_progress_cnt = 0;
// When set, the next deflation cycle skips bumping _no_progress_cnt.
 307 static bool _no_progress_skip_increment = false;
 308 
















 309 // =====================> Quick functions
 310 
 311 // The quick_* forms are special fast-path variants used to improve
 312 // performance.  In the simplest case, a "quick_*" implementation could
 313 // simply return false, in which case the caller will perform the necessary
 314 // state transitions and call the slow-path form.
 315 // The fast-path is designed to handle frequently arising cases in an efficient
 316 // manner and is just a degenerate "optimistic" variant of the slow-path.
 317 // returns true  -- to indicate the call was satisfied.
 318 // returns false -- to indicate the call needs the services of the slow-path.
 319 // A no-loitering ordinance is in effect for code in the quick_* family
 320 // operators: safepoints or indefinite blocking (blocking that might span a
 321 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 322 // entry.
 323 //
 324 // Consider: An interesting optimization is to have the JIT recognize the
 325 // following common idiom:
 326 //   synchronized (someobj) { .... ; notify(); }
 327 // That is, we find a notify() or notifyAll() call that immediately precedes
 328 // the monitorexit operation.  In that case the JIT could fuse the operations
 329 // into a single notifyAndExit() runtime primitive.
 330 
 331 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 332   assert(current->thread_state() == _thread_in_Java, "invariant");
 333   NoSafepointVerifier nsv;
 334   if (obj == nullptr) return false;  // slow-path for invalid obj

 335   const markWord mark = obj->mark();
 336 
 337   if (LockingMode == LM_LIGHTWEIGHT) {
 338     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 339       // Degenerate notify
 340       // fast-locked by caller so by definition the implied waitset is empty.
 341       return true;
 342     }
 343   } else if (LockingMode == LM_LEGACY) {
 344     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 345       // Degenerate notify
 346       // stack-locked by caller so by definition the implied waitset is empty.
 347       return true;
 348     }
 349   }
 350 
 351   if (mark.has_monitor()) {
 352     ObjectMonitor* const mon = mark.monitor();
 353     assert(mon->object() == oop(obj), "invariant");
 354     if (mon->owner() != current) return false;  // slow-path for IMS exception

 371     }
 372     return true;
 373   }
 374 
 375   // other IMS exception states take the slow-path
 376   return false;
 377 }
 378 
 379 
 380 // The LockNode emitted directly at the synchronization site would have
 381 // been too big if it were to have included support for the cases of inflated
 382 // recursive enter and exit, so they go here instead.
 383 // Note that we can't safely call AsyncPrintJavaStack() from within
 384 // quick_enter() as our thread state remains _in_Java.
 385 
 386 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 387                                      BasicLock * lock) {
 388   assert(current->thread_state() == _thread_in_Java, "invariant");
 389   NoSafepointVerifier nsv;
 390   if (obj == nullptr) return false;       // Need to throw NPE

 391 
 392   if (obj->klass()->is_value_based()) {
 393     return false;
 394   }
 395 
 396   if (LockingMode == LM_LIGHTWEIGHT) {
 397     LockStack& lock_stack = current->lock_stack();
 398     if (lock_stack.is_full()) {
 399       // Always go into runtime if the lock stack is full.
 400       return false;
 401     }
 402     if (lock_stack.try_recursive_enter(obj)) {
 403       // Recursive lock successful.
 404       current->inc_held_monitor_count();
 405       return true;
 406     }
 407   }
 408 
 409   const markWord mark = obj->mark();
 410 

 507     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 508   }
 509 }
 510 
 511 static bool useHeavyMonitors() {
 512 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 513   return LockingMode == LM_MONITOR;
 514 #else
 515   return false;
 516 #endif
 517 }
 518 
 519 // -----------------------------------------------------------------------------
 520 // Monitor Enter/Exit
 521 
// Acquire the lock on 'obj' on behalf of 'locking_thread', which may be a
// different thread than the caller (currently only for deopt re-locking).
 522 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 523   // When called with locking_thread != Thread::current() some mechanism must synchronize
 524   // the locking_thread with respect to the current thread. Currently only used when
 525   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 526   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

 527   if (!enter_fast_impl(obj, lock, locking_thread)) {
 528     // Inflated ObjectMonitor::enter_for is required
 529 
 530     // An async deflation can race after the inflate_for() call and before
 531     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 532     // if we have lost the race to async deflation and we simply try again.
 533     while (true) {
 534       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 535       if (monitor->enter_for(locking_thread)) {
 536         return;
 537       }
      // Losing the race to async deflation is the only way enter_for() may
      // fail here; re-inflate and retry.
 538       assert(monitor->is_being_async_deflated(), "must be");
 539     }
 540   }
 541 }
 542 
// Acquire the lock on 'obj' for the current thread. Tries the fast path
// first; on failure, inflates and enters the heavy monitor, retrying if
// async deflation wins the race.
 543 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 544   assert(current == Thread::current(), "must be");

 545   if (!enter_fast_impl(obj, lock, current)) {
 546     // Inflated ObjectMonitor::enter is required
 547 
 548     // An async deflation can race after the inflate() call and before
 549     // enter() can make the ObjectMonitor busy. enter() returns false if
 550     // we have lost the race to async deflation and we simply try again.
 551     while (true) {
 552       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 553       if (monitor->enter(current)) {
 554         return;
 555       }
 556     }
 557   }
 558 }
 559 
 560 // The interpreter and compiler assembly code tries to lock using the fast path
 561 // of this algorithm. Make sure to update that code if the following function is
 562 // changed. The implementation is extremely sensitive to race condition. Be careful.
 563 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 564 
 565   if (obj->klass()->is_value_based()) {
 566     handle_sync_on_value_based_class(obj, locking_thread);
 567   }
 568 
 569   locking_thread->inc_held_monitor_count();
 570 
 571   if (!useHeavyMonitors()) {
 572     if (LockingMode == LM_LIGHTWEIGHT) {
 573       // Fast-locking does not use the 'lock' argument.
 574       LockStack& lock_stack = locking_thread->lock_stack();
 575       if (lock_stack.is_full()) {
 576         // We unconditionally make room on the lock stack by inflating
 577         // the least recently locked object on the lock stack.
 578 
 579         // About the choice to inflate least recently locked object.
 580         // First we must chose to inflate a lock, either some lock on
 581         // the lock-stack or the lock that is currently being entered
 582         // (which may or may not be on the lock-stack).
 583         // Second the best lock to inflate is a lock which is entered
 584         // in a control flow where there are only a very few locks being

 644       // so it does not matter what the value is, except that it
 645       // must be non-zero to avoid looking like a re-entrant lock,
 646       // and must not look locked either.
 647       lock->set_displaced_header(markWord::unused_mark());
 648 
 649       // Failed to fast lock.
 650       return false;
 651     }
 652   } else if (VerifyHeavyMonitors) {
 653     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 654   }
 655 
 656   return false;
 657 }
 658 
 659 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 660   current->dec_held_monitor_count();
 661 
 662   if (!useHeavyMonitors()) {
 663     markWord mark = object->mark();



 664     if (LockingMode == LM_LIGHTWEIGHT) {
 665       // Fast-locking does not use the 'lock' argument.
 666       LockStack& lock_stack = current->lock_stack();
 667       if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
 668         // Recursively unlocked.
 669         return;
 670       }
 671 
 672       if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
 673         // This lock is recursive but is not at the top of the lock stack so we're
 674         // doing an unbalanced exit. We have to fall thru to inflation below and
 675         // let ObjectMonitor::exit() do the unlock.
 676       } else {
 677         while (mark.is_fast_locked()) {
 678           // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
 679           const markWord unlocked_mark = mark.set_unlocked();
 680           const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
 681           if (old_mark == mark) {
 682             size_t recursions = lock_stack.remove(object) - 1;
 683             assert(recursions == 0, "must not be recursive here");

 725           return;
 726         }
 727       }
 728     }
 729   } else if (VerifyHeavyMonitors) {
 730     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 731   }
 732 
 733   // We have to take the slow-path of possible inflation and then exit.
 734   // The ObjectMonitor* can't be async deflated until ownership is
 735   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 736   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 737   assert(!monitor->is_owner_anonymous(), "must not be");
 738   monitor->exit(current);
 739 }
 740 
 741 // -----------------------------------------------------------------------------
 742 // JNI locks on java objects
 743 // NOTE: must use heavy weight monitor to handle jni monitor enter
// JNI MonitorEnter support: always goes through an inflated (heavy)
// ObjectMonitor — there is no fast path here.
 744 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {

 745   if (obj->klass()->is_value_based()) {
 746     handle_sync_on_value_based_class(obj, current);
 747   }
 748 










 749   // the current locking is from JNI instead of Java code
 750   current->set_current_pending_monitor_is_from_java(false);
 751   // An async deflation can race after the inflate() call and before
 752   // enter() can make the ObjectMonitor busy. enter() returns false if
 753   // we have lost the race to async deflation and we simply try again.
 754   while (true) {
 755     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 756     if (monitor->enter(current)) {
      // NOTE(review): the second argument presumably tags this as a JNI-held
      // monitor in the held-count bookkeeping — confirm against
      // JavaThread::inc_held_monitor_count().
 757       current->inc_held_monitor_count(1, true);
 758       break;
 759     }
 760   }
  // Restore the default: subsequent pending monitors are from Java code.
 761   current->set_current_pending_monitor_is_from_java(true);
 762 }
 763 
 764 // NOTE: must use heavy weight monitor to handle jni monitor exit
// JNI MonitorExit support: inflates and exits the heavy monitor. The exit
// must happen even if an exception is already pending (hence no CHECK below).
 765 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 766   JavaThread* current = THREAD;

 767 
 768   // The ObjectMonitor* can't be async deflated until ownership is
 769   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 770   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 771   // If this thread has locked the object, exit the monitor. We
 772   // intentionally do not use CHECK on check_owner because we must exit the
 773   // monitor even if an exception was already pending.
 774   if (monitor->check_owner(THREAD)) {
 775     monitor->exit(current);
 776     current->dec_held_monitor_count(1, true);
 777   }
 778 }
 779 
 780 // -----------------------------------------------------------------------------
 781 // Internal VM locks on java objects
 782 // standard constructor, allows locking failures
 783 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 784   _thread = thread;
 785   _thread->check_for_valid_safepoint_state();
 786   _obj = obj;
 787 
 788   if (_obj() != nullptr) {
 789     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 790   }
 791 }
 792 
 793 ObjectLocker::~ObjectLocker() {
 794   if (_obj() != nullptr) {
 795     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 796   }
 797 }
 798 
 799 
 800 // -----------------------------------------------------------------------------
 801 //  Wait/Notify/NotifyAll
 802 // NOTE: must use heavy weight monitor to handle wait()
// Java-level Object.wait() support. Throws IllegalArgumentException for a
// negative timeout and always inflates (wait needs a heavy monitor).
// Returns the dtrace probe return code — see the bug-workaround note below.
 803 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 804   JavaThread* current = THREAD;

 805   if (millis < 0) {
 806     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 807   }
 808   // The ObjectMonitor* can't be async deflated because the _waiters
 809   // field is incremented before ownership is dropped and decremented
 810   // after ownership is regained.
 811   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 812 
 813   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  // Deliberately not CHECK: the probe call below must run even if wait()
  // left an exception pending.
 814   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 815 
 816   // This dummy call is in place to get around dtrace bug 6254741.  Once
 817   // that's fixed we can uncomment the following line, remove the call
 818   // and change this function back into a "void" func.
 819   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 820   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 821   return ret_code;
 822 }
 823 
// Java-level Object.notify() support. If the lock is still fast/stack-locked
// by the calling thread, the monitor was never inflated, so no thread can be
// waiting and the notify degenerates to a no-op.
 824 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 825   JavaThread* current = THREAD;

 826 
 827   markWord mark = obj->mark();
 828   if (LockingMode == LM_LIGHTWEIGHT) {
 829     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 830       // Not inflated so there can't be any waiters to notify.
 831       return;
 832     }
 833   } else if (LockingMode == LM_LEGACY) {
 834     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 835       // Not inflated so there can't be any waiters to notify.
 836       return;
 837     }
 838   }
 839   // The ObjectMonitor* can't be async deflated until ownership is
 840   // dropped by the calling thread.
 841   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  // CHECK: propagate any exception raised by ObjectMonitor::notify().
 842   monitor->notify(CHECK);
 843 }
 844 
 845 // NOTE: see comment of notify()
// Java-level Object.notifyAll() support. Structure mirrors notify() above:
// a still-fast/stack-locked object was never inflated and has no waiters.
 846 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 847   JavaThread* current = THREAD;

 848 
 849   markWord mark = obj->mark();
 850   if (LockingMode == LM_LIGHTWEIGHT) {
 851     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 852       // Not inflated so there can't be any waiters to notify.
 853       return;
 854     }
 855   } else if (LockingMode == LM_LEGACY) {
 856     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 857       // Not inflated so there can't be any waiters to notify.
 858       return;
 859     }
 860   }
 861   // The ObjectMonitor* can't be async deflated until ownership is
 862   // dropped by the calling thread.
 863   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  // CHECK: propagate any exception raised by ObjectMonitor::notifyAll().
 864   monitor->notifyAll(CHECK);
 865 }
 866 
 867 // -----------------------------------------------------------------------------

 982     // This is probably the best overall implementation -- we'll
 983     // likely make this the default in future releases.
 984     unsigned t = current->_hashStateX;
 985     t ^= (t << 11);
 986     current->_hashStateX = current->_hashStateY;
 987     current->_hashStateY = current->_hashStateZ;
 988     current->_hashStateZ = current->_hashStateW;
 989     unsigned v = current->_hashStateW;
 990     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 991     current->_hashStateW = v;
 992     value = v;
 993   }
 994 
 995   value &= markWord::hash_mask;
 996   if (value == 0) value = 0xBAD;
 997   assert(value != markWord::no_hash, "invariant");
 998   return value;
 999 }
1000 
1001 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {




1002 
1003   while (true) {
1004     ObjectMonitor* monitor = nullptr;
1005     markWord temp, test;
1006     intptr_t hash;
1007     markWord mark = read_stable_mark(obj);
1008     if (VerifyHeavyMonitors) {
1009       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1010       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1011     }
1012     if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1013       hash = mark.hash();
1014       if (hash != 0) {                     // if it has a hash, just return it
1015         return hash;
1016       }
1017       hash = get_next_hash(current, obj);  // get a new hash
1018       temp = mark.copy_set_hash(hash);     // merge the hash into header
1019                                            // try to install the hash
1020       test = obj->cas_set_mark(temp, mark);
1021       if (test == mark) {                  // if the hash was installed, return it

1099         hash = test.hash();
1100         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1101         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1102       }
1103       if (monitor->is_being_async_deflated()) {
1104         // If we detect that async deflation has occurred, then we
1105         // attempt to restore the header/dmw to the object's header
1106         // so that we only retry once if the deflater thread happens
1107         // to be slow.
1108         monitor->install_displaced_markword_in_object(obj);
1109         continue;
1110       }
1111     }
1112     // We finally get the hash.
1113     return hash;
1114   }
1115 }
1116 
1117 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1118                                                    Handle h_obj) {



1119   assert(current == JavaThread::current(), "Can only be called on current thread");
1120   oop obj = h_obj();
1121 
1122   markWord mark = read_stable_mark(obj);
1123 
1124   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1125     // stack-locked case, header points into owner's stack
1126     return current->is_lock_owned((address)mark.locker());
1127   }
1128 
1129   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1130     // fast-locking case, see if lock is in current's lock stack
1131     return current->lock_stack().contains(h_obj());
1132   }
1133 
1134   if (mark.has_monitor()) {
1135     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1136     // The first stage of async deflation does not affect any field
1137     // used by this comparison so the ObjectMonitor* is usable here.
1138     ObjectMonitor* monitor = mark.monitor();

1382     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1383     return;
1384   }
1385   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1386 }
1387 
1388 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1389   assert(current == Thread::current(), "must be");
1390   if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1391     return inflate_impl(JavaThread::cast(current), obj, cause);
1392   }
1393   return inflate_impl(nullptr, obj, cause);
1394 }
1395 
// Inflate the monitor for 'obj' on behalf of 'thread', which is either the
// current thread or a thread suspended for object deopt (see enter_for()).
1396 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1397   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1398   return inflate_impl(thread, obj, cause);
1399 }
1400 
1401 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {



1402   // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1403   // that the inflating_thread == Thread::current() or is suspended throughout the call by
1404   // some other mechanism.
1405   // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
1406   // JavaThread. (As may still be the case from FastHashCode). However it is only
1407   // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
1408   // is set when called from ObjectSynchronizer::enter from the owning thread,
1409   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1410   EventJavaMonitorInflate event;
1411 
1412   for (;;) {
1413     const markWord mark = object->mark_acquire();
1414 
1415     // The mark can be in one of the following states:
1416     // *  inflated     - Just return if using stack-locking.
1417     //                   If using fast-locking and the ObjectMonitor owner
1418     //                   is anonymous and the inflating_thread owns the
1419     //                   object lock, then we make the inflating_thread
1420     //                   the ObjectMonitor owner and remove the lock from
1421     //                   the inflating_thread's lock stack.

 289 // removed from the system.
 290 //
 291 // Note: If the _in_use_list max exceeds the ceiling, then
 292 // monitors_used_above_threshold() will use the in_use_list max instead
 293 // of the thread count derived ceiling because we have used more
 294 // ObjectMonitors than the estimated average.
 295 //
 296 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 297 // no-progress async monitor deflation cycles in a row, then the ceiling
 298 // is adjusted upwards by monitors_used_above_threshold().
 299 //
 300 // Start the ceiling with the estimate for one thread in initialize()
 301 // which is called after cmd line options are processed.
 302 static size_t _in_use_list_ceiling = 0;
// Flag raised to request an async deflation pass; volatile because it is
// presumably accessed by multiple threads without a lock — confirm callers.
 303 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// One-shot flag for the final monitor audit (trigger not visible here).
 304 bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the most recent async deflation cycle.
 305 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Count of consecutive no-progress deflation cycles (see ceiling note above).
 306 static uintx _no_progress_cnt = 0;
// When set, the next deflation cycle skips bumping _no_progress_cnt.
 307 static bool _no_progress_skip_increment = false;
 308 
 309 // These checks are required for wait, notify and exit to avoid inflating the monitor to
 310 // find out this inline type object cannot be locked.
// Both macros expect a 'JavaThread* current' to be in scope at the expansion
// site; on a Valhalla inline type they throw IllegalMonitorStateException and
// return from the enclosing function (via THROW_MSG / THROW_MSG_0).
 311 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 312   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 313     JavaThread* THREAD = current;           \
 314     ResourceMark rm(THREAD);                \
 315     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 316   }
 317 
// Variant for value-returning functions (THROW_MSG_0 returns a zero value).
 318 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 319   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 320     JavaThread* THREAD = current;             \
 321     ResourceMark rm(THREAD);                  \
 322     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 323   }
 324 
 325 // =====================> Quick functions
 326 
 327 // The quick_* forms are special fast-path variants used to improve
 328 // performance.  In the simplest case, a "quick_*" implementation could
 329 // simply return false, in which case the caller will perform the necessary
 330 // state transitions and call the slow-path form.
 331 // The fast-path is designed to handle frequently arising cases in an efficient
 332 // manner and is just a degenerate "optimistic" variant of the slow-path.
 333 // returns true  -- to indicate the call was satisfied.
 334 // returns false -- to indicate the call needs the services of the slow-path.
 335 // A no-loitering ordinance is in effect for code in the quick_* family
 336 // operators: safepoints or indefinite blocking (blocking that might span a
 337 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 338 // entry.
 339 //
 340 // Consider: An interesting optimization is to have the JIT recognize the
 341 // following common idiom:
 342 //   synchronized (someobj) { .... ; notify(); }
 343 // That is, we find a notify() or notifyAll() call that immediately precedes
 344 // the monitorexit operation.  In that case the JIT could fuse the operations
 345 // into a single notifyAndExit() runtime primitive.
 346 
 347 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 348   assert(current->thread_state() == _thread_in_Java, "invariant");
 349   NoSafepointVerifier nsv;
 350   if (obj == nullptr) return false;  // slow-path for invalid obj
 351   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 352   const markWord mark = obj->mark();
 353 
 354   if (LockingMode == LM_LIGHTWEIGHT) {
 355     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 356       // Degenerate notify
 357       // fast-locked by caller so by definition the implied waitset is empty.
 358       return true;
 359     }
 360   } else if (LockingMode == LM_LEGACY) {
 361     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 362       // Degenerate notify
 363       // stack-locked by caller so by definition the implied waitset is empty.
 364       return true;
 365     }
 366   }
 367 
 368   if (mark.has_monitor()) {
 369     ObjectMonitor* const mon = mark.monitor();
 370     assert(mon->object() == oop(obj), "invariant");
 371     if (mon->owner() != current) return false;  // slow-path for IMS exception

 388     }
 389     return true;
 390   }
 391 
 392   // other IMS exception states take the slow-path
 393   return false;
 394 }
 395 
 396 
 397 // The LockNode emitted directly at the synchronization site would have
 398 // been too big if it were to have included support for the cases of inflated
 399 // recursive enter and exit, so they go here instead.
 400 // Note that we can't safely call AsyncPrintJavaStack() from within
 401 // quick_enter() as our thread state remains _in_Java.
 402 
 403 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 404                                      BasicLock * lock) {
 405   assert(current->thread_state() == _thread_in_Java, "invariant");
 406   NoSafepointVerifier nsv;
 407   if (obj == nullptr) return false;       // Need to throw NPE
 408   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 409 
 410   if (obj->klass()->is_value_based()) {
 411     return false;
 412   }
 413 
 414   if (LockingMode == LM_LIGHTWEIGHT) {
 415     LockStack& lock_stack = current->lock_stack();
 416     if (lock_stack.is_full()) {
 417       // Always go into runtime if the lock stack is full.
 418       return false;
 419     }
 420     if (lock_stack.try_recursive_enter(obj)) {
 421       // Recursive lock successful.
 422       current->inc_held_monitor_count();
 423       return true;
 424     }
 425   }
 426 
 427   const markWord mark = obj->mark();
 428 

 525     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 526   }
 527 }
 528 
// True only when the user selected LM_MONITOR on one of the listed platforms;
// NOTE(review): presumably gated to platforms whose fast-path code supports
// heavy-monitor-only mode — confirm against the platform assembly.
 529 static bool useHeavyMonitors() {
 530 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 531   return LockingMode == LM_MONITOR;
 532 #else
 533   return false;
 534 #endif
 535 }
 536 
 537 // -----------------------------------------------------------------------------
 538 // Monitor Enter/Exit
 539 
// Acquire the lock on 'obj' on behalf of 'locking_thread', which may be a
// different thread than the caller (currently only for deopt re-locking).
// Valhalla: inline-class instances must never reach this path.
 540 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 541   // When called with locking_thread != Thread::current() some mechanism must synchronize
 542   // the locking_thread with respect to the current thread. Currently only used when
 543   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 544   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 545   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");
 546   if (!enter_fast_impl(obj, lock, locking_thread)) {
 547     // Inflated ObjectMonitor::enter_for is required
 548 
 549     // An async deflation can race after the inflate_for() call and before
 550     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 551     // if we have lost the race to async deflation and we simply try again.
 552     while (true) {
 553       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 554       if (monitor->enter_for(locking_thread)) {
 555         return;
 556       }
      // Losing the race to async deflation is the only way enter_for() may
      // fail here; re-inflate and retry.
 557       assert(monitor->is_being_async_deflated(), "must be");
 558     }
 559   }
 560 }
 561 
// Acquire the lock on 'obj' for the current thread. Tries the fast path
// first; on failure, inflates and enters the heavy monitor, retrying if
// async deflation wins the race. Valhalla: inline-class instances are banned.
 562 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 563   assert(current == Thread::current(), "must be");
 564   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "This method should never be called on an instance of an inline class");
 565   if (!enter_fast_impl(obj, lock, current)) {
 566     // Inflated ObjectMonitor::enter is required
 567 
 568     // An async deflation can race after the inflate() call and before
 569     // enter() can make the ObjectMonitor busy. enter() returns false if
 570     // we have lost the race to async deflation and we simply try again.
 571     while (true) {
 572       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 573       if (monitor->enter(current)) {
 574         return;
 575       }
 576     }
 577   }
 578 }
 579 
 580 // The interpreter and compiler assembly code tries to lock using the fast path
 581 // of this algorithm. Make sure to update that code if the following function is
 582 // changed. The implementation is extremely sensitive to race condition. Be careful.
 583 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 584   guarantee(!EnableValhalla || !obj->klass()->is_inline_klass(), "Attempt to inflate inline type");
 585   if (obj->klass()->is_value_based()) {
 586     handle_sync_on_value_based_class(obj, locking_thread);
 587   }
 588 
 589   locking_thread->inc_held_monitor_count();
 590 
 591   if (!useHeavyMonitors()) {
 592     if (LockingMode == LM_LIGHTWEIGHT) {
 593       // Fast-locking does not use the 'lock' argument.
 594       LockStack& lock_stack = locking_thread->lock_stack();
 595       if (lock_stack.is_full()) {
 596         // We unconditionally make room on the lock stack by inflating
 597         // the least recently locked object on the lock stack.
 598 
 599         // About the choice to inflate least recently locked object.
 600         // First we must chose to inflate a lock, either some lock on
 601         // the lock-stack or the lock that is currently being entered
 602         // (which may or may not be on the lock-stack).
 603         // Second the best lock to inflate is a lock which is entered
 604         // in a control flow where there are only a very few locks being

 664       // so it does not matter what the value is, except that it
 665       // must be non-zero to avoid looking like a re-entrant lock,
 666       // and must not look locked either.
 667       lock->set_displaced_header(markWord::unused_mark());
 668 
 669       // Failed to fast lock.
 670       return false;
 671     }
 672   } else if (VerifyHeavyMonitors) {
 673     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 674   }
 675 
 676   return false;
 677 }
 678 
 679 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 680   current->dec_held_monitor_count();
 681 
 682   if (!useHeavyMonitors()) {
 683     markWord mark = object->mark();
 684     if (EnableValhalla && mark.is_inline_type()) {
 685       return;
 686     }
 687     if (LockingMode == LM_LIGHTWEIGHT) {
 688       // Fast-locking does not use the 'lock' argument.
 689       LockStack& lock_stack = current->lock_stack();
 690       if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
 691         // Recursively unlocked.
 692         return;
 693       }
 694 
 695       if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
 696         // This lock is recursive but is not at the top of the lock stack so we're
 697         // doing an unbalanced exit. We have to fall thru to inflation below and
 698         // let ObjectMonitor::exit() do the unlock.
 699       } else {
 700         while (mark.is_fast_locked()) {
 701           // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
 702           const markWord unlocked_mark = mark.set_unlocked();
 703           const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
 704           if (old_mark == mark) {
 705             size_t recursions = lock_stack.remove(object) - 1;
 706             assert(recursions == 0, "must not be recursive here");

 748           return;
 749         }
 750       }
 751     }
 752   } else if (VerifyHeavyMonitors) {
 753     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 754   }
 755 
 756   // We have to take the slow-path of possible inflation and then exit.
 757   // The ObjectMonitor* can't be async deflated until ownership is
 758   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 759   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 760   assert(!monitor->is_owner_anonymous(), "must not be");
 761   monitor->exit(current);
 762 }
 763 
 764 // -----------------------------------------------------------------------------
 765 // JNI locks on java objects
 766 // NOTE: must use heavy weight monitor to handle jni monitor enter
 767 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 768   JavaThread* THREAD = current;
 769   if (obj->klass()->is_value_based()) {
 770     handle_sync_on_value_based_class(obj, current);
 771   }
 772 
 773   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 774     ResourceMark rm(THREAD);
 775     const char* desc = "Cannot synchronize on an instance of value class ";
 776     const char* className = obj->klass()->external_name();
 777     size_t msglen = strlen(desc) + strlen(className) + 1;
 778     char* message = NEW_RESOURCE_ARRAY(char, msglen);
 779     assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
 780     THROW_MSG(vmSymbols::java_lang_IdentityException(), className);
 781   }
 782 
 783   // the current locking is from JNI instead of Java code
 784   current->set_current_pending_monitor_is_from_java(false);
 785   // An async deflation can race after the inflate() call and before
 786   // enter() can make the ObjectMonitor busy. enter() returns false if
 787   // we have lost the race to async deflation and we simply try again.
 788   while (true) {
 789     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 790     if (monitor->enter(current)) {
 791       current->inc_held_monitor_count(1, true);
 792       break;
 793     }
 794   }
 795   current->set_current_pending_monitor_is_from_java(true);
 796 }
 797 
 798 // NOTE: must use heavy weight monitor to handle jni monitor exit
 799 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 800   JavaThread* current = THREAD;
 801   CHECK_THROW_NOSYNC_IMSE(obj);
 802 
 803   // The ObjectMonitor* can't be async deflated until ownership is
 804   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 805   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 806   // If this thread has locked the object, exit the monitor. We
 807   // intentionally do not use CHECK on check_owner because we must exit the
 808   // monitor even if an exception was already pending.
 809   if (monitor->check_owner(THREAD)) {
 810     monitor->exit(current);
 811     current->dec_held_monitor_count(1, true);
 812   }
 813 }
 814 
 815 // -----------------------------------------------------------------------------
 816 // Internal VM locks on java objects
 817 // standard constructor, allows locking failures
 818 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 819   _thread = thread;
 820   _thread->check_for_valid_safepoint_state();
 821   _obj = obj;
 822 
 823   if (_obj() != nullptr) {
 824     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 825   }
 826 }
 827 
 828 ObjectLocker::~ObjectLocker() {
 829   if (_obj() != nullptr) {
 830     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 831   }
 832 }
 833 
 834 
 835 // -----------------------------------------------------------------------------
 836 //  Wait/Notify/NotifyAll
 837 // NOTE: must use heavy weight monitor to handle wait()
 838 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 839   JavaThread* current = THREAD;
 840   CHECK_THROW_NOSYNC_IMSE_0(obj);
 841   if (millis < 0) {
 842     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 843   }
 844   // The ObjectMonitor* can't be async deflated because the _waiters
 845   // field is incremented before ownership is dropped and decremented
 846   // after ownership is regained.
 847   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 848 
 849   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 850   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 851 
 852   // This dummy call is in place to get around dtrace bug 6254741.  Once
 853   // that's fixed we can uncomment the following line, remove the call
 854   // and change this function back into a "void" func.
 855   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 856   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 857   return ret_code;
 858 }
 859 
 860 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 861   JavaThread* current = THREAD;
 862   CHECK_THROW_NOSYNC_IMSE(obj);
 863 
 864   markWord mark = obj->mark();
 865   if (LockingMode == LM_LIGHTWEIGHT) {
 866     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 867       // Not inflated so there can't be any waiters to notify.
 868       return;
 869     }
 870   } else if (LockingMode == LM_LEGACY) {
 871     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 872       // Not inflated so there can't be any waiters to notify.
 873       return;
 874     }
 875   }
 876   // The ObjectMonitor* can't be async deflated until ownership is
 877   // dropped by the calling thread.
 878   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 879   monitor->notify(CHECK);
 880 }
 881 
 882 // NOTE: see comment of notify()
 883 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 884   JavaThread* current = THREAD;
 885   CHECK_THROW_NOSYNC_IMSE(obj);
 886 
 887   markWord mark = obj->mark();
 888   if (LockingMode == LM_LIGHTWEIGHT) {
 889     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 890       // Not inflated so there can't be any waiters to notify.
 891       return;
 892     }
 893   } else if (LockingMode == LM_LEGACY) {
 894     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 895       // Not inflated so there can't be any waiters to notify.
 896       return;
 897     }
 898   }
 899   // The ObjectMonitor* can't be async deflated until ownership is
 900   // dropped by the calling thread.
 901   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 902   monitor->notifyAll(CHECK);
 903 }
 904 
 905 // -----------------------------------------------------------------------------

1020     // This is probably the best overall implementation -- we'll
1021     // likely make this the default in future releases.
1022     unsigned t = current->_hashStateX;
1023     t ^= (t << 11);
1024     current->_hashStateX = current->_hashStateY;
1025     current->_hashStateY = current->_hashStateZ;
1026     current->_hashStateZ = current->_hashStateW;
1027     unsigned v = current->_hashStateW;
1028     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
1029     current->_hashStateW = v;
1030     value = v;
1031   }
1032 
1033   value &= markWord::hash_mask;
1034   if (value == 0) value = 0xBAD;
1035   assert(value != markWord::no_hash, "invariant");
1036   return value;
1037 }
1038 
1039 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
1040   if (EnableValhalla && obj->klass()->is_inline_klass()) {
1041     // VM should be calling bootstrap method
1042     ShouldNotReachHere();
1043   }
1044 
1045   while (true) {
1046     ObjectMonitor* monitor = nullptr;
1047     markWord temp, test;
1048     intptr_t hash;
1049     markWord mark = read_stable_mark(obj);
1050     if (VerifyHeavyMonitors) {
1051       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1052       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1053     }
1054     if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1055       hash = mark.hash();
1056       if (hash != 0) {                     // if it has a hash, just return it
1057         return hash;
1058       }
1059       hash = get_next_hash(current, obj);  // get a new hash
1060       temp = mark.copy_set_hash(hash);     // merge the hash into header
1061                                            // try to install the hash
1062       test = obj->cas_set_mark(temp, mark);
1063       if (test == mark) {                  // if the hash was installed, return it

1141         hash = test.hash();
1142         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1143         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1144       }
1145       if (monitor->is_being_async_deflated()) {
1146         // If we detect that async deflation has occurred, then we
1147         // attempt to restore the header/dmw to the object's header
1148         // so that we only retry once if the deflater thread happens
1149         // to be slow.
1150         monitor->install_displaced_markword_in_object(obj);
1151         continue;
1152       }
1153     }
1154     // We finally get the hash.
1155     return hash;
1156   }
1157 }
1158 
1159 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1160                                                    Handle h_obj) {
1161   if (EnableValhalla && h_obj->mark().is_inline_type()) {
1162     return false;
1163   }
1164   assert(current == JavaThread::current(), "Can only be called on current thread");
1165   oop obj = h_obj();
1166 
1167   markWord mark = read_stable_mark(obj);
1168 
1169   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1170     // stack-locked case, header points into owner's stack
1171     return current->is_lock_owned((address)mark.locker());
1172   }
1173 
1174   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1175     // fast-locking case, see if lock is in current's lock stack
1176     return current->lock_stack().contains(h_obj());
1177   }
1178 
1179   if (mark.has_monitor()) {
1180     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1181     // The first stage of async deflation does not affect any field
1182     // used by this comparison so the ObjectMonitor* is usable here.
1183     ObjectMonitor* monitor = mark.monitor();

1427     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1428     return;
1429   }
1430   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1431 }
1432 
1433 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1434   assert(current == Thread::current(), "must be");
1435   if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1436     return inflate_impl(JavaThread::cast(current), obj, cause);
1437   }
1438   return inflate_impl(nullptr, obj, cause);
1439 }
1440 
// Inflate the lock on obj on behalf of 'thread', which must either be the
// current thread or be suspended for object deoptimization (the assert below
// enforces this), so that its state is stable while inflate_impl runs.
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  return inflate_impl(thread, obj, cause);
}
1445 
1446 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1447   if (EnableValhalla) {
1448     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1449   }
1450   // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1451   // that the inflating_thread == Thread::current() or is suspended throughout the call by
1452   // some other mechanism.
1453   // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
1454   // JavaThread. (As may still be the case from FastHashCode). However it is only
1455   // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
1456   // is set when called from ObjectSynchronizer::enter from the owning thread,
1457   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1458   EventJavaMonitorInflate event;
1459 
1460   for (;;) {
1461     const markWord mark = object->mark_acquire();
1462 
1463     // The mark can be in one of the following states:
1464     // *  inflated     - Just return if using stack-locking.
1465     //                   If using fast-locking and the ObjectMonitor owner
1466     //                   is anonymous and the inflating_thread owns the
1467     //                   object lock, then we make the inflating_thread
1468     //                   the ObjectMonitor owner and remove the lock from
1469     //                   the inflating_thread's lock stack.
< prev index next >