< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page

 266 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
 267 // removed from the system.
 268 //
 269 // Note: If the _in_use_list max exceeds the ceiling, then
 270 // monitors_used_above_threshold() will use the in_use_list max instead
 271 // of the thread count derived ceiling because we have used more
 272 // ObjectMonitors than the estimated average.
 273 //
 274 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 275 // no-progress async monitor deflation cycles in a row, then the ceiling
 276 // is adjusted upwards by monitors_used_above_threshold().
 277 //
 278 // Start the ceiling with the estimate for one thread in initialize()
 279 // which is called after cmd line options are processed.
 280 static size_t _in_use_list_ceiling = 0;
 281 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 282 bool volatile ObjectSynchronizer::_is_final_audit = false;
 283 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 284 static uintx _no_progress_cnt = 0;
 285 














 286 // =====================> Quick functions
 287 
 288 // The quick_* forms are special fast-path variants used to improve
 289 // performance.  In the simplest case, a "quick_*" implementation could
 290 // simply return false, in which case the caller will perform the necessary
 291 // state transitions and call the slow-path form.
 292 // The fast-path is designed to handle frequently arising cases in an efficient
 293 // manner and is just a degenerate "optimistic" variant of the slow-path.
 294 // returns true  -- to indicate the call was satisfied.
 295 // returns false -- to indicate the call needs the services of the slow-path.
 296 // A no-loitering ordinance is in effect for code in the quick_* family
 297 // operators: safepoints or indefinite blocking (blocking that might span a
 298 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 299 // entry.
 300 //
 301 // Consider: An interesting optimization is to have the JIT recognize the
 302 // following common idiom:
 303 //   synchronized (someobj) { .... ; notify(); }
 304 // That is, we find a notify() or notifyAll() call that immediately precedes
 305 // the monitorexit operation.  In that case the JIT could fuse the operations
 306 // into a single notifyAndExit() runtime primitive.
 307 
 308 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 309   assert(current->thread_state() == _thread_in_Java, "invariant");
 310   NoSafepointVerifier nsv;
 311   if (obj == NULL) return false;  // slow-path for invalid obj

 312   const markWord mark = obj->mark();
 313 
 314   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 315     // Degenerate notify
 316     // stack-locked by caller so by definition the implied waitset is empty.
 317     return true;
 318   }
 319 
 320   if (mark.has_monitor()) {
 321     ObjectMonitor* const mon = mark.monitor();
 322     assert(mon->object() == oop(obj), "invariant");
 323     if (mon->owner() != current) return false;  // slow-path for IMS exception
 324 
 325     if (mon->first_waiter() != NULL) {
 326       // We have one or more waiters. Since this is an inflated monitor
 327       // that we own, we can transfer one or more threads from the waitset
 328       // to the entrylist here and now, avoiding the slow-path.
 329       if (all) {
 330         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 331       } else {

 340     }
 341     return true;
 342   }
 343 
 344   // other IMS exception states take the slow-path
 345   return false;
 346 }
 347 
 348 
 349 // The LockNode emitted directly at the synchronization site would have
 350 // been too big if it were to have included support for the cases of inflated
 351 // recursive enter and exit, so they go here instead.
 352 // Note that we can't safely call AsyncPrintJavaStack() from within
 353 // quick_enter() as our thread state remains _in_Java.
 354 
 355 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 356                                      BasicLock * lock) {
 357   assert(current->thread_state() == _thread_in_Java, "invariant");
 358   NoSafepointVerifier nsv;
 359   if (obj == NULL) return false;       // Need to throw NPE

 360 
 361   if (obj->klass()->is_value_based()) {
 362     return false;
 363   }
 364 
 365   const markWord mark = obj->mark();
 366 
 367   if (mark.has_monitor()) {
 368     ObjectMonitor* const m = mark.monitor();
 369     // An async deflation or GC can race us before we manage to make
 370     // the ObjectMonitor busy by setting the owner below. If we detect
 371     // that race we just bail out to the slow-path here.
 372     if (m->object_peek() == NULL) {
 373       return false;
 374     }
 375     JavaThread* const owner = (JavaThread*) m->owner_raw();
 376 
 377     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 378     // and observability
 379     // Case: light contention possibly amenable to TLE

 459   if (bcp_was_adjusted) {
 460     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 461   }
 462 }
 463 
 464 static bool useHeavyMonitors() {
 465 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
 466   return UseHeavyMonitors;
 467 #else
 468   return false;
 469 #endif
 470 }
 471 
 472 // -----------------------------------------------------------------------------
 473 // Monitor Enter/Exit
 474 // The interpreter and compiler assembly code tries to lock using the fast path
 475 // of this algorithm. Make sure to update that code if the following function is
 476 // changed. The implementation is extremely sensitive to race condition. Be careful.
 477 
 478 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {

 479   if (obj->klass()->is_value_based()) {
 480     handle_sync_on_value_based_class(obj, current);
 481   }
 482 
 483   current->inc_held_monitor_count();
 484 
 485   if (!useHeavyMonitors()) {
 486     markWord mark = obj->mark();
 487     if (mark.is_neutral()) {
 488       // Anticipate successful CAS -- the ST of the displaced mark must
 489       // be visible <= the ST performed by the CAS.
 490       lock->set_displaced_header(mark);
 491       if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 492         return;
 493       }
 494       // Fall through to inflate() ...
 495     } else if (mark.has_locker() &&
 496                current->is_lock_owned((address)mark.locker())) {
 497       assert(lock != mark.locker(), "must not re-lock the same lock");
 498       assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");

 508   } else if (VerifyHeavyMonitors) {
 509     guarantee(!obj->mark().has_locker(), "must not be stack-locked");
 510   }
 511 
 512   // An async deflation can race after the inflate() call and before
 513   // enter() can make the ObjectMonitor busy. enter() returns false if
 514   // we have lost the race to async deflation and we simply try again.
 515   while (true) {
 516     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 517     if (monitor->enter(current)) {
 518       return;
 519     }
 520   }
 521 }
 522 
 523 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 524   current->dec_held_monitor_count();
 525 
 526   if (!useHeavyMonitors()) {
 527     markWord mark = object->mark();




 528 
 529     markWord dhw = lock->displaced_header();
 530     if (dhw.value() == 0) {
 531       // If the displaced header is NULL, then this exit matches up with
 532       // a recursive enter. No real work to do here except for diagnostics.
 533 #ifndef PRODUCT
 534       if (mark != markWord::INFLATING()) {
 535         // Only do diagnostics if we are not racing an inflation. Simply
 536         // exiting a recursive enter of a Java Monitor that is being
 537         // inflated is safe; see the has_monitor() comment below.
 538         assert(!mark.is_neutral(), "invariant");
 539         assert(!mark.has_locker() ||
 540         current->is_lock_owned((address)mark.locker()), "invariant");
 541         if (mark.has_monitor()) {
 542           // The BasicLock's displaced_header is marked as a recursive
 543           // enter and we have an inflated Java Monitor (ObjectMonitor).
 544           // This is a special case where the Java Monitor was inflated
 545           // after this thread entered the stack-lock recursively. When a
 546           // Java Monitor is inflated, we cannot safely walk the Java
 547           // Monitor owner's stack and update the BasicLocks because a

 571   // We have to take the slow-path of possible inflation and then exit.
 572   // The ObjectMonitor* can't be async deflated until ownership is
 573   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 574   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 575   monitor->exit(current);
 576 }
 577 
 578 // -----------------------------------------------------------------------------
 579 // Class Loader  support to workaround deadlocks on the class loader lock objects
 580 // Also used by GC
 581 // complete_exit()/reenter() are used to wait on a nested lock
 582 // i.e. to give up an outer lock completely and then re-enter
 583 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 584 //  1) complete_exit lock1 - saving recursion count
 585 //  2) wait on lock2
 586 //  3) when notified on lock2, unlock lock2
 587 //  4) reenter lock1 with original recursion count
 588 //  5) lock lock2
 589 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 590 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {


 591   // The ObjectMonitor* can't be async deflated until ownership is
 592   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 593   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 594   intx recur_count = monitor->complete_exit(current);
 595   current->dec_held_monitor_count(recur_count + 1);
 596   return recur_count;
 597 }
 598 
 599 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 600 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {


 601   // An async deflation can race after the inflate() call and before
 602   // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
 603   // enter() returns false if we have lost the race to async deflation
 604   // and we simply try again.
 605   while (true) {
 606     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 607     if (monitor->reenter(recursions, current)) {
 608       current->inc_held_monitor_count(recursions + 1);
 609       return;
 610     }
 611   }
 612 }
 613 
 614 // -----------------------------------------------------------------------------
 615 // JNI locks on java objects
 616 // NOTE: must use heavy weight monitor to handle jni monitor enter
 617 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 618   if (obj->klass()->is_value_based()) {
 619     handle_sync_on_value_based_class(obj, current);
 620   }

 621 
 622   // the current locking is from JNI instead of Java code
 623   current->set_current_pending_monitor_is_from_java(false);
 624   // An async deflation can race after the inflate() call and before
 625   // enter() can make the ObjectMonitor busy. enter() returns false if
 626   // we have lost the race to async deflation and we simply try again.
 627   while (true) {
 628     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 629     if (monitor->enter(current)) {
 630       current->inc_held_monitor_count(1, true);
 631       break;
 632     }
 633   }
 634   current->set_current_pending_monitor_is_from_java(true);
 635 }
 636 
 637 // NOTE: must use heavy weight monitor to handle jni monitor exit
 638 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 639   JavaThread* current = THREAD;

 640 
 641   // The ObjectMonitor* can't be async deflated until ownership is
 642   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 643   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 644   // If this thread has locked the object, exit the monitor. We
 645   // intentionally do not use CHECK on check_owner because we must exit the
 646   // monitor even if an exception was already pending.
 647   if (monitor->check_owner(THREAD)) {
 648     monitor->exit(current);
 649     current->dec_held_monitor_count(1, true);
 650   }
 651 }
 652 
 653 // -----------------------------------------------------------------------------
 654 // Internal VM locks on java objects
 655 // standard constructor, allows locking failures
 656 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 657   _thread = thread;
 658   _thread->check_for_valid_safepoint_state();
 659   _obj = obj;
 660 
 661   if (_obj() != NULL) {
 662     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 663   }
 664 }
 665 
 666 ObjectLocker::~ObjectLocker() {
 667   if (_obj() != NULL) {
 668     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 669   }
 670 }
 671 
 672 
 673 // -----------------------------------------------------------------------------
 674 //  Wait/Notify/NotifyAll
 675 // NOTE: must use heavy weight monitor to handle wait()
 676 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 677   JavaThread* current = THREAD;

 678   if (millis < 0) {
 679     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 680   }
 681   // The ObjectMonitor* can't be async deflated because the _waiters
 682   // field is incremented before ownership is dropped and decremented
 683   // after ownership is regained.
 684   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 685 
 686   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 687   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 688 
 689   // This dummy call is in place to get around dtrace bug 6254741.  Once
 690   // that's fixed we can uncomment the following line, remove the call
 691   // and change this function back into a "void" func.
 692   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 693   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 694   return ret_code;
 695 }
 696 
 697 // No exception are possible in this case as we only use this internally when locking is
 698 // correct and we have to wait until notified - so no interrupts or timeouts.
 699 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {

 700   // The ObjectMonitor* can't be async deflated because the _waiters
 701   // field is incremented before ownership is dropped and decremented
 702   // after ownership is regained.
 703   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 704   monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
 705 }
 706 
 707 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 708   JavaThread* current = THREAD;

 709 
 710   markWord mark = obj->mark();
 711   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 712     // Not inflated so there can't be any waiters to notify.
 713     return;
 714   }
 715   // The ObjectMonitor* can't be async deflated until ownership is
 716   // dropped by the calling thread.
 717   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 718   monitor->notify(CHECK);
 719 }
 720 
 721 // NOTE: see comment of notify()
 722 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 723   JavaThread* current = THREAD;

 724 
 725   markWord mark = obj->mark();
 726   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 727     // Not inflated so there can't be any waiters to notify.
 728     return;
 729   }
 730   // The ObjectMonitor* can't be async deflated until ownership is
 731   // dropped by the calling thread.
 732   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 733   monitor->notifyAll(CHECK);
 734 }
 735 
 736 // -----------------------------------------------------------------------------
 737 // Hash Code handling
 738 
 739 struct SharedGlobals {
 740   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 741   // This is a highly shared mostly-read variable.
 742   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 743   volatile int stw_random;

 850     // This is probably the best overall implementation -- we'll
 851     // likely make this the default in future releases.
 852     unsigned t = current->_hashStateX;
 853     t ^= (t << 11);
 854     current->_hashStateX = current->_hashStateY;
 855     current->_hashStateY = current->_hashStateZ;
 856     current->_hashStateZ = current->_hashStateW;
 857     unsigned v = current->_hashStateW;
 858     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 859     current->_hashStateW = v;
 860     value = v;
 861   }
 862 
 863   value &= markWord::hash_mask;
 864   if (value == 0) value = 0xBAD;
 865   assert(value != markWord::no_hash, "invariant");
 866   return value;
 867 }
 868 
 869 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {




 870 
 871   while (true) {
 872     ObjectMonitor* monitor = NULL;
 873     markWord temp, test;
 874     intptr_t hash;
 875     markWord mark = read_stable_mark(obj);
 876     if (VerifyHeavyMonitors) {
 877       assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
 878       guarantee(!mark.has_locker(), "must not be stack locked");
 879     }
 880     if (mark.is_neutral()) {               // if this is a normal header
 881       hash = mark.hash();
 882       if (hash != 0) {                     // if it has a hash, just return it
 883         return hash;
 884       }
 885       hash = get_next_hash(current, obj);  // get a new hash
 886       temp = mark.copy_set_hash(hash);     // merge the hash into header
 887                                            // try to install the hash
 888       test = obj->cas_set_mark(temp, mark);
 889       if (test == mark) {                  // if the hash was installed, return it

 961         // If we add any new usages of the header/dmw field, this code
 962         // will need to be updated.
 963         hash = test.hash();
 964         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
 965         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
 966       }
 967       if (monitor->is_being_async_deflated()) {
 968         // If we detect that async deflation has occurred, then we
 969         // attempt to restore the header/dmw to the object's header
 970         // so that we only retry once if the deflater thread happens
 971         // to be slow.
 972         monitor->install_displaced_markword_in_object(obj);
 973         continue;
 974       }
 975     }
 976     // We finally get the hash.
 977     return hash;
 978   }
 979 }
 980 
 981 // Deprecated -- use FastHashCode() instead.
 982 
 983 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 984   return FastHashCode(Thread::current(), obj());
 985 }
 986 
 987 
 988 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
 989                                                    Handle h_obj) {



 990   assert(current == JavaThread::current(), "Can only be called on current thread");
 991   oop obj = h_obj();
 992 
 993   markWord mark = read_stable_mark(obj);
 994 
 995   // Uncontended case, header points to stack
 996   if (mark.has_locker()) {
 997     return current->is_lock_owned((address)mark.locker());
 998   }
 999   // Contended case, header points to ObjectMonitor (tagged pointer)
1000   if (mark.has_monitor()) {
1001     // The first stage of async deflation does not affect any field
1002     // used by this comparison so the ObjectMonitor* is usable here.
1003     ObjectMonitor* monitor = mark.monitor();
1004     return monitor->is_entered(current) != 0;
1005   }
1006   // Unlocked case, header in place
1007   assert(mark.is_neutral(), "sanity check");
1008   return false;
1009 }

1204   event->set_monitorClass(obj->klass());
1205   event->set_address((uintptr_t)(void*)obj);
1206   event->set_cause((u1)cause);
1207   event->commit();
1208 }
1209 
1210 // Fast path code shared by multiple functions
1211 void ObjectSynchronizer::inflate_helper(oop obj) {
1212   markWord mark = obj->mark_acquire();
1213   if (mark.has_monitor()) {
1214     ObjectMonitor* monitor = mark.monitor();
1215     markWord dmw = monitor->header();
1216     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1217     return;
1218   }
1219   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1220 }
1221 
1222 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1223                                            const InflateCause cause) {




1224   EventJavaMonitorInflate event;
1225 
1226   for (;;) {
1227     const markWord mark = object->mark_acquire();
1228 
1229     // The mark can be in one of the following states:
1230     // *  Inflated     - just return
1231     // *  Stack-locked - coerce it to inflated
1232     // *  INFLATING    - busy wait for conversion to complete
1233     // *  Neutral      - aggressively inflate the object.
1234 
1235     // CASE: inflated
1236     if (mark.has_monitor()) {
1237       ObjectMonitor* inf = mark.monitor();
1238       markWord dmw = inf->header();
1239       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1240       return inf;
1241     }
1242 
1243     // CASE: inflation in progress - inflating over a stack-lock.

 266 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
 267 // removed from the system.
 268 //
 269 // Note: If the _in_use_list max exceeds the ceiling, then
 270 // monitors_used_above_threshold() will use the in_use_list max instead
 271 // of the thread count derived ceiling because we have used more
 272 // ObjectMonitors than the estimated average.
 273 //
 274 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 275 // no-progress async monitor deflation cycles in a row, then the ceiling
 276 // is adjusted upwards by monitors_used_above_threshold().
 277 //
 278 // Start the ceiling with the estimate for one thread in initialize()
 279 // which is called after cmd line options are processed.
 280 static size_t _in_use_list_ceiling = 0;
 281 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 282 bool volatile ObjectSynchronizer::_is_final_audit = false;
 283 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 284 static uintx _no_progress_cnt = 0;
 285 
// Valhalla: inline types (value objects) have no identity and must not be
// used for synchronization. When EnableValhalla is on and obj is an inline
// type, this macro throws IllegalMonitorStateException via THROW_MSG and
// returns from the enclosing void function. NOTE: the expansion site must
// have a JavaThread* named `current` in scope.
 286 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 287   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 288     JavaThread* THREAD = current;           \
 289     ResourceMark rm(THREAD);                \
 290     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 291   }
 292 
// Same check for value-returning functions: THROW_MSG_0 makes the
// enclosing function return 0 after raising the exception (see its use in
// ObjectSynchronizer::wait(), which returns int).
 293 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 294   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 295     JavaThread* THREAD = current;             \
 296     ResourceMark rm(THREAD);                  \
 297     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 298   }
 299 
 300 // =====================> Quick functions
 301 
 302 // The quick_* forms are special fast-path variants used to improve
 303 // performance.  In the simplest case, a "quick_*" implementation could
 304 // simply return false, in which case the caller will perform the necessary
 305 // state transitions and call the slow-path form.
 306 // The fast-path is designed to handle frequently arising cases in an efficient
 307 // manner and is just a degenerate "optimistic" variant of the slow-path.
 308 // returns true  -- to indicate the call was satisfied.
 309 // returns false -- to indicate the call needs the services of the slow-path.
 310 // A no-loitering ordinance is in effect for code in the quick_* family
 311 // operators: safepoints or indefinite blocking (blocking that might span a
 312 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 313 // entry.
 314 //
 315 // Consider: An interesting optimization is to have the JIT recognize the
 316 // following common idiom:
 317 //   synchronized (someobj) { .... ; notify(); }
 318 // That is, we find a notify() or notifyAll() call that immediately precedes
 319 // the monitorexit operation.  In that case the JIT could fuse the operations
 320 // into a single notifyAndExit() runtime primitive.
 321 
 322 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 323   assert(current->thread_state() == _thread_in_Java, "invariant");
 324   NoSafepointVerifier nsv;
 325   if (obj == NULL) return false;  // slow-path for invalid obj
 326   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 327   const markWord mark = obj->mark();
 328 
 329   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 330     // Degenerate notify
 331     // stack-locked by caller so by definition the implied waitset is empty.
 332     return true;
 333   }
 334 
 335   if (mark.has_monitor()) {
 336     ObjectMonitor* const mon = mark.monitor();
 337     assert(mon->object() == oop(obj), "invariant");
 338     if (mon->owner() != current) return false;  // slow-path for IMS exception
 339 
 340     if (mon->first_waiter() != NULL) {
 341       // We have one or more waiters. Since this is an inflated monitor
 342       // that we own, we can transfer one or more threads from the waitset
 343       // to the entrylist here and now, avoiding the slow-path.
 344       if (all) {
 345         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 346       } else {

 355     }
 356     return true;
 357   }
 358 
 359   // other IMS exception states take the slow-path
 360   return false;
 361 }
 362 
 363 
 364 // The LockNode emitted directly at the synchronization site would have
 365 // been too big if it were to have included support for the cases of inflated
 366 // recursive enter and exit, so they go here instead.
 367 // Note that we can't safely call AsyncPrintJavaStack() from within
 368 // quick_enter() as our thread state remains _in_Java.
 369 
 370 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 371                                      BasicLock * lock) {
 372   assert(current->thread_state() == _thread_in_Java, "invariant");
 373   NoSafepointVerifier nsv;
 374   if (obj == NULL) return false;       // Need to throw NPE
 375   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 376 
 377   if (obj->klass()->is_value_based()) {
 378     return false;
 379   }
 380 
 381   const markWord mark = obj->mark();
 382 
 383   if (mark.has_monitor()) {
 384     ObjectMonitor* const m = mark.monitor();
 385     // An async deflation or GC can race us before we manage to make
 386     // the ObjectMonitor busy by setting the owner below. If we detect
 387     // that race we just bail out to the slow-path here.
 388     if (m->object_peek() == NULL) {
 389       return false;
 390     }
 391     JavaThread* const owner = (JavaThread*) m->owner_raw();
 392 
 393     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 394     // and observability
 395     // Case: light contention possibly amenable to TLE

 475   if (bcp_was_adjusted) {
 476     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 477   }
 478 }
 479 
 480 static bool useHeavyMonitors() {
 481 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
 482   return UseHeavyMonitors;
 483 #else
 484   return false;
 485 #endif
 486 }
 487 
 488 // -----------------------------------------------------------------------------
 489 // Monitor Enter/Exit
 490 // The interpreter and compiler assembly code tries to lock using the fast path
 491 // of this algorithm. Make sure to update that code if the following function is
 492 // changed. The implementation is extremely sensitive to race condition. Be careful.
 493 
 494 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 495   CHECK_THROW_NOSYNC_IMSE(obj);
 496   if (obj->klass()->is_value_based()) {
 497     handle_sync_on_value_based_class(obj, current);
 498   }
 499 
 500   current->inc_held_monitor_count();
 501 
 502   if (!useHeavyMonitors()) {
 503     markWord mark = obj->mark();
 504     if (mark.is_neutral()) {
 505       // Anticipate successful CAS -- the ST of the displaced mark must
 506       // be visible <= the ST performed by the CAS.
 507       lock->set_displaced_header(mark);
 508       if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 509         return;
 510       }
 511       // Fall through to inflate() ...
 512     } else if (mark.has_locker() &&
 513                current->is_lock_owned((address)mark.locker())) {
 514       assert(lock != mark.locker(), "must not re-lock the same lock");
 515       assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");

 525   } else if (VerifyHeavyMonitors) {
 526     guarantee(!obj->mark().has_locker(), "must not be stack-locked");
 527   }
 528 
 529   // An async deflation can race after the inflate() call and before
 530   // enter() can make the ObjectMonitor busy. enter() returns false if
 531   // we have lost the race to async deflation and we simply try again.
 532   while (true) {
 533     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 534     if (monitor->enter(current)) {
 535       return;
 536     }
 537   }
 538 }
 539 
 540 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 541   current->dec_held_monitor_count();
 542 
 543   if (!useHeavyMonitors()) {
 544     markWord mark = object->mark();
 545     if (EnableValhalla && mark.is_inline_type()) {
 546       return;
 547     }
 548     assert(!EnableValhalla || !object->klass()->is_inline_klass(), "monitor op on inline type");
 549 
 550     markWord dhw = lock->displaced_header();
 551     if (dhw.value() == 0) {
 552       // If the displaced header is NULL, then this exit matches up with
 553       // a recursive enter. No real work to do here except for diagnostics.
 554 #ifndef PRODUCT
 555       if (mark != markWord::INFLATING()) {
 556         // Only do diagnostics if we are not racing an inflation. Simply
 557         // exiting a recursive enter of a Java Monitor that is being
 558         // inflated is safe; see the has_monitor() comment below.
 559         assert(!mark.is_neutral(), "invariant");
 560         assert(!mark.has_locker() ||
 561         current->is_lock_owned((address)mark.locker()), "invariant");
 562         if (mark.has_monitor()) {
 563           // The BasicLock's displaced_header is marked as a recursive
 564           // enter and we have an inflated Java Monitor (ObjectMonitor).
 565           // This is a special case where the Java Monitor was inflated
 566           // after this thread entered the stack-lock recursively. When a
 567           // Java Monitor is inflated, we cannot safely walk the Java
 568           // Monitor owner's stack and update the BasicLocks because a

 592   // We have to take the slow-path of possible inflation and then exit.
 593   // The ObjectMonitor* can't be async deflated until ownership is
 594   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 595   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 596   monitor->exit(current);
 597 }
 598 
 599 // -----------------------------------------------------------------------------
 600 // Class Loader  support to workaround deadlocks on the class loader lock objects
 601 // Also used by GC
 602 // complete_exit()/reenter() are used to wait on a nested lock
 603 // i.e. to give up an outer lock completely and then re-enter
 604 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 605 //  1) complete_exit lock1 - saving recursion count
 606 //  2) wait on lock2
 607 //  3) when notified on lock2, unlock lock2
 608 //  4) reenter lock1 with original recursion count
 609 //  5) lock lock2
 610 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 611 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
 612   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 613 
 614   // The ObjectMonitor* can't be async deflated until ownership is
 615   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 616   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 617   intx recur_count = monitor->complete_exit(current);
 618   current->dec_held_monitor_count(recur_count + 1);
 619   return recur_count;
 620 }
 621 
 622 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 623 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
 624   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 625 
 626   // An async deflation can race after the inflate() call and before
 627   // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
 628   // enter() returns false if we have lost the race to async deflation
 629   // and we simply try again.
 630   while (true) {
 631     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 632     if (monitor->reenter(recursions, current)) {
 633       current->inc_held_monitor_count(recursions + 1);
 634       return;
 635     }
 636   }
 637 }
 638 
 639 // -----------------------------------------------------------------------------
 640 // JNI locks on java objects
 641 // NOTE: must use heavy weight monitor to handle jni monitor enter
 642 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 643   if (obj->klass()->is_value_based()) {
 644     handle_sync_on_value_based_class(obj, current);
 645   }
 646   CHECK_THROW_NOSYNC_IMSE(obj);
 647 
 648   // the current locking is from JNI instead of Java code
 649   current->set_current_pending_monitor_is_from_java(false);
 650   // An async deflation can race after the inflate() call and before
 651   // enter() can make the ObjectMonitor busy. enter() returns false if
 652   // we have lost the race to async deflation and we simply try again.
 653   while (true) {
 654     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 655     if (monitor->enter(current)) {
 656       current->inc_held_monitor_count(1, true);
 657       break;
 658     }
 659   }
 660   current->set_current_pending_monitor_is_from_java(true);
 661 }
 662 
 663 // NOTE: must use heavy weight monitor to handle jni monitor exit
 664 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 665   JavaThread* current = THREAD;
 666   CHECK_THROW_NOSYNC_IMSE(obj);
 667 
 668   // The ObjectMonitor* can't be async deflated until ownership is
 669   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 670   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 671   // If this thread has locked the object, exit the monitor. We
 672   // intentionally do not use CHECK on check_owner because we must exit the
 673   // monitor even if an exception was already pending.
 674   if (monitor->check_owner(THREAD)) {
 675     monitor->exit(current);
 676     current->dec_held_monitor_count(1, true);
 677   }
 678 }
 679 
 680 // -----------------------------------------------------------------------------
 681 // Internal VM locks on java objects
 682 // standard constructor, allows locking failures
 683 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 684   _thread = thread;
 685   _thread->check_for_valid_safepoint_state();
 686   _obj = obj;
 687 
 688   if (_obj() != NULL) {
 689     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 690   }
 691 }
 692 
 693 ObjectLocker::~ObjectLocker() {
 694   if (_obj() != NULL) {
 695     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 696   }
 697 }
 698 
 699 
 700 // -----------------------------------------------------------------------------
 701 //  Wait/Notify/NotifyAll
 702 // NOTE: must use heavy weight monitor to handle wait()
 703 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 704   JavaThread* current = THREAD;
 705   CHECK_THROW_NOSYNC_IMSE_0(obj);
 706   if (millis < 0) {
 707     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 708   }
 709   // The ObjectMonitor* can't be async deflated because the _waiters
 710   // field is incremented before ownership is dropped and decremented
 711   // after ownership is regained.
 712   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 713 
 714   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 715   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 716 
 717   // This dummy call is in place to get around dtrace bug 6254741.  Once
 718   // that's fixed we can uncomment the following line, remove the call
 719   // and change this function back into a "void" func.
 720   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 721   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 722   return ret_code;
 723 }
 724 
 725 // No exception are possible in this case as we only use this internally when locking is
 726 // correct and we have to wait until notified - so no interrupts or timeouts.
 727 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
 728   CHECK_THROW_NOSYNC_IMSE(obj);
 729   // The ObjectMonitor* can't be async deflated because the _waiters
 730   // field is incremented before ownership is dropped and decremented
 731   // after ownership is regained.
 732   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 733   monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
 734 }
 735 
 736 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 737   JavaThread* current = THREAD;
 738   CHECK_THROW_NOSYNC_IMSE(obj);
 739 
 740   markWord mark = obj->mark();
 741   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 742     // Not inflated so there can't be any waiters to notify.
 743     return;
 744   }
 745   // The ObjectMonitor* can't be async deflated until ownership is
 746   // dropped by the calling thread.
 747   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 748   monitor->notify(CHECK);
 749 }
 750 
 751 // NOTE: see comment of notify()
 752 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 753   JavaThread* current = THREAD;
 754   CHECK_THROW_NOSYNC_IMSE(obj);
 755 
 756   markWord mark = obj->mark();
 757   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 758     // Not inflated so there can't be any waiters to notify.
 759     return;
 760   }
 761   // The ObjectMonitor* can't be async deflated until ownership is
 762   // dropped by the calling thread.
 763   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 764   monitor->notifyAll(CHECK);
 765 }
 766 
 767 // -----------------------------------------------------------------------------
 768 // Hash Code handling
 769 
 770 struct SharedGlobals {
 771   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 772   // This is a highly shared mostly-read variable.
 773   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 774   volatile int stw_random;

 881     // This is probably the best overall implementation -- we'll
 882     // likely make this the default in future releases.
 883     unsigned t = current->_hashStateX;
 884     t ^= (t << 11);
 885     current->_hashStateX = current->_hashStateY;
 886     current->_hashStateY = current->_hashStateZ;
 887     current->_hashStateZ = current->_hashStateW;
 888     unsigned v = current->_hashStateW;
 889     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 890     current->_hashStateW = v;
 891     value = v;
 892   }
 893 
 894   value &= markWord::hash_mask;
 895   if (value == 0) value = 0xBAD;
 896   assert(value != markWord::no_hash, "invariant");
 897   return value;
 898 }
 899 
 900 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 901   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 902     // VM should be calling bootstrap method
 903     ShouldNotReachHere();
 904   }
 905 
 906   while (true) {
 907     ObjectMonitor* monitor = NULL;
 908     markWord temp, test;
 909     intptr_t hash;
 910     markWord mark = read_stable_mark(obj);
 911     if (VerifyHeavyMonitors) {
 912       assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
 913       guarantee(!mark.has_locker(), "must not be stack locked");
 914     }
 915     if (mark.is_neutral()) {               // if this is a normal header
 916       hash = mark.hash();
 917       if (hash != 0) {                     // if it has a hash, just return it
 918         return hash;
 919       }
 920       hash = get_next_hash(current, obj);  // get a new hash
 921       temp = mark.copy_set_hash(hash);     // merge the hash into header
 922                                            // try to install the hash
 923       test = obj->cas_set_mark(temp, mark);
 924       if (test == mark) {                  // if the hash was installed, return it

 996         // If we add any new usages of the header/dmw field, this code
 997         // will need to be updated.
 998         hash = test.hash();
 999         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1000         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1001       }
1002       if (monitor->is_being_async_deflated()) {
1003         // If we detect that async deflation has occurred, then we
1004         // attempt to restore the header/dmw to the object's header
1005         // so that we only retry once if the deflater thread happens
1006         // to be slow.
1007         monitor->install_displaced_markword_in_object(obj);
1008         continue;
1009       }
1010     }
1011     // We finally get the hash.
1012     return hash;
1013   }
1014 }
1015 






1016 
1017 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1018                                                    Handle h_obj) {
1019   if (EnableValhalla && h_obj->mark().is_inline_type()) {
1020     return false;
1021   }
1022   assert(current == JavaThread::current(), "Can only be called on current thread");
1023   oop obj = h_obj();
1024 
1025   markWord mark = read_stable_mark(obj);
1026 
1027   // Uncontended case, header points to stack
1028   if (mark.has_locker()) {
1029     return current->is_lock_owned((address)mark.locker());
1030   }
1031   // Contended case, header points to ObjectMonitor (tagged pointer)
1032   if (mark.has_monitor()) {
1033     // The first stage of async deflation does not affect any field
1034     // used by this comparison so the ObjectMonitor* is usable here.
1035     ObjectMonitor* monitor = mark.monitor();
1036     return monitor->is_entered(current) != 0;
1037   }
1038   // Unlocked case, header in place
1039   assert(mark.is_neutral(), "sanity check");
1040   return false;
1041 }

1236   event->set_monitorClass(obj->klass());
1237   event->set_address((uintptr_t)(void*)obj);
1238   event->set_cause((u1)cause);
1239   event->commit();
1240 }
1241 
1242 // Fast path code shared by multiple functions
1243 void ObjectSynchronizer::inflate_helper(oop obj) {
1244   markWord mark = obj->mark_acquire();
1245   if (mark.has_monitor()) {
1246     ObjectMonitor* monitor = mark.monitor();
1247     markWord dmw = monitor->header();
1248     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1249     return;
1250   }
1251   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1252 }
1253 
1254 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1255                                            const InflateCause cause) {
1256   if (EnableValhalla) {
1257     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1258   }
1259 
1260   EventJavaMonitorInflate event;
1261 
1262   for (;;) {
1263     const markWord mark = object->mark_acquire();
1264 
1265     // The mark can be in one of the following states:
1266     // *  Inflated     - just return
1267     // *  Stack-locked - coerce it to inflated
1268     // *  INFLATING    - busy wait for conversion to complete
1269     // *  Neutral      - aggressively inflate the object.
1270 
1271     // CASE: inflated
1272     if (mark.has_monitor()) {
1273       ObjectMonitor* inf = mark.monitor();
1274       markWord dmw = inf->header();
1275       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1276       return inf;
1277     }
1278 
1279     // CASE: inflation in progress - inflating over a stack-lock.
< prev index next >