src/hotspot/share/runtime/synchronizer.cpp


 298 // removed from the system.
 299 //
 300 // Note: If the _in_use_list max exceeds the ceiling, then
 301 // monitors_used_above_threshold() will use the in_use_list max instead
 302 // of the thread count derived ceiling because we have used more
 303 // ObjectMonitors than the estimated average.
 304 //
 305 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 306 // no-progress async monitor deflation cycles in a row, then the ceiling
 307 // is adjusted upwards by monitors_used_above_threshold().
 308 //
 309 // Start the ceiling with the estimate for one thread in initialize()
 310 // which is called after cmd line options are processed.
 311 static size_t _in_use_list_ceiling = 0;
 312 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 313 bool volatile ObjectSynchronizer::_is_final_audit = false;
 314 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 315 static uintx _no_progress_cnt = 0;
 316 static bool _no_progress_skip_increment = false;
 317 
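
// Illustrative sketch (not the actual implementation): the ceiling check
// described above, reduced to its core. The real logic lives in
// monitors_used_above_threshold(), which additionally consults the
// _in_use_list max and the no-progress ceiling adjustment.
//
//   static bool used_above_threshold_sketch(size_t used, size_t ceiling) {
//     // MonitorUsedDeflationThreshold is a percentage flag in [0, 100].
//     return used > ceiling * MonitorUsedDeflationThreshold / 100;
//   }
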
 318 // These checks are required for wait, notify and exit to avoid inflating the
 319 // monitor only to find out that this inline type object cannot be locked.
 320 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 321   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 322     JavaThread* THREAD = current;           \
 323     ResourceMark rm(THREAD);                \
 324     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 325   }
 326 
 327 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 328   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 329     JavaThread* THREAD = current;             \
 330     ResourceMark rm(THREAD);                  \
 331     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 332   }
 333 
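
// Usage sketch for the macros above (illustrative): they expect a local
// JavaThread* named `current` in scope, and they return from the
// enclosing function after throwing, e.g.:
//
//   int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
//     JavaThread* current = THREAD;
//     CHECK_THROW_NOSYNC_IMSE_0(obj);  // throws IMSE for inline types
//     ...
//   }
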
 334 // =====================> Quick functions
 335 
 336 // The quick_* forms are special fast-path variants used to improve
 337 // performance.  In the simplest case, a "quick_*" implementation could
 338 // simply return false, in which case the caller will perform the necessary
 339 // state transitions and call the slow-path form.
 340 // The fast-path is designed to handle frequently arising cases in an efficient
 341 // manner and is just a degenerate "optimistic" variant of the slow-path.
 342 // returns true  -- to indicate the call was satisfied.
 343 // returns false -- to indicate the call needs the services of the slow-path.
 344 // A no-loitering ordinance is in effect for code in the quick_* family
 345 // operators: safepoints or indefinite blocking (blocking that might span a
 346 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 347 // entry.
 348 //
 349 // Consider: An interesting optimization is to have the JIT recognize the
 350 // following common idiom:
 351 //   synchronized (someobj) { .... ; notify(); }
 352 // That is, we find a notify() or notifyAll() call that immediately precedes
 353 // the monitorexit operation.  In that case the JIT could fuse the operations
 354 // into a single notifyAndExit() runtime primitive.
 355 
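
// The "simplest case" above, spelled out (illustrative): a quick_* form
// that unconditionally defers to the slow path is correct, just not fast.
//
//   bool ObjectSynchronizer::quick_notify(oopDesc*, JavaThread*, bool) {
//     return false;  // caller performs the state transition and then
//   }                // calls the slow-path form
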
 356 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 357   assert(current->thread_state() == _thread_in_Java, "invariant");
 358   NoSafepointVerifier nsv;
 359   if (obj == nullptr) return false;  // slow-path for invalid obj
 360   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 361   const markWord mark = obj->mark();
 362 
 363   if (LockingMode == LM_LIGHTWEIGHT) {
 364     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 365       // Degenerate notify
 366       // fast-locked by caller so by definition the implied waitset is empty.
 367       return true;
 368     }
 369   } else if (LockingMode == LM_LEGACY) {
 370     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 371       // Degenerate notify
 372       // stack-locked by caller so by definition the implied waitset is empty.
 373       return true;
 374     }
 375   }
 376 
 377   if (mark.has_monitor()) {
 378     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 379     if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
 380       // Racing with inflation/deflation: take the slow path

 398   // other IMS exception states take the slow-path
 399   return false;
 400 }
 401 
 402 static bool useHeavyMonitors() {
 403 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 404   return LockingMode == LM_MONITOR;
 405 #else
 406   return false;
 407 #endif
 408 }
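
// Note: LM_MONITOR corresponds to -XX:LockingMode=0 (see the
// VerifyHeavyMonitors assert in FastHashCode below). On the platforms
// listed above, that mode forces every lock through the inflated path;
// on other platforms this helper always returns false.
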
 409 
 410 // The LockNode emitted directly at the synchronization site would have
 411 // been too big if it were to have included support for the cases of inflated
 412 // recursive enter and exit, so they go here instead.
 413 // Note that we can't safely call AsyncPrintJavaStack() from within
 414 // quick_enter() as our thread state remains _in_Java.
 415 
 416 bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
 417   assert(current->thread_state() == _thread_in_Java, "invariant");
 418   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 419 
 420   if (useHeavyMonitors()) {
 421     return false;  // Slow path
 422   }
 423 
 424   assert(LockingMode == LM_LEGACY, "legacy mode below");
 425 
 426   const markWord mark = obj->mark();
 427 
 428   if (mark.has_monitor()) {
 429 
 430     ObjectMonitor* const m = read_monitor(mark);
 431     // An async deflation or GC can race us before we manage to make
 432     // the ObjectMonitor busy by setting the owner below. If we detect
 433     // that race we just bail out to the slow-path here.
 434     if (m->object_peek() == nullptr) {
 435       return false;
 436     }
 437 
 438     // Lock contention and Transactional Lock Elision (TLE) diagnostics

 514     EventSyncOnValueBasedClass event;
 515     if (event.should_commit()) {
 516       event.set_valueBasedClass(obj->klass());
 517       event.commit();
 518     }
 519   }
 520 
 521   if (bcp_was_adjusted) {
 522     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 523   }
 524 }
 525 
 526 // -----------------------------------------------------------------------------
 527 // Monitor Enter/Exit
 528 
 529 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 530   // When called with locking_thread != Thread::current() some mechanism must synchronize
 531   // the locking_thread with respect to the current thread. Currently only used when
 532   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 533   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 534   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");
 535 
 536   if (LockingMode == LM_LIGHTWEIGHT) {
 537     return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 538   }
 539 
 540   if (!enter_fast_impl(obj, lock, locking_thread)) {
 541     // Inflated ObjectMonitor::enter_for is required
 542 
 543     // An async deflation can race after the inflate_for() call and before
 544     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 545     // if we have lost the race to async deflation and we simply try again.
 546     while (true) {
 547       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 548       if (monitor->enter_for(locking_thread)) {
 549         return;
 550       }
 551       assert(monitor->is_being_async_deflated(), "must be");
 552     }
 553   }
 554 }
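
// The retry-on-deflation pattern above, as a standalone sketch (it is
// used again in enter_legacy() below):
//
//   while (true) {
//     ObjectMonitor* m = inflate...(...);   // (re)inflate the lock
//     if (m->enter...(...)) break;          // success: monitor is busy
//     // lost the race to async deflation; loop and inflate a fresh one
//   }
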
 555 
 556 void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
 557   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "This method should never be called on an instance of an inline class");
 558   if (!enter_fast_impl(obj, lock, current)) {
 559     // Inflated ObjectMonitor::enter is required
 560 
 561     // An async deflation can race after the inflate() call and before
 562     // enter() can make the ObjectMonitor busy. enter() returns false if
 563     // we have lost the race to async deflation and we simply try again.
 564     while (true) {
 565       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 566       if (monitor->enter(current)) {
 567         return;
 568       }
 569     }
 570   }
 571 }
 572 
 573 // The interpreter and compiler assembly code tries to lock using the fast path
 574 // of this algorithm. Make sure to update that code if the following function is
 575 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 576 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 577   guarantee(!EnableValhalla || !obj->klass()->is_inline_klass(), "Attempt to inflate inline type");
 578   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 579 
 580   if (obj->klass()->is_value_based()) {
 581     handle_sync_on_value_based_class(obj, locking_thread);
 582   }
 583 
 584   locking_thread->inc_held_monitor_count();
 585 
 586   if (!useHeavyMonitors()) {
 587     if (LockingMode == LM_LEGACY) {
 588       markWord mark = obj->mark();
 589       if (mark.is_unlocked()) {
 590         // Anticipate successful CAS -- the ST of the displaced mark must
 591         // be visible <= the ST performed by the CAS.
 592         lock->set_displaced_header(mark);
 593         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 594           return true;
 595         }
 596       } else if (mark.has_locker() &&
 597                  locking_thread->is_lock_owned((address) mark.locker())) {

 605       // so it does not matter what the value is, except that it
 606       // must be non-zero to avoid looking like a re-entrant lock,
 607       // and must not look locked either.
 608       lock->set_displaced_header(markWord::unused_mark());
 609 
 610       // Failed to fast lock.
 611       return false;
 612     }
 613   } else if (VerifyHeavyMonitors) {
 614     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 615   }
 616 
 617   return false;
 618 }
 619 
 620 void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
 621   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 622 
 623   if (!useHeavyMonitors()) {
 624     markWord mark = object->mark();
 625     if (EnableValhalla && mark.is_inline_type()) {
 626       return;
 627     }
 628     if (LockingMode == LM_LEGACY) {
 629       markWord dhw = lock->displaced_header();
 630       if (dhw.value() == 0) {
 631         // If the displaced header is null, then this exit matches up with
 632         // a recursive enter. No real work to do here except for diagnostics.
 633 #ifndef PRODUCT
 634         if (mark != markWord::INFLATING()) {
 635           // Only do diagnostics if we are not racing an inflation. Simply
 636           // exiting a recursive enter of a Java Monitor that is being
 637           // inflated is safe; see the has_monitor() comment below.
 638           assert(!mark.is_unlocked(), "invariant");
 639           assert(!mark.has_locker() ||
 640                  current->is_lock_owned((address)mark.locker()), "invariant");
 641           if (mark.has_monitor()) {
 642             // The BasicLock's displaced_header is marked as a recursive
 643             // enter and we have an inflated Java Monitor (ObjectMonitor).
 644             // This is a special case where the Java Monitor was inflated
 645             // after this thread entered the stack-lock recursively. When a
 646             // Java Monitor is inflated, we cannot safely walk the Java
 647             // Monitor owner's stack and update the BasicLocks because a

 664           return;
 665         }
 666       }
 667     }
 668   } else if (VerifyHeavyMonitors) {
 669     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 670   }
 671 
 672   // We have to take the slow-path of possible inflation and then exit.
 673   // The ObjectMonitor* can't be async deflated until ownership is
 674   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 675   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 676   assert(!monitor->has_anonymous_owner(), "must not be");
 677   monitor->exit(current);
 678 }
 679 
 680 // -----------------------------------------------------------------------------
 681 // JNI locks on java objects
 682 // NOTE: must use heavy weight monitor to handle jni monitor enter
 683 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 684   JavaThread* THREAD = current;
 685   // Top native frames in the stack will not be seen if we attempt
 686   // preemption, since we start walking from the last Java anchor.
 687   NoPreemptMark npm(current);
 688 
 689   if (obj->klass()->is_value_based()) {
 690     handle_sync_on_value_based_class(obj, current);
 691   }
 692 
 693   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 694     ResourceMark rm(THREAD);
 695     const char* desc = "Cannot synchronize on an instance of value class ";
 696     const char* className = obj->klass()->external_name();
 697     size_t msglen = strlen(desc) + strlen(className) + 1;
 698     char* message = NEW_RESOURCE_ARRAY(char, msglen);
 699     jio_snprintf(message, msglen, "%s%s", desc, className);
 700     THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
 701   }
 702 
 703   // the current locking is from JNI instead of Java code
 704   current->set_current_pending_monitor_is_from_java(false);
 705   // An async deflation can race after the inflate() call and before
 706   // enter() can make the ObjectMonitor busy. enter() returns false if
 707   // we have lost the race to async deflation and we simply try again.
 708   while (true) {
 709     ObjectMonitor* monitor;
 710     bool entered;
 711     if (LockingMode == LM_LIGHTWEIGHT) {
 712       BasicLock lock;
 713       entered = LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr;
 714     } else {
 715       monitor = inflate(current, obj(), inflate_cause_jni_enter);
 716       entered = monitor->enter(current);
 717     }
 718 
 719     if (entered) {
 720       current->inc_held_monitor_count(1, true);
 721       break;
 722     }
 723   }
 724   current->set_current_pending_monitor_is_from_java(true);
 725 }
 726 
 727 // NOTE: must use heavy weight monitor to handle jni monitor exit
 728 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 729   JavaThread* current = THREAD;
 730   CHECK_THROW_NOSYNC_IMSE(obj);
 731 
 732   ObjectMonitor* monitor;
 733   if (LockingMode == LM_LIGHTWEIGHT) {
 734     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 735   } else {
 736     // The ObjectMonitor* can't be async deflated until ownership is
 737     // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 738     monitor = inflate(current, obj, inflate_cause_jni_exit);
 739   }
 740   // If this thread has locked the object, exit the monitor. We
 741   // intentionally do not use CHECK on check_owner because we must exit the
 742   // monitor even if an exception was already pending.
 743   if (monitor->check_owner(THREAD)) {
 744     monitor->exit(current);
 745     current->dec_held_monitor_count(1, true);
 746   }
 747 }
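
// JNI-side view (illustrative): these two entry points back the JNI
// MonitorEnter and MonitorExit functions, which native code reaches as:
//
//   jint rc = env->MonitorEnter(obj);   // -> ObjectSynchronizer::jni_enter
//   /* ... critical section ... */
//   env->MonitorExit(obj);              // -> ObjectSynchronizer::jni_exit
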
 748 
 749 // -----------------------------------------------------------------------------
 750 // Internal VM locks on java objects

 755   _obj = obj;
 756 
 757   if (_obj() != nullptr) {
 758     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 759   }
 760 }
 761 
 762 ObjectLocker::~ObjectLocker() {
 763   if (_obj() != nullptr) {
 764     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 765   }
 766 }
 767 
 768 
 769 // -----------------------------------------------------------------------------
 770 //  Wait/Notify/NotifyAll
 771 // NOTE: must use heavy weight monitor to handle wait()
 772 
 773 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 774   JavaThread* current = THREAD;
 775   CHECK_THROW_NOSYNC_IMSE_0(obj);
 776   if (millis < 0) {
 777     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 778   }
 779 
 780   ObjectMonitor* monitor;
 781   if (LockingMode == LM_LIGHTWEIGHT) {
 782     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 783   } else {
 784     // The ObjectMonitor* can't be async deflated because the _waiters
 785     // field is incremented before ownership is dropped and decremented
 786     // after ownership is regained.
 787     monitor = inflate(current, obj(), inflate_cause_wait);
 788   }
 789 
 790   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 791   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 792 
 793   // This dummy call is in place to get around dtrace bug 6254741.  Once
 794   // that's fixed we can uncomment the following line, remove the call
 795   // and change this function back into a "void" func.

 798   return ret_code;
 799 }
 800 
 801 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 802   if (millis < 0) {
 803     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 804   }
 805 
 806   ObjectMonitor* monitor;
 807   if (LockingMode == LM_LIGHTWEIGHT) {
 808     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 809   } else {
 810     monitor = inflate(THREAD, obj(), inflate_cause_wait);
 811   }
 812   monitor->wait(millis, false, THREAD);
 813 }
 814 
 815 
 816 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 817   JavaThread* current = THREAD;
 818   CHECK_THROW_NOSYNC_IMSE(obj);
 819 
 820   markWord mark = obj->mark();
 821   if (LockingMode == LM_LIGHTWEIGHT) {
 822     if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
 823       // Not inflated so there can't be any waiters to notify.
 824       return;
 825     }
 826   } else if (LockingMode == LM_LEGACY) {
 827     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 828       // Not inflated so there can't be any waiters to notify.
 829       return;
 830     }
 831   }
 832 
 833   ObjectMonitor* monitor;
 834   if (LockingMode == LM_LIGHTWEIGHT) {
 835     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 836   } else {
 837     // The ObjectMonitor* can't be async deflated until ownership is
 838     // dropped by the calling thread.
 839     monitor = inflate(current, obj(), inflate_cause_notify);
 840   }
 841   monitor->notify(CHECK);
 842 }
 843 
 844 // NOTE: see comment of notify()
 845 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 846   JavaThread* current = THREAD;
 847   CHECK_THROW_NOSYNC_IMSE(obj);
 848 
 849   markWord mark = obj->mark();
 850   if (LockingMode == LM_LIGHTWEIGHT) {
 851     if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
 852       // Not inflated so there can't be any waiters to notify.
 853       return;
 854     }
 855   } else if (LockingMode == LM_LEGACY) {
 856     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 857       // Not inflated so there can't be any waiters to notify.
 858       return;
 859     }
 860   }
 861 
 862   ObjectMonitor* monitor;
 863   if (LockingMode == LM_LIGHTWEIGHT) {
 864     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 865   } else {
 866     // The ObjectMonitor* can't be async deflated until ownership is
 867     // dropped by the calling thread.

1009 
1010   markWord mark = obj->mark_acquire();
1011   for (;;) {
1012     intptr_t hash = mark.hash();
1013     if (hash != 0) {
1014       return hash;
1015     }
1016 
1017     hash = get_next_hash(current, obj);
1018     const markWord old_mark = mark;
1019     const markWord new_mark = old_mark.copy_set_hash(hash);
1020 
1021     mark = obj->cas_set_mark(new_mark, old_mark);
1022     if (old_mark == mark) {
1023       return hash;
1024     }
1025   }
1026 }
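
// The same lossy-CAS idiom in portable C++ (an analogue, not HotSpot
// code): install a value once, or adopt whichever value won the race.
//
//   #include <atomic>
//   uintptr_t install_once(std::atomic<uintptr_t>& word, uintptr_t h) {
//     uintptr_t cur = word.load(std::memory_order_acquire);
//     while (cur == 0) {
//       if (word.compare_exchange_weak(cur, h)) return h;  // we won
//     }              // on CAS failure, cur is reloaded with the winner
//     return cur;
//   }
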
1027 
1028 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
1029   if (EnableValhalla && obj->klass()->is_inline_klass()) {
1030     // VM should be calling bootstrap method
1031     ShouldNotReachHere();
1032   }
1033   if (UseObjectMonitorTable) {
1034     // Since the monitor isn't in the object header, the hash can simply be
1035     // installed in the object header.
1036     return install_hash_code(current, obj);
1037   }
1038 
1039   while (true) {
1040     ObjectMonitor* monitor = nullptr;
1041     markWord temp, test;
1042     intptr_t hash;
1043     markWord mark = read_stable_mark(obj);
1044     if (VerifyHeavyMonitors) {
1045       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1046       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1047     }
1048     if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1049       hash = mark.hash();
1050       if (hash != 0) {                     // if it has a hash, just return it
1051         return hash;
1052       }

1139         hash = test.hash();
1140         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1141         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1142       }
1143       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
1144         // If we detect that async deflation has occurred, then we
1145         // attempt to restore the header/dmw to the object's header
1146         // so that we only retry once if the deflater thread happens
1147         // to be slow.
1148         monitor->install_displaced_markword_in_object(obj);
1149         continue;
1150       }
1151     }
1152     // We finally get the hash.
1153     return hash;
1154   }
1155 }
1156 
1157 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1158                                                    Handle h_obj) {
1159   if (EnableValhalla && h_obj->mark().is_inline_type()) {
1160     return false;
1161   }
1162   assert(current == JavaThread::current(), "Can only be called on current thread");
1163   oop obj = h_obj();
1164 
1165   markWord mark = read_stable_mark(obj);
1166 
1167   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1168     // stack-locked case, header points into owner's stack
1169     return current->is_lock_owned((address)mark.locker());
1170   }
1171 
1172   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1173     // fast-locking case, see if lock is in current's lock stack
1174     return current->lock_stack().contains(h_obj());
1175   }
1176 
1177   while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1178     ObjectMonitor* monitor = read_monitor(current, obj, mark);
1179     if (monitor != nullptr) {
1180       return monitor->is_entered(current) != 0;
1181     }

1485     markWord dmw = monitor->header();
1486     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1487     return;
1488   }
1489   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1490 }
1491 
1492 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1493   assert(current == Thread::current(), "must be");
1494   assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1495   return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
1496 }
1497 
1498 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1499   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1500   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
1501   return inflate_impl(thread, obj, cause);
1502 }
1503 
1504 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
1505   if (EnableValhalla) {
1506     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1507   }
1508   // The JavaThread* locking_thread requires that the locking_thread == Thread::current() or
1509   // is suspended throughout the call by some other mechanism.
1510   // The thread might be nullptr when called from a non JavaThread. (As may still be
1511   // the case from FastHashCode). However it is only important for correctness that the
1512   // thread is set when called from ObjectSynchronizer::enter from the owning thread,
1513   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1514   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
1515   EventJavaMonitorInflate event;
1516 
1517   for (;;) {
1518     const markWord mark = object->mark_acquire();
1519 
1520     // The mark can be in one of the following states:
1521     // *  inflated     - If the ObjectMonitor owner is anonymous and the
1522     //                   locking_thread owns the object lock, then we
1523     //                   make the locking_thread the ObjectMonitor owner.
1524     // *  stack-locked - Coerce it to inflated from stack-locked.
1525     // *  INFLATING    - Busy wait for conversion from stack-locked to
1526     //                   inflated.
1527     // *  unlocked     - Aggressively inflate the object.
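
// Sketch of the dispatch implied by the states above (simplified; the
// loop that follows handles each case and retries until its CAS on the
// mark word succeeds):
//
//   if (mark.has_monitor())        { /* adopt/repair the owner; done */ }
//   else if (mark == markWord::INFLATING()) { continue; /* busy-wait */ }
//   else if (mark.has_locker())    { /* stack-locked -> ObjectMonitor */ }
//   else                           { /* unlocked: install new monitor */ }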