src/hotspot/share/runtime/synchronizer.cpp

--- old/src/hotspot/share/runtime/synchronizer.cpp

 295 // removed from the system.
 296 //
 297 // Note: If the _in_use_list max exceeds the ceiling, then
 298 // monitors_used_above_threshold() will use the in_use_list max instead
 299 // of the thread count derived ceiling because we have used more
 300 // ObjectMonitors than the estimated average.
 301 //
 302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 303 // no-progress async monitor deflation cycles in a row, then the ceiling
 304 // is adjusted upwards by monitors_used_above_threshold().
 305 //
 306 // Start the ceiling with the estimate for one thread in initialize()
 307 // which is called after cmd line options are processed.
 308 static size_t _in_use_list_ceiling = 0;
 309 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 310 bool volatile ObjectSynchronizer::_is_final_audit = false;
 311 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 312 static uintx _no_progress_cnt = 0;
 313 static bool _no_progress_skip_increment = false;
 314 
 315 // =====================> Quick functions
 316 
 317 // The quick_* forms are special fast-path variants used to improve
 318 // performance.  In the simplest case, a "quick_*" implementation could
 319 // simply return false, in which case the caller will perform the necessary
 320 // state transitions and call the slow-path form.
 321 // The fast-path is designed to handle frequently arising cases in an efficient
 322 // manner and is just a degenerate "optimistic" variant of the slow-path.
 323 // returns true  -- to indicate the call was satisfied.
 324 // returns false -- to indicate the call needs the services of the slow-path.
 325 // A no-loitering ordinance is in effect for code in the quick_* family
 326 // operators: safepoints or indefinite blocking (blocking that might span a
 327 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 328 // entry.
 329 //
 330 // Consider: An interesting optimization is to have the JIT recognize the
 331 // following common idiom:
 332 //   synchronized (someobj) { .... ; notify(); }
 333 // That is, we find a notify() or notifyAll() call that immediately precedes
 334 // the monitorexit operation.  In that case the JIT could fuse the operations
 335 // into a single notifyAndExit() runtime primitive.
 336 
 337 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 338   assert(current->thread_state() == _thread_in_Java, "invariant");
 339   NoSafepointVerifier nsv;
 340   if (obj == nullptr) return false;  // slow-path for invalid obj
 341   const markWord mark = obj->mark();
 342 
 343   if (LockingMode == LM_LIGHTWEIGHT) {
 344     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 345       // Degenerate notify
 346       // fast-locked by caller so by definition the implied waitset is empty.
 347       return true;
 348     }
 349   } else if (LockingMode == LM_LEGACY) {
 350     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 351       // Degenerate notify
 352       // stack-locked by caller so by definition the implied waitset is empty.
 353       return true;
 354     }
 355   }
 356 
 357   if (mark.has_monitor()) {
 358     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 359     if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
 360       // Racing with inflation/deflation; take the slow path

 385   // other IMS exception states take the slow-path
 386   return false;
 387 }
 388 
 389 static bool useHeavyMonitors() {
 390 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 391   return LockingMode == LM_MONITOR;
 392 #else
 393   return false;
 394 #endif
 395 }
 396 
 397 // The LockNode emitted directly at the synchronization site would have
 398 // been too big if it were to have included support for the cases of inflated
 399 // recursive enter and exit, so they go here instead.
 400 // Note that we can't safely call AsyncPrintJavaStack() from within
 401 // quick_enter() as our thread state remains _in_Java.
 402 
 403 bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
 404   assert(current->thread_state() == _thread_in_Java, "invariant");
 405 
 406   if (useHeavyMonitors()) {
 407     return false;  // Slow path
 408   }
 409 
 410   if (LockingMode == LM_LIGHTWEIGHT) {
 411     return LightweightSynchronizer::quick_enter(obj, lock, current);
 412   }
 413 
 414   assert(LockingMode == LM_LEGACY, "legacy mode below");
 415 
 416   const markWord mark = obj->mark();
 417 
 418   if (mark.has_monitor()) {
 419 
 420     ObjectMonitor* const m = read_monitor(mark);
 421     // An async deflation or GC can race us before we manage to make
 422     // the ObjectMonitor busy by setting the owner below. If we detect
 423     // that race we just bail out to the slow-path here.
 424     if (m->object_peek() == nullptr) {

 504     EventSyncOnValueBasedClass event;
 505     if (event.should_commit()) {
 506       event.set_valueBasedClass(obj->klass());
 507       event.commit();
 508     }
 509   }
 510 
 511   if (bcp_was_adjusted) {
 512     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 513   }
 514 }
 515 
 516 // -----------------------------------------------------------------------------
 517 // Monitor Enter/Exit
 518 
 519 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 520   // When called with locking_thread != Thread::current() some mechanism must synchronize
 521   // the locking_thread with respect to the current thread. Currently only used when
 522   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 523   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 524 
 525   if (LockingMode == LM_LIGHTWEIGHT) {
 526     return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 527   }
 528 
 529   if (!enter_fast_impl(obj, lock, locking_thread)) {
 530     // Inflated ObjectMonitor::enter_for is required
 531 
 532     // An async deflation can race after the inflate_for() call and before
 533     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 534     // if we have lost the race to async deflation and we simply try again.
 535     while (true) {
 536       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 537       if (monitor->enter_for(locking_thread)) {
 538         return;
 539       }
 540       assert(monitor->is_being_async_deflated(), "must be");
 541     }
 542   }
 543 }
 544 
 545 void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
 546   if (!enter_fast_impl(obj, lock, current)) {
 547     // Inflated ObjectMonitor::enter is required
 548 
 549     // An async deflation can race after the inflate() call and before
 550     // enter() can make the ObjectMonitor busy. enter() returns false if
 551     // we have lost the race to async deflation and we simply try again.
 552     while (true) {
 553       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 554       if (monitor->enter(current)) {
 555         return;
 556       }
 557     }
 558   }
 559 }
 560 
 561 // The interpreter and compiler assembly code tries to lock using the fast path
 562 // of this algorithm. Make sure to update that code if the following function is
 563 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 564 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 565   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 566 
 567   if (obj->klass()->is_value_based()) {
 568     handle_sync_on_value_based_class(obj, locking_thread);
 569   }
 570 
 571   locking_thread->inc_held_monitor_count();
 572 
 573   if (!useHeavyMonitors()) {
 574     if (LockingMode == LM_LEGACY) {
 575       markWord mark = obj->mark();
 576       if (mark.is_unlocked()) {
 577         // Anticipate successful CAS -- the ST of the displaced mark must
 578         // be visible <= the ST performed by the CAS.
 579         lock->set_displaced_header(mark);
 580         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 581           return true;
 582         }
 583       } else if (mark.has_locker() &&
 584                  locking_thread->is_lock_owned((address) mark.locker())) {

 592       // so it does not matter what the value is, except that it
 593       // must be non-zero to avoid looking like a re-entrant lock,
 594       // and must not look locked either.
 595       lock->set_displaced_header(markWord::unused_mark());
 596 
 597       // Failed to fast lock.
 598       return false;
 599     }
 600   } else if (VerifyHeavyMonitors) {
 601     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 602   }
 603 
 604   return false;
 605 }
 606 
 607 void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
 608   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 609 
 610   if (!useHeavyMonitors()) {
 611     markWord mark = object->mark();
 612     if (LockingMode == LM_LEGACY) {
 613       markWord dhw = lock->displaced_header();
 614       if (dhw.value() == 0) {
 615         // If the displaced header is null, then this exit matches up with
 616         // a recursive enter. No real work to do here except for diagnostics.
 617 #ifndef PRODUCT
 618         if (mark != markWord::INFLATING()) {
 619           // Only do diagnostics if we are not racing an inflation. Simply
 620           // exiting a recursive enter of a Java Monitor that is being
 621           // inflated is safe; see the has_monitor() comment below.
 622           assert(!mark.is_unlocked(), "invariant");
 623           assert(!mark.has_locker() ||
 624                  current->is_lock_owned((address)mark.locker()), "invariant");
 625           if (mark.has_monitor()) {
 626             // The BasicLock's displaced_header is marked as a recursive
 627             // enter and we have an inflated Java Monitor (ObjectMonitor).
 628             // This is a special case where the Java Monitor was inflated
 629             // after this thread entered the stack-lock recursively. When a
 630             // Java Monitor is inflated, we cannot safely walk the Java
 631             // Monitor owner's stack and update the BasicLocks because a

 648           return;
 649         }
 650       }
 651     }
 652   } else if (VerifyHeavyMonitors) {
 653     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 654   }
 655 
 656   // We have to take the slow-path of possible inflation and then exit.
 657   // The ObjectMonitor* can't be async deflated until ownership is
 658   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 659   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 660   assert(!monitor->has_anonymous_owner(), "must not be");
 661   monitor->exit(current);
 662 }
 663 
 664 // -----------------------------------------------------------------------------
 665 // JNI locks on java objects
 666 // NOTE: must use heavy weight monitor to handle jni monitor enter
 667 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 668   // Top native frames in the stack will not be seen if we attempt
 669   // preemption, since we start walking from the last Java anchor.
 670   NoPreemptMark npm(current);
 671 
 672   if (obj->klass()->is_value_based()) {
 673     handle_sync_on_value_based_class(obj, current);
 674   }
 675 
 676   // the current locking is from JNI instead of Java code
 677   current->set_current_pending_monitor_is_from_java(false);
 678   // An async deflation can race after the inflate() call and before
 679   // enter() can make the ObjectMonitor busy. enter() returns false if
 680   // we have lost the race to async deflation and we simply try again.
 681   while (true) {
 682     ObjectMonitor* monitor;
 683     bool entered;
 684     if (LockingMode == LM_LIGHTWEIGHT) {
 685       entered = LightweightSynchronizer::inflate_and_enter(obj(), inflate_cause_jni_enter, current, current) != nullptr;
 686     } else {
 687       monitor = inflate(current, obj(), inflate_cause_jni_enter);
 688       entered = monitor->enter(current);
 689     }
 690 
 691     if (entered) {
 692       current->inc_held_monitor_count(1, true);
 693       break;
 694     }
 695   }
 696   current->set_current_pending_monitor_is_from_java(true);
 697 }
 698 
 699 // NOTE: must use heavy weight monitor to handle jni monitor exit
 700 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 701   JavaThread* current = THREAD;
 702 
 703   ObjectMonitor* monitor;
 704   if (LockingMode == LM_LIGHTWEIGHT) {
 705     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 706   } else {
 707     // The ObjectMonitor* can't be async deflated until ownership is
 708     // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 709     monitor = inflate(current, obj, inflate_cause_jni_exit);
 710   }
 711   // If this thread has locked the object, exit the monitor. We
 712   // intentionally do not use CHECK on check_owner because we must exit the
 713   // monitor even if an exception was already pending.
 714   if (monitor->check_owner(THREAD)) {
 715     monitor->exit(current);
 716     current->dec_held_monitor_count(1, true);
 717   }
 718 }
 719 
 720 // -----------------------------------------------------------------------------
 721 // Internal VM locks on java objects

 726   _obj = obj;
 727 
 728   if (_obj() != nullptr) {
 729     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 730   }
 731 }
 732 
 733 ObjectLocker::~ObjectLocker() {
 734   if (_obj() != nullptr) {
 735     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 736   }
 737 }
 738 
 739 
 740 // -----------------------------------------------------------------------------
 741 //  Wait/Notify/NotifyAll
 742 // NOTE: must use heavy weight monitor to handle wait()
 743 
 744 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 745   JavaThread* current = THREAD;
 746   if (millis < 0) {
 747     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 748   }
 749 
 750   ObjectMonitor* monitor;
 751   if (LockingMode == LM_LIGHTWEIGHT) {
 752     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 753   } else {
 754     // The ObjectMonitor* can't be async deflated because the _waiters
 755     // field is incremented before ownership is dropped and decremented
 756     // after ownership is regained.
 757     monitor = inflate(current, obj(), inflate_cause_wait);
 758   }
 759 
 760   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 761   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 762 
 763   // This dummy call is in place to get around dtrace bug 6254741.  Once
 764   // that's fixed we can uncomment the following line, remove the call
 765   // and change this function back into a "void" func.

 768   return ret_code;
 769 }
 770 
 771 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 772   if (millis < 0) {
 773     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 774   }
 775 
 776   ObjectMonitor* monitor;
 777   if (LockingMode == LM_LIGHTWEIGHT) {
 778     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 779   } else {
 780     monitor = inflate(THREAD, obj(), inflate_cause_wait);
 781   }
 782   monitor->wait(millis, false, THREAD);
 783 }
 784 
 785 
 786 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 787   JavaThread* current = THREAD;
 788 
 789   markWord mark = obj->mark();
 790   if (LockingMode == LM_LIGHTWEIGHT) {
 791     if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
 792       // Not inflated so there can't be any waiters to notify.
 793       return;
 794     }
 795   } else if (LockingMode == LM_LEGACY) {
 796     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 797       // Not inflated so there can't be any waiters to notify.
 798       return;
 799     }
 800   }
 801 
 802   ObjectMonitor* monitor;
 803   if (LockingMode == LM_LIGHTWEIGHT) {
 804     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 805   } else {
 806     // The ObjectMonitor* can't be async deflated until ownership is
 807     // dropped by the calling thread.
 808     monitor = inflate(current, obj(), inflate_cause_notify);
 809   }
 810   monitor->notify(CHECK);
 811 }
 812 
 813 // NOTE: see comment of notify()
 814 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 815   JavaThread* current = THREAD;
 816 
 817   markWord mark = obj->mark();
 818   if (LockingMode == LM_LIGHTWEIGHT) {
 819     if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
 820       // Not inflated so there can't be any waiters to notify.
 821       return;
 822     }
 823   } else if (LockingMode == LM_LEGACY) {
 824     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 825       // Not inflated so there can't be any waiters to notify.
 826       return;
 827     }
 828   }
 829 
 830   ObjectMonitor* monitor;
 831   if (LockingMode == LM_LIGHTWEIGHT) {
 832     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 833   } else {
 834     // The ObjectMonitor* can't be async deflated until ownership is
 835     // dropped by the calling thread.

 977 
 978   markWord mark = obj->mark_acquire();
 979   for (;;) {
 980     intptr_t hash = mark.hash();
 981     if (hash != 0) {
 982       return hash;
 983     }
 984 
 985     hash = get_next_hash(current, obj);
 986     const markWord old_mark = mark;
 987     const markWord new_mark = old_mark.copy_set_hash(hash);
 988 
 989     mark = obj->cas_set_mark(new_mark, old_mark);
 990     if (old_mark == mark) {
 991       return hash;
 992     }
 993   }
 994 }
 995 
 996 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 997   if (UseObjectMonitorTable) {
 998     // Since the monitor isn't in the object header, the hash can simply be
 999     // installed in the object header.
1000     return install_hash_code(current, obj);
1001   }
1002 
1003   while (true) {
1004     ObjectMonitor* monitor = nullptr;
1005     markWord temp, test;
1006     intptr_t hash;
1007     markWord mark = read_stable_mark(obj);
1008     if (VerifyHeavyMonitors) {
1009       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1010       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1011     }
1012     if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1013       hash = mark.hash();
1014       if (hash != 0) {                     // if it has a hash, just return it
1015         return hash;
1016       }

1103         hash = test.hash();
1104         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1105         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1106       }
1107       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
1108         // If we detect that async deflation has occurred, then we
1109         // attempt to restore the header/dmw to the object's header
1110         // so that we only retry once if the deflater thread happens
1111         // to be slow.
1112         monitor->install_displaced_markword_in_object(obj);
1113         continue;
1114       }
1115     }
1116     // We finally get the hash.
1117     return hash;
1118   }
1119 }
1120 
1121 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1122                                                    Handle h_obj) {
1123   assert(current == JavaThread::current(), "Can only be called on current thread");
1124   oop obj = h_obj();
1125 
1126   markWord mark = read_stable_mark(obj);
1127 
1128   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1129     // stack-locked case, header points into owner's stack
1130     return current->is_lock_owned((address)mark.locker());
1131   }
1132 
1133   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1134     // fast-locking case, see if lock is in current's lock stack
1135     return current->lock_stack().contains(h_obj());
1136   }
1137 
1138   while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1139     ObjectMonitor* monitor = read_monitor(current, obj, mark);
1140     if (monitor != nullptr) {
1141       return monitor->is_entered(current) != 0;
1142     }

1434     markWord dmw = monitor->header();
1435     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1436     return;
1437   }
1438   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1439 }
1440 
1441 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1442   assert(current == Thread::current(), "must be");
1443   assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1444   return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
1445 }
1446 
1447 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1448   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1449   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
1450   return inflate_impl(thread, obj, cause);
1451 }
1452 
1453 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
1454   // The JavaThread* locking_thread requires that the locking_thread == Thread::current() or
1455   // is suspended throughout the call by some other mechanism.
 1456 // The thread might be nullptr when called from a non-JavaThread. (As may still be
 1457 // the case from FastHashCode). However, it is only important for correctness that the
1458   // thread is set when called from ObjectSynchronizer::enter from the owning thread,
1459   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1460   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
1461   EventJavaMonitorInflate event;
1462 
1463   for (;;) {
1464     const markWord mark = object->mark_acquire();
1465 
1466     // The mark can be in one of the following states:
1467     // *  inflated     - If the ObjectMonitor owner is anonymous and the
1468     //                   locking_thread owns the object lock, then we
1469     //                   make the locking_thread the ObjectMonitor owner.
1470     // *  stack-locked - Coerce it to inflated from stack-locked.
1471     // *  INFLATING    - Busy wait for conversion from stack-locked to
1472     //                   inflated.
1473     // *  unlocked     - Aggressively inflate the object.

+++ new/src/hotspot/share/runtime/synchronizer.cpp

 295 // removed from the system.
 296 //
 297 // Note: If the _in_use_list max exceeds the ceiling, then
 298 // monitors_used_above_threshold() will use the in_use_list max instead
 299 // of the thread count derived ceiling because we have used more
 300 // ObjectMonitors than the estimated average.
 301 //
 302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 303 // no-progress async monitor deflation cycles in a row, then the ceiling
 304 // is adjusted upwards by monitors_used_above_threshold().
 305 //
 306 // Start the ceiling with the estimate for one thread in initialize()
 307 // which is called after cmd line options are processed.
 308 static size_t _in_use_list_ceiling = 0;
 309 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 310 bool volatile ObjectSynchronizer::_is_final_audit = false;
 311 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 312 static uintx _no_progress_cnt = 0;
 313 static bool _no_progress_skip_increment = false;
 314 
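The ceiling policy in the notes above condenses to a test of the following shape. This is an illustrative sketch only, not the HotSpot implementation; the helper name is invented, while MonitorUsedDeflationThreshold is the real flag that monitors_used_above_threshold() consults:

// Illustrative sketch of the ceiling test described above (not HotSpot code):
static bool used_above_threshold_sketch(size_t cur_in_use, size_t max_in_use) {
  size_t ceiling = _in_use_list_ceiling;  // seeded from the one-thread estimate
  if (max_in_use > ceiling) {
    // More ObjectMonitors in use than the estimated average: prefer the
    // observed max over the thread-count derived ceiling.
    ceiling = max_in_use;
  }
  if (ceiling == 0) {
    return false;  // not initialized yet
  }
  // No-progress deflation cycles additionally adjust the ceiling upwards.
  return (cur_in_use * 100) / ceiling >= (size_t)MonitorUsedDeflationThreshold;
}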
 315 // These checks are required for wait, notify and exit, to avoid inflating the monitor
 316 // only to find out that this inline type object cannot be locked.
 317 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 318   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 319     JavaThread* THREAD = current;           \
 320     ResourceMark rm(THREAD);                \
 321     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 322   }
 323 
 324 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 325   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 326     JavaThread* THREAD = current;             \
 327     ResourceMark rm(THREAD);                  \
 328     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 329   }
 330 
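Both macros bind a local JavaThread* THREAD because THROW_MSG expands to an Exceptions:: call that refers to a name THREAD and then returns. A hand-expanded sketch of CHECK_THROW_NOSYNC_IMSE (illustrative, not the literal preprocessor output):

// Hand-expanded sketch of CHECK_THROW_NOSYNC_IMSE(obj) -- illustrative:
if (EnableValhalla && (obj)->mark().is_inline_type()) {
  JavaThread* THREAD = current;   // THROW_MSG refers to THREAD
  ResourceMark rm(THREAD);        // external_name() is resource-allocated
  Exceptions::_throw_msg(THREAD, __FILE__, __LINE__,
                         vmSymbols::java_lang_IllegalMonitorStateException(),
                         obj->klass()->external_name());
  return;                         // the _0 variant returns 0 instead
}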
 331 // =====================> Quick functions
 332 
 333 // The quick_* forms are special fast-path variants used to improve
 334 // performance.  In the simplest case, a "quick_*" implementation could
 335 // simply return false, in which case the caller will perform the necessary
 336 // state transitions and call the slow-path form.
 337 // The fast-path is designed to handle frequently arising cases in an efficient
 338 // manner and is just a degenerate "optimistic" variant of the slow-path.
 339 // returns true  -- to indicate the call was satisfied.
 340 // returns false -- to indicate the call needs the services of the slow-path.
 341 // A no-loitering ordinance is in effect for code in the quick_* family
 342 // operators: safepoints or indefinite blocking (blocking that might span a
 343 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 344 // entry.
 345 //
 346 // Consider: An interesting optimization is to have the JIT recognize the
 347 // following common idiom:
 348 //   synchronized (someobj) { .... ; notify(); }
 349 // That is, we find a notify() or notifyAll() call that immediately precedes
 350 // the monitorexit operation.  In that case the JIT could fuse the operations
 351 // into a single notifyAndExit() runtime primitive.
 352 
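In caller terms the contract looks like this; the function below is hypothetical (the name and transition details are illustrative), showing the quick-then-slow shape the comment describes:

// Hypothetical caller shape -- illustrative only:
void monitor_notify_entry(oopDesc* obj, JavaThread* current) {
  if (ObjectSynchronizer::quick_notify(obj, current, /*all=*/false)) {
    return;  // satisfied on the fast path, thread stayed _thread_in_Java
  }
  // Slow path: transition _thread_in_Java -> _thread_in_vm, handlize obj,
  // then call ObjectSynchronizer::notify(obj_handle, THREAD).
}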
 353 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 354   assert(current->thread_state() == _thread_in_Java, "invariant");
 355   NoSafepointVerifier nsv;
 356   if (obj == nullptr) return false;  // slow-path for invalid obj
 357   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 358   const markWord mark = obj->mark();
 359 
 360   if (LockingMode == LM_LIGHTWEIGHT) {
 361     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 362       // Degenerate notify
 363       // fast-locked by caller so by definition the implied waitset is empty.
 364       return true;
 365     }
 366   } else if (LockingMode == LM_LEGACY) {
 367     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 368       // Degenerate notify
 369       // stack-locked by caller so by definition the implied waitset is empty.
 370       return true;
 371     }
 372   }
 373 
 374   if (mark.has_monitor()) {
 375     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 376     if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
 377       // Racing with inflation/deflation; take the slow path

 402   // other IMS exception states take the slow-path
 403   return false;
 404 }
 405 
 406 static bool useHeavyMonitors() {
 407 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 408   return LockingMode == LM_MONITOR;
 409 #else
 410   return false;
 411 #endif
 412 }
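For reference, the three LockingMode values tested throughout this file (per the LockingMode flag; listed here as a reading aid):

// LockingMode values (reading aid):
//   LM_MONITOR     (0) - always use inflated ObjectMonitors ("heavy" monitors)
//   LM_LEGACY      (1) - stack-locking with a displaced header in a BasicLock
//   LM_LIGHTWEIGHT (2) - fast-locking tracked in the per-thread lock stack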
 413 
 414 // The LockNode emitted directly at the synchronization site would have
 415 // been too big if it were to have included support for the cases of inflated
 416 // recursive enter and exit, so they go here instead.
 417 // Note that we can't safely call AsyncPrintJavaStack() from within
 418 // quick_enter() as our thread state remains _in_Java.
 419 
 420 bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
 421   assert(current->thread_state() == _thread_in_Java, "invariant");
 422   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 423 
 424   if (useHeavyMonitors()) {
 425     return false;  // Slow path
 426   }
 427 
 428   if (LockingMode == LM_LIGHTWEIGHT) {
 429     return LightweightSynchronizer::quick_enter(obj, lock, current);
 430   }
 431 
 432   assert(LockingMode == LM_LEGACY, "legacy mode below");
 433 
 434   const markWord mark = obj->mark();
 435 
 436   if (mark.has_monitor()) {
 437 
 438     ObjectMonitor* const m = read_monitor(mark);
 439     // An async deflation or GC can race us before we manage to make
 440     // the ObjectMonitor busy by setting the owner below. If we detect
 441     // that race we just bail out to the slow-path here.
 442     if (m->object_peek() == nullptr) {

 522     EventSyncOnValueBasedClass event;
 523     if (event.should_commit()) {
 524       event.set_valueBasedClass(obj->klass());
 525       event.commit();
 526     }
 527   }
 528 
 529   if (bcp_was_adjusted) {
 530     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 531   }
 532 }
 533 
 534 // -----------------------------------------------------------------------------
 535 // Monitor Enter/Exit
 536 
 537 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 538   // When called with locking_thread != Thread::current() some mechanism must synchronize
 539   // the locking_thread with respect to the current thread. Currently only used when
 540   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 541   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 542   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");
 543 
 544   if (LockingMode == LM_LIGHTWEIGHT) {
 545     return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 546   }
 547 
 548   if (!enter_fast_impl(obj, lock, locking_thread)) {
 549     // Inflated ObjectMonitor::enter_for is required
 550 
 551     // An async deflation can race after the inflate_for() call and before
 552     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 553     // if we have lost the race to async deflation and we simply try again.
 554     while (true) {
 555       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 556       if (monitor->enter_for(locking_thread)) {
 557         return;
 558       }
 559       assert(monitor->is_being_async_deflated(), "must be");
 560     }
 561   }
 562 }
 563 
 564 void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
 565   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "This method should never be called on an instance of an inline class");
 566   if (!enter_fast_impl(obj, lock, current)) {
 567     // Inflated ObjectMonitor::enter is required
 568 
 569     // An async deflation can race after the inflate() call and before
 570     // enter() can make the ObjectMonitor busy. enter() returns false if
 571     // we have lost the race to async deflation and we simply try again.
 572     while (true) {
 573       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 574       if (monitor->enter(current)) {
 575         return;
 576       }
 577     }
 578   }
 579 }
 580 
 581 // The interpreter and compiler assembly code tries to lock using the fast path
 582 // of this algorithm. Make sure to update that code if the following function is
 583 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 584 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 585   guarantee(!EnableValhalla || !obj->klass()->is_inline_klass(), "Attempt to inflate inline type");
 586   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 587 
 588   if (obj->klass()->is_value_based()) {
 589     handle_sync_on_value_based_class(obj, locking_thread);
 590   }
 591 
 592   locking_thread->inc_held_monitor_count();
 593 
 594   if (!useHeavyMonitors()) {
 595     if (LockingMode == LM_LEGACY) {
 596       markWord mark = obj->mark();
 597       if (mark.is_unlocked()) {
 598         // Anticipate successful CAS -- the ST of the displaced mark must
 599         // be visible <= the ST performed by the CAS.
 600         lock->set_displaced_header(mark);
 601         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 602           return true;
 603         }
 604       } else if (mark.has_locker() &&
 605                  locking_thread->is_lock_owned((address) mark.locker())) {

 613       // so it does not matter what the value is, except that it
 614       // must be non-zero to avoid looking like a re-entrant lock,
 615       // and must not look locked either.
 616       lock->set_displaced_header(markWord::unused_mark());
 617 
 618       // Failed to fast lock.
 619       return false;
 620     }
 621   } else if (VerifyHeavyMonitors) {
 622     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 623   }
 624 
 625   return false;
 626 }
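Summarizing the BasicLock displaced-header convention the LM_LEGACY branch above relies on:

// BasicLock::displaced_header() values in LM_LEGACY (summary of the above):
//   original mark  - outermost stack-lock; restored to the object on exit
//   0              - recursive enter; the matching exit does no real work
//   unused_mark()  - fast path failed: non-zero, not locked-looking, and
//                    the slow path will inflate the monitor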
 627 
 628 void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
 629   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 630 
 631   if (!useHeavyMonitors()) {
 632     markWord mark = object->mark();
 633     if (EnableValhalla && mark.is_inline_type()) {
 634       return;
 635     }
 636     if (LockingMode == LM_LEGACY) {
 637       markWord dhw = lock->displaced_header();
 638       if (dhw.value() == 0) {
 639         // If the displaced header is null, then this exit matches up with
 640         // a recursive enter. No real work to do here except for diagnostics.
 641 #ifndef PRODUCT
 642         if (mark != markWord::INFLATING()) {
 643           // Only do diagnostics if we are not racing an inflation. Simply
 644           // exiting a recursive enter of a Java Monitor that is being
 645           // inflated is safe; see the has_monitor() comment below.
 646           assert(!mark.is_unlocked(), "invariant");
 647           assert(!mark.has_locker() ||
 648                  current->is_lock_owned((address)mark.locker()), "invariant");
 649           if (mark.has_monitor()) {
 650             // The BasicLock's displaced_header is marked as a recursive
 651             // enter and we have an inflated Java Monitor (ObjectMonitor).
 652             // This is a special case where the Java Monitor was inflated
 653             // after this thread entered the stack-lock recursively. When a
 654             // Java Monitor is inflated, we cannot safely walk the Java
 655             // Monitor owner's stack and update the BasicLocks because a

 672           return;
 673         }
 674       }
 675     }
 676   } else if (VerifyHeavyMonitors) {
 677     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 678   }
 679 
 680   // We have to take the slow-path of possible inflation and then exit.
 681   // The ObjectMonitor* can't be async deflated until ownership is
 682   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 683   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 684   assert(!monitor->has_anonymous_owner(), "must not be");
 685   monitor->exit(current);
 686 }
 687 
 688 // -----------------------------------------------------------------------------
 689 // JNI locks on java objects
 690 // NOTE: must use heavy weight monitor to handle jni monitor enter
 691 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 692   JavaThread* THREAD = current;
 693   // Top native frames in the stack will not be seen if we attempt
 694   // preemption, since we start walking from the last Java anchor.
 695   NoPreemptMark npm(current);
 696 
 697   if (obj->klass()->is_value_based()) {
 698     handle_sync_on_value_based_class(obj, current);
 699   }
 700 
 701   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 702     ResourceMark rm(THREAD);
 703     const char* desc = "Cannot synchronize on an instance of value class ";
 704     const char* className = obj->klass()->external_name();
 705     size_t msglen = strlen(desc) + strlen(className) + 1;
 706     char* message = NEW_RESOURCE_ARRAY(char, msglen);
 707     assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
         jio_snprintf(message, msglen, "%s%s", desc, className);  // fill the buffer; it was allocated but never written
 708     THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
 709   }
 710 
 711   // the current locking is from JNI instead of Java code
 712   current->set_current_pending_monitor_is_from_java(false);
 713   // An async deflation can race after the inflate() call and before
 714   // enter() can make the ObjectMonitor busy. enter() returns false if
 715   // we have lost the race to async deflation and we simply try again.
 716   while (true) {
 717     ObjectMonitor* monitor;
 718     bool entered;
 719     if (LockingMode == LM_LIGHTWEIGHT) {
 720       entered = LightweightSynchronizer::inflate_and_enter(obj(), inflate_cause_jni_enter, current, current) != nullptr;
 721     } else {
 722       monitor = inflate(current, obj(), inflate_cause_jni_enter);
 723       entered = monitor->enter(current);
 724     }
 725 
 726     if (entered) {
 727       current->inc_held_monitor_count(1, true);
 728       break;
 729     }
 730   }
 731   current->set_current_pending_monitor_is_from_java(true);
 732 }
 733 
 734 // NOTE: must use heavy weight monitor to handle jni monitor exit
 735 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 736   JavaThread* current = THREAD;
 737   CHECK_THROW_NOSYNC_IMSE(obj);
 738 
 739   ObjectMonitor* monitor;
 740   if (LockingMode == LM_LIGHTWEIGHT) {
 741     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 742   } else {
 743     // The ObjectMonitor* can't be async deflated until ownership is
 744     // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 745     monitor = inflate(current, obj, inflate_cause_jni_exit);
 746   }
 747   // If this thread has locked the object, exit the monitor. We
 748   // intentionally do not use CHECK on check_owner because we must exit the
 749   // monitor even if an exception was already pending.
 750   if (monitor->check_owner(THREAD)) {
 751     monitor->exit(current);
 752     current->dec_held_monitor_count(1, true);
 753   }
 754 }
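These two entry points back the standard JNI monitor functions. A minimal native-side sketch (the Java class and method names are invented for illustration):

#include <jni.h>

// Illustrative JNI usage that reaches jni_enter()/jni_exit():
extern "C" JNIEXPORT void JNICALL
Java_Example_withLock(JNIEnv* env, jclass, jobject obj) {
  if (env->MonitorEnter(obj) != JNI_OK) {
    return;               // enter failed, e.g. with a pending exception
  }
  // ... critical section: always a heavyweight ObjectMonitor underneath ...
  env->MonitorExit(obj);  // unowned monitor => IllegalMonitorStateException
}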
 755 
 756 // -----------------------------------------------------------------------------
 757 // Internal VM locks on java objects

 762   _obj = obj;
 763 
 764   if (_obj() != nullptr) {
 765     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 766   }
 767 }
 768 
 769 ObjectLocker::~ObjectLocker() {
 770   if (_obj() != nullptr) {
 771     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 772   }
 773 }
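ObjectLocker is the RAII wrapper VM-internal code uses around these enter/exit calls; a typical usage shape (illustrative sketch):

// Typical in-VM usage of ObjectLocker -- illustrative:
{
  ObjectLocker ol(h_obj, THREAD);  // constructor enters the object's monitor
  // ... VM code that must hold the Java-level lock on h_obj ...
}                                  // destructor exits the monitor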
 774 
 775 
 776 // -----------------------------------------------------------------------------
 777 //  Wait/Notify/NotifyAll
 778 // NOTE: must use heavy weight monitor to handle wait()
 779 
 780 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 781   JavaThread* current = THREAD;
 782   CHECK_THROW_NOSYNC_IMSE_0(obj);
 783   if (millis < 0) {
 784     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 785   }
 786 
 787   ObjectMonitor* monitor;
 788   if (LockingMode == LM_LIGHTWEIGHT) {
 789     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 790   } else {
 791     // The ObjectMonitor* can't be async deflated because the _waiters
 792     // field is incremented before ownership is dropped and decremented
 793     // after ownership is regained.
 794     monitor = inflate(current, obj(), inflate_cause_wait);
 795   }
 796 
 797   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 798   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 799 
 800   // This dummy call is in place to get around dtrace bug 6254741.  Once
 801   // that's fixed we can uncomment the following line, remove the call
 802   // and change this function back into a "void" func.

 805   return ret_code;
 806 }
 807 
 808 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 809   if (millis < 0) {
 810     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 811   }
 812 
 813   ObjectMonitor* monitor;
 814   if (LockingMode == LM_LIGHTWEIGHT) {
 815     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 816   } else {
 817     monitor = inflate(THREAD, obj(), inflate_cause_wait);
 818   }
 819   monitor->wait(millis, false, THREAD);
 820 }
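One detail worth noting about the two wait entry points above:

// Note: the second argument to ObjectMonitor::wait() is the interruptible
// flag -- wait() passes true (responds to Thread.interrupt()), while
// waitUninterruptibly() passes false.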
 821 
 822 
 823 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 824   JavaThread* current = THREAD;
 825   CHECK_THROW_NOSYNC_IMSE(obj);
 826 
 827   markWord mark = obj->mark();
 828   if (LockingMode == LM_LIGHTWEIGHT) {
 829     if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
 830       // Not inflated so there can't be any waiters to notify.
 831       return;
 832     }
 833   } else if (LockingMode == LM_LEGACY) {
 834     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 835       // Not inflated so there can't be any waiters to notify.
 836       return;
 837     }
 838   }
 839 
 840   ObjectMonitor* monitor;
 841   if (LockingMode == LM_LIGHTWEIGHT) {
 842     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 843   } else {
 844     // The ObjectMonitor* can't be async deflated until ownership is
 845     // dropped by the calling thread.
 846     monitor = inflate(current, obj(), inflate_cause_notify);
 847   }
 848   monitor->notify(CHECK);
 849 }
 850 
 851 // NOTE: see comment of notify()
 852 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 853   JavaThread* current = THREAD;
 854   CHECK_THROW_NOSYNC_IMSE(obj);
 855 
 856   markWord mark = obj->mark();
 857   if (LockingMode == LM_LIGHTWEIGHT) {
 858     if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
 859       // Not inflated so there can't be any waiters to notify.
 860       return;
 861     }
 862   } else if (LockingMode == LM_LEGACY) {
 863     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 864       // Not inflated so there can't be any waiters to notify.
 865       return;
 866     }
 867   }
 868 
 869   ObjectMonitor* monitor;
 870   if (LockingMode == LM_LIGHTWEIGHT) {
 871     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 872   } else {
 873     // The ObjectMonitor* can't be async deflated until ownership is
 874     // dropped by the calling thread.

1016 
1017   markWord mark = obj->mark_acquire();
1018   for (;;) {
1019     intptr_t hash = mark.hash();
1020     if (hash != 0) {
1021       return hash;
1022     }
1023 
1024     hash = get_next_hash(current, obj);
1025     const markWord old_mark = mark;
1026     const markWord new_mark = old_mark.copy_set_hash(hash);
1027 
1028     mark = obj->cas_set_mark(new_mark, old_mark);
1029     if (old_mark == mark) {
1030       return hash;
1031     }
1032   }
1033 }
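The success test in that loop follows the cas_set_mark() convention:

// Note: cas_set_mark(new_mark, old_mark) returns the mark word it actually
// observed; the CAS succeeded iff that value equals old_mark, which is the
// (old_mark == mark) test above. On failure the loop continues with the
// witnessed mark and returns the winning thread's hash if one was installed.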
1034 
1035 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
1036   if (EnableValhalla && obj->klass()->is_inline_klass()) {
1037     // VM should be calling bootstrap method
1038     ShouldNotReachHere();
1039   }
1040   if (UseObjectMonitorTable) {
1041     // Since the monitor isn't in the object header, the hash can simply be
1042     // installed in the object header.
1043     return install_hash_code(current, obj);
1044   }
1045 
1046   while (true) {
1047     ObjectMonitor* monitor = nullptr;
1048     markWord temp, test;
1049     intptr_t hash;
1050     markWord mark = read_stable_mark(obj);
1051     if (VerifyHeavyMonitors) {
1052       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1053       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1054     }
1055     if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1056       hash = mark.hash();
1057       if (hash != 0) {                     // if it has a hash, just return it
1058         return hash;
1059       }

1146         hash = test.hash();
1147         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1148         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1149       }
1150       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
1151         // If we detect that async deflation has occurred, then we
1152         // attempt to restore the header/dmw to the object's header
1153         // so that we only retry once if the deflater thread happens
1154         // to be slow.
1155         monitor->install_displaced_markword_in_object(obj);
1156         continue;
1157       }
1158     }
1159     // We finally get the hash.
1160     return hash;
1161   }
1162 }
1163 
1164 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1165                                                    Handle h_obj) {
1166   if (EnableValhalla && h_obj->mark().is_inline_type()) {
1167     return false;
1168   }
1169   assert(current == JavaThread::current(), "Can only be called on current thread");
1170   oop obj = h_obj();
1171 
1172   markWord mark = read_stable_mark(obj);
1173 
1174   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1175     // stack-locked case, header points into owner's stack
1176     return current->is_lock_owned((address)mark.locker());
1177   }
1178 
1179   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1180     // fast-locking case, see if lock is in current's lock stack
1181     return current->lock_stack().contains(h_obj());
1182   }
1183 
1184   while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1185     ObjectMonitor* monitor = read_monitor(current, obj, mark);
1186     if (monitor != nullptr) {
1187       return monitor->is_entered(current) != 0;
1188     }

1480     markWord dmw = monitor->header();
1481     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1482     return;
1483   }
1484   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1485 }
1486 
1487 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1488   assert(current == Thread::current(), "must be");
1489   assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1490   return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
1491 }
1492 
1493 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1494   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1495   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
1496   return inflate_impl(thread, obj, cause);
1497 }
1498 
1499 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
1500   if (EnableValhalla) {
1501     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1502   }
1503   // The JavaThread* locking_thread requires that the locking_thread == Thread::current() or
1504   // is suspended throughout the call by some other mechanism.
 1505 // The thread might be nullptr when called from a non-JavaThread. (As may still be
 1506 // the case from FastHashCode). However, it is only important for correctness that the
1507   // thread is set when called from ObjectSynchronizer::enter from the owning thread,
1508   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1509   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
1510   EventJavaMonitorInflate event;
1511 
1512   for (;;) {
1513     const markWord mark = object->mark_acquire();
1514 
1515     // The mark can be in one of the following states:
1516     // *  inflated     - If the ObjectMonitor owner is anonymous and the
1517     //                   locking_thread owns the object lock, then we
1518     //                   make the locking_thread the ObjectMonitor owner.
1519     // *  stack-locked - Coerce it to inflated from stack-locked.
1520     // *  INFLATING    - Busy wait for conversion from stack-locked to
1521     //                   inflated.
1522     // *  unlocked     - Aggressively inflate the object.
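For reference, the mark word lock-bit encodings behind those four states (per markWord.hpp; listed as a reading aid):

// Mark word low-order lock bits for the states above (reading aid):
//   [hash | age | 01]           - unlocked (neutral)
//   [ptr to BasicLock | 00]     - stack-locked (LM_LEGACY)
//   [ptr to ObjectMonitor | 10] - inflated (mark.has_monitor())
//   all-zero mark word          - INFLATING() (transient stack-locked -> inflated)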