
src/hotspot/share/runtime/synchronizer.cpp


 287 // removed from the system.
 288 //
 289 // Note: If the _in_use_list max exceeds the ceiling, then
 290 // monitors_used_above_threshold() will use the in_use_list max instead
 291 // of the thread count derived ceiling because we have used more
 292 // ObjectMonitors than the estimated average.
 293 //
 294 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 295 // no-progress async monitor deflation cycles in a row, then the ceiling
 296 // is adjusted upwards by monitors_used_above_threshold().
 297 //
 298 // Start the ceiling with the estimate for one thread in initialize()
 299 // which is called after cmd line options are processed.
 300 static size_t _in_use_list_ceiling = 0;
 301 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 302 bool volatile ObjectSynchronizer::_is_final_audit = false;
 303 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 304 static uintx _no_progress_cnt = 0;
 305 static bool _no_progress_skip_increment = false;
 306 
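The ceiling heuristic described in the comment above can be illustrated with a small standalone sketch. This is a simplified model, not the HotSpot implementation: the names used_count, list_max and threshold_percent, the adjustment factor, and the main() driver are assumptions made purely for illustration.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Simplified model of the "monitors used above threshold?" decision.
    //   ceiling           - analogous to _in_use_list_ceiling
    //   list_max          - historical maximum size of the in-use list
    //   used_count        - ObjectMonitors currently on the in-use list
    //   threshold_percent - analogous to MonitorUsedDeflationThreshold
    static bool used_above_threshold(size_t ceiling, size_t list_max,
                                     size_t used_count, size_t threshold_percent) {
      // If the in-use list max exceeds the ceiling, prefer the max: we have
      // already used more ObjectMonitors than the thread-count derived estimate.
      size_t effective_ceiling = std::max(ceiling, list_max);
      return used_count * 100 > effective_ceiling * threshold_percent;
    }

    int main() {
      size_t ceiling = 1024;            // thread-count derived estimate
      size_t no_progress_cycles = 3;    // consecutive no-progress deflation cycles
      const size_t no_progress_max = 3; // stand-in for NoAsyncDeflationProgressMax

      // After too many no-progress cycles the ceiling is bumped upward so the
      // threshold check stops requesting futile deflation passes.
      if (no_progress_cycles >= no_progress_max) {
        ceiling = ceiling * 3 / 2;      // illustrative adjustment only
      }
      printf("above threshold: %d\n",
             used_above_threshold(ceiling, 2048, 1900, 90) ? 1 : 0);
      return 0;
    }
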
 307 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 308   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 309     JavaThread* THREAD = current;           \
 310     ResourceMark rm(THREAD);                \
 311     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 312   }
 313 
 314 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 315   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 316     JavaThread* THREAD = current;             \
 317     ResourceMark rm(THREAD);                  \
 318     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 319   }
 320 
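A minimal sketch of how the two macros above are meant to be used: they sit at the top of a monitor operation and throw IllegalMonitorStateException when the receiver is a Valhalla inline type, which cannot be synchronized on. The function below is hypothetical and only shows the pattern; the real call sites are the ObjectSynchronizer entry points later in this file.

    // Hypothetical monitor operation guarded by the macro above. The macro
    // references a local named 'current', so the caller must provide one.
    // CHECK_THROW_NOSYNC_IMSE is for void functions (THROW_MSG returns),
    // CHECK_THROW_NOSYNC_IMSE_0 for functions that return a value.
    void example_monitor_op(Handle obj, JavaThread* current) {
      CHECK_THROW_NOSYNC_IMSE(obj);  // throws IMSE and returns if obj is an inline type
      // ... normal monitor work on a non-inline-type receiver ...
    }
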
 321 // =====================> Quick functions
 322 
 323 // The quick_* forms are special fast-path variants used to improve
 324 // performance.  In the simplest case, a "quick_*" implementation could
 325 // simply return false, in which case the caller will perform the necessary
 326 // state transitions and call the slow-path form.
 327 // The fast-path is designed to handle frequently arising cases in an efficient
 328 // manner and is just a degenerate "optimistic" variant of the slow-path.
 329 // returns true  -- to indicate the call was satisfied.
 330 // returns false -- to indicate the call needs the services of the slow-path.
 331 // A no-loitering ordinance is in effect for code in the quick_* family
 332 // operators: safepoints or indefinite blocking (blocking that might span a
 333 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 334 // entry.
 335 //
 336 // Consider: An interesting optimization is to have the JIT recognize the
 337 // following common idiom:
 338 //   synchronized (someobj) { .... ; notify(); }
 339 // That is, we find a notify() or notifyAll() call that immediately precedes
 340 // the monitorexit operation.  In that case the JIT could fuse the operations
 341 // into a single notifyAndExit() runtime primitive.
 342 
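The contract spelled out above can be summarized from the caller's side: the quick_ form either satisfies the request entirely on the fast path or declines, and only then does the caller pay for the thread-state transition and the slow path. The surrounding function and the slow-path helper below are hypothetical.

    // Hypothetical caller illustrating the quick_* contract. The quick path runs
    // while the thread is still _thread_in_Java and must not reach a safepoint;
    // only when it returns false do we transition and take the slow path.
    void runtime_notify(oopDesc* obj, JavaThread* current, bool all) {
      if (ObjectSynchronizer::quick_notify(obj, current, all)) {
        return;  // fast path satisfied the request
      }
      // Slow path: transition to _thread_in_vm (may block or safepoint) and
      // perform the full notify, including any exception throwing.
      // slow_path_notify(obj, current, all);  // hypothetical helper
    }
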
 343 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 344   assert(current->thread_state() == _thread_in_Java, "invariant");
 345   NoSafepointVerifier nsv;
 346   if (obj == nullptr) return false;  // slow-path for invalid obj
 347   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 348   const markWord mark = obj->mark();
 349 
 350   if (LockingMode == LM_LIGHTWEIGHT) {
 351     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 352       // Degenerate notify
 353       // fast-locked by caller so by definition the implied waitset is empty.
 354       return true;
 355     }
 356   } else if (LockingMode == LM_LEGACY) {
 357     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 358       // Degenerate notify
 359       // stack-locked by caller so by definition the implied waitset is empty.
 360       return true;
 361     }
 362   }
 363 
 364   if (mark.has_monitor()) {
 365     ObjectMonitor* const mon = mark.monitor();
 366     assert(mon->object() == oop(obj), "invariant");
 367     if (mon->owner() != current) return false;  // slow-path for IMS exception

 384     }
 385     return true;
 386   }
 387 
 388   // other IMS exception states take the slow-path
 389   return false;
 390 }
 391 
 392 
 393 // The LockNode emitted directly at the synchronization site would have
 394 // been too big if it were to have included support for the cases of inflated
 395 // recursive enter and exit, so they go here instead.
 396 // Note that we can't safely call AsyncPrintJavaStack() from within
 397 // quick_enter() as our thread state remains _in_Java.
 398 
 399 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 400                                      BasicLock * lock) {
 401   assert(current->thread_state() == _thread_in_Java, "invariant");
 402   NoSafepointVerifier nsv;
 403   if (obj == nullptr) return false;       // Need to throw NPE
 404   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 405 
 406   if (obj->klass()->is_value_based()) {
 407     return false;
 408   }
 409 
 410   const markWord mark = obj->mark();
 411 
 412   if (mark.has_monitor()) {
 413     ObjectMonitor* const m = mark.monitor();
 414     // An async deflation or GC can race us before we manage to make
 415     // the ObjectMonitor busy by setting the owner below. If we detect
 416     // that race we just bail out to the slow-path here.
 417     if (m->object_peek() == nullptr) {
 418       return false;
 419     }
 420     JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
 421 
 422     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 423     // and observability
 424     // Case: light contention possibly amenable to TLE

 506   if (bcp_was_adjusted) {
 507     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 508   }
 509 }
 510 
 511 static bool useHeavyMonitors() {
 512 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 513   return LockingMode == LM_MONITOR;
 514 #else
 515   return false;
 516 #endif
 517 }
 518 
 519 // -----------------------------------------------------------------------------
 520 // Monitor Enter/Exit
 521 // The interpreter and compiler assembly code tries to lock using the fast path
 522 // of this algorithm. Make sure to update that code if the following function is
 523 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 524 
 525 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 526   CHECK_THROW_NOSYNC_IMSE(obj);
 527   if (obj->klass()->is_value_based()) {
 528     handle_sync_on_value_based_class(obj, current);
 529   }
 530 
 531   current->inc_held_monitor_count();
 532 
 533   if (!useHeavyMonitors()) {
 534     if (LockingMode == LM_LIGHTWEIGHT) {
 535       // Fast-locking does not use the 'lock' argument.
 536       LockStack& lock_stack = current->lock_stack();
 537       if (lock_stack.can_push()) {
 538         markWord mark = obj()->mark_acquire();
 539         if (mark.is_neutral()) {
 540           assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
 541           // Try to swing into 'fast-locked' state.
 542           markWord locked_mark = mark.set_fast_locked();
 543           markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
 544           if (old_mark == mark) {
 545             // Successfully fast-locked, push object to lock-stack and return.
 546             lock_stack.push(obj());

 576   } else if (VerifyHeavyMonitors) {
 577     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 578   }
 579 
 580   // An async deflation can race after the inflate() call and before
 581   // enter() can make the ObjectMonitor busy. enter() returns false if
 582   // we have lost the race to async deflation and we simply try again.
 583   while (true) {
 584     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 585     if (monitor->enter(current)) {
 586       return;
 587     }
 588   }
 589 }
 590 
 591 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 592   current->dec_held_monitor_count();
 593 
 594   if (!useHeavyMonitors()) {
 595     markWord mark = object->mark();
 596     if (EnableValhalla && mark.is_inline_type()) {
 597       return;
 598     }
 599     if (LockingMode == LM_LIGHTWEIGHT) {
 600       // Fast-locking does not use the 'lock' argument.
 601       if (mark.is_fast_locked()) {
 602         markWord unlocked_mark = mark.set_unlocked();
 603         markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
 604         if (old_mark != mark) {
 605           // Another thread won the CAS, it must have inflated the monitor.
 606           // It can only have installed an anonymously locked monitor at this point.
 607           // Fetch that monitor, set owner correctly to this thread, and
 608           // exit it (allowing waiting threads to enter).
 609           assert(old_mark.has_monitor(), "must have monitor");
 610           ObjectMonitor* monitor = old_mark.monitor();
 611           assert(monitor->is_owner_anonymous(), "must be anonymous owner");
 612           monitor->set_owner_from_anonymous(current);
 613           monitor->exit(current);
 614         }
 615         LockStack& lock_stack = current->lock_stack();
 616         lock_stack.remove(object);
 617         return;
 618       }

 658     }
 659   } else if (VerifyHeavyMonitors) {
 660     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 661   }
 662 
 663   // We have to take the slow-path of possible inflation and then exit.
 664   // The ObjectMonitor* can't be async deflated until ownership is
 665   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 666   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 667   assert(!monitor->is_owner_anonymous(), "must not be");
 668   monitor->exit(current);
 669 }
 670 
 671 // -----------------------------------------------------------------------------
 672 // JNI locks on java objects
 673 // NOTE: must use heavy weight monitor to handle jni monitor enter
 674 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 675   if (obj->klass()->is_value_based()) {
 676     handle_sync_on_value_based_class(obj, current);
 677   }
 678   CHECK_THROW_NOSYNC_IMSE(obj);
 679 
 680   // the current locking is from JNI instead of Java code
 681   current->set_current_pending_monitor_is_from_java(false);
 682   // An async deflation can race after the inflate() call and before
 683   // enter() can make the ObjectMonitor busy. enter() returns false if
 684   // we have lost the race to async deflation and we simply try again.
 685   while (true) {
 686     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 687     if (monitor->enter(current)) {
 688       current->inc_held_monitor_count(1, true);
 689       break;
 690     }
 691   }
 692   current->set_current_pending_monitor_is_from_java(true);
 693 }
 694 
 695 // NOTE: must use heavy weight monitor to handle jni monitor exit
 696 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 697   JavaThread* current = THREAD;
 698   CHECK_THROW_NOSYNC_IMSE(obj);
 699 
 700   // The ObjectMonitor* can't be async deflated until ownership is
 701   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 702   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 703   // If this thread has locked the object, exit the monitor. We
 704   // intentionally do not use CHECK on check_owner because we must exit the
 705   // monitor even if an exception was already pending.
 706   if (monitor->check_owner(THREAD)) {
 707     monitor->exit(current);
 708     current->dec_held_monitor_count(1, true);
 709   }
 710 }
 711 
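jni_enter() and jni_exit() are the VM-side counterparts of the JNI MonitorEnter and MonitorExit functions. A small sketch of native code that ends up on these paths, using only the standard JNI API (the Java-side class and method names are made up):

    #include <jni.h>

    // Native method that locks and unlocks a Java object via JNI.
    // Every successful MonitorEnter must be paired with a MonitorExit,
    // otherwise the object stays JNI-locked.
    extern "C" JNIEXPORT void JNICALL
    Java_Example_touch(JNIEnv* env, jobject self, jobject lock) {
      if (env->MonitorEnter(lock) != JNI_OK) {
        return;  // enter failed, e.g. an exception is pending
      }
      // ... critical section protected by the object's monitor ...
      env->MonitorExit(lock);  // fails with IllegalMonitorStateException if not owner
    }
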
 712 // -----------------------------------------------------------------------------
 713 // Internal VM locks on java objects
 714 // standard constructor, allows locking failures
 715 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 716   _thread = thread;
 717   _thread->check_for_valid_safepoint_state();
 718   _obj = obj;
 719 
 720   if (_obj() != nullptr) {
 721     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 722   }
 723 }
 724 
 725 ObjectLocker::~ObjectLocker() {
 726   if (_obj() != nullptr) {
 727     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 728   }
 729 }
 730 
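ObjectLocker is a stack-allocated RAII helper: the constructor enters the monitor (when the handle is non-null) and the destructor exits it as the scope unwinds. A hedged sketch of typical VM-internal usage; the surrounding function is hypothetical.

    // Hypothetical VM-internal code that must hold a Java object's monitor
    // for the duration of a scope.
    void do_something_locked(Handle h_obj, JavaThread* current) {
      ObjectLocker ol(h_obj, current);  // ObjectSynchronizer::enter in the constructor
      // ... operate on h_obj while its monitor is held ...
    }                                   // ObjectSynchronizer::exit in the destructor
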
 731 
 732 // -----------------------------------------------------------------------------
 733 //  Wait/Notify/NotifyAll
 734 // NOTE: must use heavy weight monitor to handle wait()
 735 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 736   JavaThread* current = THREAD;
 737   CHECK_THROW_NOSYNC_IMSE_0(obj);
 738   if (millis < 0) {
 739     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 740   }
 741   // The ObjectMonitor* can't be async deflated because the _waiters
 742   // field is incremented before ownership is dropped and decremented
 743   // after ownership is regained.
 744   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 745 
 746   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 747   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 748 
 749   // This dummy call is in place to get around dtrace bug 6254741.  Once
 750   // that's fixed we can uncomment the following line, remove the call
 751   // and change this function back into a "void" func.
 752   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 753   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 754   return ret_code;
 755 }
 756 
 757 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 758   JavaThread* current = THREAD;
 759   CHECK_THROW_NOSYNC_IMSE(obj);
 760 
 761   markWord mark = obj->mark();
 762   if (LockingMode == LM_LIGHTWEIGHT) {
 763     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 764       // Not inflated so there can't be any waiters to notify.
 765       return;
 766     }
 767   } else if (LockingMode == LM_LEGACY) {
 768     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 769       // Not inflated so there can't be any waiters to notify.
 770       return;
 771     }
 772   }
 773   // The ObjectMonitor* can't be async deflated until ownership is
 774   // dropped by the calling thread.
 775   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 776   monitor->notify(CHECK);
 777 }
 778 
 779 // NOTE: see comment of notify()
 780 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 781   JavaThread* current = THREAD;
 782   CHECK_THROW_NOSYNC_IMSE(obj);
 783 
 784   markWord mark = obj->mark();
 785   if (LockingMode == LM_LIGHTWEIGHT) {
 786     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 787       // Not inflated so there can't be any waiters to notify.
 788       return;
 789     }
 790   } else if (LockingMode == LM_LEGACY) {
 791     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 792       // Not inflated so there can't be any waiters to notify.
 793       return;
 794     }
 795   }
 796   // The ObjectMonitor* can't be async deflated until ownership is
 797   // dropped by the calling thread.
 798   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 799   monitor->notifyAll(CHECK);
 800 }
 801 
 802 // -----------------------------------------------------------------------------

 924     unsigned v = current->_hashStateW;
 925     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 926     current->_hashStateW = v;
 927     value = v;
 928   }
 929 
 930   value &= markWord::hash_mask;
 931   if (value == 0) value = 0xBAD;
 932   assert(value != markWord::no_hash, "invariant");
 933   return value;
 934 }
 935 
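The tail of get_next_hash() shown above is the final mixing step of a Marsaglia xor-shift generator kept in per-thread state (_hashStateX .. _hashStateW). Below is a standalone sketch of that style of generator; the seed values and the struct layout are simplifications rather than the JavaThread fields, and only the mixing arithmetic mirrors the code above.

    #include <cstdint>
    #include <cstdio>

    // Simplified Marsaglia xor-shift generator in the style of get_next_hash().
    struct HashState {
      uint32_t x = 842502087u;  // arbitrary non-zero seeds for illustration
      uint32_t y = 0x8767u;
      uint32_t z = 273326509u;
      uint32_t w = 0x9e3779b9u;
    };

    static uint32_t next_hash(HashState& s) {
      uint32_t t = s.x;
      t ^= t << 11;
      s.x = s.y; s.y = s.z; s.z = s.w;       // rotate the state
      uint32_t v = s.w;
      v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));  // same mixing step as above
      s.w = v;
      return v;
    }

    int main() {
      HashState s;
      for (int i = 0; i < 4; i++) {
        // The real code additionally masks with markWord::hash_mask and
        // substitutes 0xBAD for a zero result.
        printf("%08x\n", next_hash(s));
      }
      return 0;
    }
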
 936 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
 937 // calculations as part of JVM/TI tagging.
 938 static bool is_lock_owned(Thread* thread, oop obj) {
 939   assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
 940   return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
 941 }
 942 
 943 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 944   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 945     // VM should be calling bootstrap method
 946     ShouldNotReachHere();
 947   }
 948 
 949   while (true) {
 950     ObjectMonitor* monitor = nullptr;
 951     markWord temp, test;
 952     intptr_t hash;
 953     markWord mark = read_stable_mark(obj);
 954     if (VerifyHeavyMonitors) {
 955       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
 956       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 957     }
 958     if (mark.is_neutral()) {               // if this is a normal header
 959       hash = mark.hash();
 960       if (hash != 0) {                     // if it has a hash, just return it
 961         return hash;
 962       }
 963       hash = get_next_hash(current, obj);  // get a new hash
 964       temp = mark.copy_set_hash(hash);     // merge the hash into header
 965                                            // try to install the hash
 966       test = obj->cas_set_mark(temp, mark);
 967       if (test == mark) {                  // if the hash was installed, return it

1048         hash = test.hash();
1049         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1050         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1051       }
1052       if (monitor->is_being_async_deflated()) {
1053         // If we detect that async deflation has occurred, then we
1054         // attempt to restore the header/dmw to the object's header
1055         // so that we only retry once if the deflater thread happens
1056         // to be slow.
1057         monitor->install_displaced_markword_in_object(obj);
1058         continue;
1059       }
1060     }
1061     // We finally get the hash.
1062     return hash;
1063   }
1064 }
1065 
1066 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1067                                                    Handle h_obj) {
1068   if (EnableValhalla && h_obj->mark().is_inline_type()) {
1069     return false;
1070   }
1071   assert(current == JavaThread::current(), "Can only be called on current thread");
1072   oop obj = h_obj();
1073 
1074   markWord mark = read_stable_mark(obj);
1075 
1076   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1077     // stack-locked case, header points into owner's stack
1078     return current->is_lock_owned((address)mark.locker());
1079   }
1080 
1081   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1082     // fast-locking case, see if lock is in current's lock stack
1083     return current->lock_stack().contains(h_obj());
1084   }
1085 
1086   if (mark.has_monitor()) {
1087     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1088     // The first stage of async deflation does not affect any field
1089     // used by this comparison so the ObjectMonitor* is usable here.
1090     ObjectMonitor* monitor = mark.monitor();

1322   event->set_monitorClass(obj->klass());
1323   event->set_address((uintptr_t)(void*)obj);
1324   event->set_cause((u1)cause);
1325   event->commit();
1326 }
1327 
1328 // Fast path code shared by multiple functions
1329 void ObjectSynchronizer::inflate_helper(oop obj) {
1330   markWord mark = obj->mark_acquire();
1331   if (mark.has_monitor()) {
1332     ObjectMonitor* monitor = mark.monitor();
1333     markWord dmw = monitor->header();
1334     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1335     return;
1336   }
1337   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1338 }
1339 
1340 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1341                                            const InflateCause cause) {
1342   if (EnableValhalla) {
1343     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1344   }
1345 
1346   EventJavaMonitorInflate event;
1347 
1348   for (;;) {
1349     const markWord mark = object->mark_acquire();
1350 
1351     // The mark can be in one of the following states:
1352     // *  inflated     - Just return if using stack-locking.
1353     //                   If using fast-locking and the ObjectMonitor owner
1354     //                   is anonymous and the current thread owns the
1355     //                   object lock, then we make the current thread the
1356     //                   ObjectMonitor owner and remove the lock from the
1357     //                   current thread's lock stack.
1358     // *  fast-locked  - Coerce it to inflated from fast-locked.
1359     // *  stack-locked - Coerce it to inflated from stack-locked.
1360     // *  INFLATING    - Busy wait for conversion from stack-locked to
1361     //                   inflated.
1362     // *  neutral      - Aggressively inflate the object.
1363 
1364     // CASE: inflated
1365     if (mark.has_monitor()) {