< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page

 280 // removed from the system.
 281 //
 282 // Note: If the _in_use_list max exceeds the ceiling, then
 283 // monitors_used_above_threshold() will use the in_use_list max instead
 284 // of the thread count derived ceiling because we have used more
 285 // ObjectMonitors than the estimated average.
 286 //
 287 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 288 // no-progress async monitor deflation cycles in a row, then the ceiling
 289 // is adjusted upwards by monitors_used_above_threshold().
 290 //
 291 // Start the ceiling with the estimate for one thread in initialize()
 292 // which is called after cmd line options are processed.
static size_t _in_use_list_ceiling = 0;  // ceiling described in the comment block above; set in initialize()
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;  // time (ns) of last async deflation cycle
static uintx _no_progress_cnt = 0;  // consecutive no-progress deflation cycles (see ceiling note above)
static bool _no_progress_skip_increment = false;
 299 














 300 // =====================> Quick functions
 301 
 302 // The quick_* forms are special fast-path variants used to improve
 303 // performance.  In the simplest case, a "quick_*" implementation could
 304 // simply return false, in which case the caller will perform the necessary
 305 // state transitions and call the slow-path form.
 306 // The fast-path is designed to handle frequently arising cases in an efficient
 307 // manner and is just a degenerate "optimistic" variant of the slow-path.
 308 // returns true  -- to indicate the call was satisfied.
 309 // returns false -- to indicate the call needs the services of the slow-path.
 310 // A no-loitering ordinance is in effect for code in the quick_* family
 311 // operators: safepoints or indefinite blocking (blocking that might span a
 312 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 313 // entry.
 314 //
 315 // Consider: An interesting optimization is to have the JIT recognize the
 316 // following common idiom:
 317 //   synchronized (someobj) { .... ; notify(); }
 318 // That is, we find a notify() or notifyAll() call that immediately precedes
 319 // the monitorexit operation.  In that case the JIT could fuse the operations
 320 // into a single notifyAndExit() runtime primitive.
 321 
 322 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 323   assert(current->thread_state() == _thread_in_Java, "invariant");
 324   NoSafepointVerifier nsv;
 325   if (obj == nullptr) return false;  // slow-path for invalid obj

 326   const markWord mark = obj->mark();
 327 
 328   if (LockingMode == LM_LIGHTWEIGHT) {
 329     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 330       // Degenerate notify
 331       // fast-locked by caller so by definition the implied waitset is empty.
 332       return true;
 333     }
 334   } else if (LockingMode == LM_LEGACY) {
 335     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 336       // Degenerate notify
 337       // stack-locked by caller so by definition the implied waitset is empty.
 338       return true;
 339     }
 340   }
 341 
 342   if (mark.has_monitor()) {
 343     ObjectMonitor* const mon = mark.monitor();
 344     assert(mon->object() == oop(obj), "invariant");
 345     if (mon->owner() != current) return false;  // slow-path for IMS exception

 362     }
 363     return true;
 364   }
 365 
 366   // other IMS exception states take the slow-path
 367   return false;
 368 }
 369 
 370 
 371 // The LockNode emitted directly at the synchronization site would have
 372 // been too big if it were to have included support for the cases of inflated
 373 // recursive enter and exit, so they go here instead.
 374 // Note that we can't safely call AsyncPrintJavaStack() from within
 375 // quick_enter() as our thread state remains _in_Java.
 376 
 377 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 378                                      BasicLock * lock) {
 379   assert(current->thread_state() == _thread_in_Java, "invariant");
 380   NoSafepointVerifier nsv;
 381   if (obj == nullptr) return false;       // Need to throw NPE

 382 
 383   if (obj->klass()->is_value_based()) {
 384     return false;
 385   }
 386 
 387   const markWord mark = obj->mark();
 388 
 389   if (mark.has_monitor()) {
 390     ObjectMonitor* const m = mark.monitor();
 391     // An async deflation or GC can race us before we manage to make
 392     // the ObjectMonitor busy by setting the owner below. If we detect
 393     // that race we just bail out to the slow-path here.
 394     if (m->object_peek() == nullptr) {
 395       return false;
 396     }
 397     JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
 398 
 399     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 400     // and observability
 401     // Case: light contention possibly amenable to TLE

 483   if (bcp_was_adjusted) {
 484     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 485   }
 486 }
 487 
// Returns true iff heavyweight monitor locking (LM_MONITOR) is in effect.
// Only the listed platforms honor LM_MONITOR here; on all other platforms
// this is unconditionally false and the fast-locking paths are used.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}
 495 
 496 // -----------------------------------------------------------------------------
 497 // Monitor Enter/Exit
 498 // The interpreter and compiler assembly code tries to lock using the fast path
 499 // of this algorithm. Make sure to update that code if the following function is
 500 // changed. The implementation is extremely sensitive to race condition. Be careful.
 501 
 502 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {

 503   if (obj->klass()->is_value_based()) {
 504     handle_sync_on_value_based_class(obj, current);
 505   }
 506 
 507   current->inc_held_monitor_count();
 508 
 509   if (!useHeavyMonitors()) {
 510     if (LockingMode == LM_LIGHTWEIGHT) {
 511       // Fast-locking does not use the 'lock' argument.
 512       LockStack& lock_stack = current->lock_stack();
 513       if (lock_stack.can_push()) {
 514         markWord mark = obj()->mark_acquire();
 515         if (mark.is_neutral()) {
 516           assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
 517           // Try to swing into 'fast-locked' state.
 518           markWord locked_mark = mark.set_fast_locked();
 519           markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
 520           if (old_mark == mark) {
 521             // Successfully fast-locked, push object to lock-stack and return.
 522             lock_stack.push(obj());

 552   } else if (VerifyHeavyMonitors) {
 553     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 554   }
 555 
 556   // An async deflation can race after the inflate() call and before
 557   // enter() can make the ObjectMonitor busy. enter() returns false if
 558   // we have lost the race to async deflation and we simply try again.
 559   while (true) {
 560     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 561     if (monitor->enter(current)) {
 562       return;
 563     }
 564   }
 565 }
 566 
 567 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 568   current->dec_held_monitor_count();
 569 
 570   if (!useHeavyMonitors()) {
 571     markWord mark = object->mark();



 572     if (LockingMode == LM_LIGHTWEIGHT) {
 573       // Fast-locking does not use the 'lock' argument.
 574       if (mark.is_fast_locked()) {
 575         markWord unlocked_mark = mark.set_unlocked();
 576         markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
 577         if (old_mark != mark) {
 578           // Another thread won the CAS, it must have inflated the monitor.
 579           // It can only have installed an anonymously locked monitor at this point.
 580           // Fetch that monitor, set owner correctly to this thread, and
 581           // exit it (allowing waiting threads to enter).
 582           assert(old_mark.has_monitor(), "must have monitor");
 583           ObjectMonitor* monitor = old_mark.monitor();
 584           assert(monitor->is_owner_anonymous(), "must be anonymous owner");
 585           monitor->set_owner_from_anonymous(current);
 586           monitor->exit(current);
 587         }
 588         LockStack& lock_stack = current->lock_stack();
 589         lock_stack.remove(object);
 590         return;
 591       }

 637   // The ObjectMonitor* can't be async deflated until ownership is
 638   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 639   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 640   if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
 641     // It must be owned by us. Pop lock object from lock stack.
 642     LockStack& lock_stack = current->lock_stack();
 643     oop popped = lock_stack.pop();
 644     assert(popped == object, "must be owned by this thread");
 645     monitor->set_owner_from_anonymous(current);
 646   }
 647   monitor->exit(current);
 648 }
 649 
 650 // -----------------------------------------------------------------------------
 651 // JNI locks on java objects
 652 // NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  // Value-based classes get the diagnostic treatment before locking.
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
    if (monitor->enter(current)) {
      // Record one JNI-held monitor (second argument flags JNI flavor).
      current->inc_held_monitor_count(1, true);
      break;
    }
  }
  // Restore the default: subsequent pending-monitor state is from Java code.
  current->set_current_pending_monitor_is_from_java(true);
}
 672 
 673 // NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
    // Drop the JNI-held monitor accounting taken in jni_enter().
    current->dec_held_monitor_count(1, true);
  }
}
 688 
 689 // -----------------------------------------------------------------------------
 690 // Internal VM locks on java objects
 691 // standard constructor, allows locking failures
 692 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 693   _thread = thread;
 694   _thread->check_for_valid_safepoint_state();
 695   _obj = obj;
 696 
 697   if (_obj() != nullptr) {
 698     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 699   }
 700 }
 701 
 702 ObjectLocker::~ObjectLocker() {
 703   if (_obj() != nullptr) {
 704     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 705   }
 706 }
 707 
 708 
 709 // -----------------------------------------------------------------------------
 710 //  Wait/Notify/NotifyAll
 711 // NOTE: must use heavy weight monitor to handle wait()
// Object.wait() support: always inflates to a heavyweight monitor (see NOTE
// above) and delegates the wait to the ObjectMonitor.
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  if (millis < 0) {
    // Java semantics: a negative timeout is an IllegalArgumentException.
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD); // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}
 732 
// Object.notify() support. If the lock is still fast/stack-locked by the
// caller, the implied waitset is empty and we return without inflating;
// otherwise inflate and notify one waiter.
void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  monitor->notify(CHECK);
}
 753 
 754 // NOTE: see comment of notify()
// Object.notifyAll() support. Same structure as notify() above, but wakes
// every waiter on the inflated monitor.
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  monitor->notifyAll(CHECK);
}
 775 
 776 // -----------------------------------------------------------------------------

 898     unsigned v = current->_hashStateW;
 899     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 900     current->_hashStateW = v;
 901     value = v;
 902   }
 903 
 904   value &= markWord::hash_mask;
 905   if (value == 0) value = 0xBAD;
 906   assert(value != markWord::no_hash, "invariant");
 907   return value;
 908 }
 909 
 910 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
 911 // calculations as part of JVM/TI tagging.
 912 static bool is_lock_owned(Thread* thread, oop obj) {
 913   assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
 914   return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
 915 }
 916 
 917 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {




 918 
 919   while (true) {
 920     ObjectMonitor* monitor = nullptr;
 921     markWord temp, test;
 922     intptr_t hash;
 923     markWord mark = read_stable_mark(obj);
 924     if (VerifyHeavyMonitors) {
 925       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
 926       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 927     }
 928     if (mark.is_neutral()) {               // if this is a normal header
 929       hash = mark.hash();
 930       if (hash != 0) {                     // if it has a hash, just return it
 931         return hash;
 932       }
 933       hash = get_next_hash(current, obj);  // get a new hash
 934       temp = mark.copy_set_hash(hash);     // merge the hash into header
 935                                            // try to install the hash
 936       test = obj->cas_set_mark(temp, mark);
 937       if (test == mark) {                  // if the hash was installed, return it

1018         hash = test.hash();
1019         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1020         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1021       }
1022       if (monitor->is_being_async_deflated()) {
1023         // If we detect that async deflation has occurred, then we
1024         // attempt to restore the header/dmw to the object's header
1025         // so that we only retry once if the deflater thread happens
1026         // to be slow.
1027         monitor->install_displaced_markword_in_object(obj);
1028         continue;
1029       }
1030     }
1031     // We finally get the hash.
1032     return hash;
1033   }
1034 }
1035 
1036 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1037                                                    Handle h_obj) {



1038   assert(current == JavaThread::current(), "Can only be called on current thread");
1039   oop obj = h_obj();
1040 
1041   markWord mark = read_stable_mark(obj);
1042 
1043   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1044     // stack-locked case, header points into owner's stack
1045     return current->is_lock_owned((address)mark.locker());
1046   }
1047 
1048   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1049     // fast-locking case, see if lock is in current's lock stack
1050     return current->lock_stack().contains(h_obj());
1051   }
1052 
1053   if (mark.has_monitor()) {
1054     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1055     // The first stage of async deflation does not affect any field
1056     // used by this comparison so the ObjectMonitor* is usable here.
1057     ObjectMonitor* monitor = mark.monitor();

1296   event->set_monitorClass(obj->klass());
1297   event->set_address((uintptr_t)(void*)obj);
1298   event->set_cause((u1)cause);
1299   event->commit();
1300 }
1301 
1302 // Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark_acquire();
  if (mark.has_monitor()) {
    // Already inflated: just sanity-check the displaced mark word and return.
    ObjectMonitor* monitor = mark.monitor();
    markWord dmw = monitor->header();
    assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
    return;
  }
  // Not yet inflated: inflate now; the resulting monitor is not needed here.
  (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}
1313 
1314 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1315                                            const InflateCause cause) {




1316   EventJavaMonitorInflate event;
1317 
1318   for (;;) {
1319     const markWord mark = object->mark_acquire();
1320 
1321     // The mark can be in one of the following states:
1322     // *  inflated     - Just return if using stack-locking.
1323     //                   If using fast-locking and the ObjectMonitor owner
1324     //                   is anonymous and the current thread owns the
1325     //                   object lock, then we make the current thread the
1326     //                   ObjectMonitor owner and remove the lock from the
1327     //                   current thread's lock stack.
1328     // *  fast-locked  - Coerce it to inflated from fast-locked.
1329     // *  stack-locked - Coerce it to inflated from stack-locked.
1330     // *  INFLATING    - Busy wait for conversion from stack-locked to
1331     //                   inflated.
1332     // *  neutral      - Aggressively inflate the object.
1333 
1334     // CASE: inflated
1335     if (mark.has_monitor()) {

 280 // removed from the system.
 281 //
 282 // Note: If the _in_use_list max exceeds the ceiling, then
 283 // monitors_used_above_threshold() will use the in_use_list max instead
 284 // of the thread count derived ceiling because we have used more
 285 // ObjectMonitors than the estimated average.
 286 //
 287 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 288 // no-progress async monitor deflation cycles in a row, then the ceiling
 289 // is adjusted upwards by monitors_used_above_threshold().
 290 //
 291 // Start the ceiling with the estimate for one thread in initialize()
 292 // which is called after cmd line options are processed.
static size_t _in_use_list_ceiling = 0;  // ceiling described in the comment block above; set in initialize()
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;  // time (ns) of last async deflation cycle
static uintx _no_progress_cnt = 0;  // consecutive no-progress deflation cycles (see ceiling note above)
static bool _no_progress_skip_increment = false;
 299 
// Valhalla: an inline type (value object) cannot be synchronized on. These
// helpers throw IllegalMonitorStateException, with the klass' external name
// as the message, when 'obj' is an inline type. Both macros rely on a local
// 'current' JavaThread being in scope at the expansion site. Use the _0
// variant in functions that return a value (THROW_MSG_0 supplies a 0 return).
#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
    JavaThread* THREAD = current;           \
    ResourceMark rm(THREAD);                \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
    JavaThread* THREAD = current;             \
    ResourceMark rm(THREAD);                  \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
 313 
 314 // =====================> Quick functions
 315 
 316 // The quick_* forms are special fast-path variants used to improve
 317 // performance.  In the simplest case, a "quick_*" implementation could
 318 // simply return false, in which case the caller will perform the necessary
 319 // state transitions and call the slow-path form.
 320 // The fast-path is designed to handle frequently arising cases in an efficient
 321 // manner and is just a degenerate "optimistic" variant of the slow-path.
 322 // returns true  -- to indicate the call was satisfied.
 323 // returns false -- to indicate the call needs the services of the slow-path.
 324 // A no-loitering ordinance is in effect for code in the quick_* family
 325 // operators: safepoints or indefinite blocking (blocking that might span a
 326 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 327 // entry.
 328 //
 329 // Consider: An interesting optimization is to have the JIT recognize the
 330 // following common idiom:
 331 //   synchronized (someobj) { .... ; notify(); }
 332 // That is, we find a notify() or notifyAll() call that immediately precedes
 333 // the monitorexit operation.  In that case the JIT could fuse the operations
 334 // into a single notifyAndExit() runtime primitive.
 335 
 336 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 337   assert(current->thread_state() == _thread_in_Java, "invariant");
 338   NoSafepointVerifier nsv;
 339   if (obj == nullptr) return false;  // slow-path for invalid obj
 340   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 341   const markWord mark = obj->mark();
 342 
 343   if (LockingMode == LM_LIGHTWEIGHT) {
 344     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 345       // Degenerate notify
 346       // fast-locked by caller so by definition the implied waitset is empty.
 347       return true;
 348     }
 349   } else if (LockingMode == LM_LEGACY) {
 350     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 351       // Degenerate notify
 352       // stack-locked by caller so by definition the implied waitset is empty.
 353       return true;
 354     }
 355   }
 356 
 357   if (mark.has_monitor()) {
 358     ObjectMonitor* const mon = mark.monitor();
 359     assert(mon->object() == oop(obj), "invariant");
 360     if (mon->owner() != current) return false;  // slow-path for IMS exception

 377     }
 378     return true;
 379   }
 380 
 381   // other IMS exception states take the slow-path
 382   return false;
 383 }
 384 
 385 
 386 // The LockNode emitted directly at the synchronization site would have
 387 // been too big if it were to have included support for the cases of inflated
 388 // recursive enter and exit, so they go here instead.
 389 // Note that we can't safely call AsyncPrintJavaStack() from within
 390 // quick_enter() as our thread state remains _in_Java.
 391 
 392 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 393                                      BasicLock * lock) {
 394   assert(current->thread_state() == _thread_in_Java, "invariant");
 395   NoSafepointVerifier nsv;
 396   if (obj == nullptr) return false;       // Need to throw NPE
 397   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 398 
 399   if (obj->klass()->is_value_based()) {
 400     return false;
 401   }
 402 
 403   const markWord mark = obj->mark();
 404 
 405   if (mark.has_monitor()) {
 406     ObjectMonitor* const m = mark.monitor();
 407     // An async deflation or GC can race us before we manage to make
 408     // the ObjectMonitor busy by setting the owner below. If we detect
 409     // that race we just bail out to the slow-path here.
 410     if (m->object_peek() == nullptr) {
 411       return false;
 412     }
 413     JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
 414 
 415     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 416     // and observability
 417     // Case: light contention possibly amenable to TLE

 499   if (bcp_was_adjusted) {
 500     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 501   }
 502 }
 503 
// Returns true iff heavyweight monitor locking (LM_MONITOR) is in effect.
// Only the listed platforms honor LM_MONITOR here; on all other platforms
// this is unconditionally false and the fast-locking paths are used.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}
 511 
 512 // -----------------------------------------------------------------------------
 513 // Monitor Enter/Exit
 514 // The interpreter and compiler assembly code tries to lock using the fast path
 515 // of this algorithm. Make sure to update that code if the following function is
 516 // changed. The implementation is extremely sensitive to race condition. Be careful.
 517 
 518 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 519   CHECK_THROW_NOSYNC_IMSE(obj);
 520   if (obj->klass()->is_value_based()) {
 521     handle_sync_on_value_based_class(obj, current);
 522   }
 523 
 524   current->inc_held_monitor_count();
 525 
 526   if (!useHeavyMonitors()) {
 527     if (LockingMode == LM_LIGHTWEIGHT) {
 528       // Fast-locking does not use the 'lock' argument.
 529       LockStack& lock_stack = current->lock_stack();
 530       if (lock_stack.can_push()) {
 531         markWord mark = obj()->mark_acquire();
 532         if (mark.is_neutral()) {
 533           assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
 534           // Try to swing into 'fast-locked' state.
 535           markWord locked_mark = mark.set_fast_locked();
 536           markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
 537           if (old_mark == mark) {
 538             // Successfully fast-locked, push object to lock-stack and return.
 539             lock_stack.push(obj());

 569   } else if (VerifyHeavyMonitors) {
 570     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 571   }
 572 
 573   // An async deflation can race after the inflate() call and before
 574   // enter() can make the ObjectMonitor busy. enter() returns false if
 575   // we have lost the race to async deflation and we simply try again.
 576   while (true) {
 577     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 578     if (monitor->enter(current)) {
 579       return;
 580     }
 581   }
 582 }
 583 
 584 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 585   current->dec_held_monitor_count();
 586 
 587   if (!useHeavyMonitors()) {
 588     markWord mark = object->mark();
 589     if (EnableValhalla && mark.is_inline_type()) {
 590       return;
 591     }
 592     if (LockingMode == LM_LIGHTWEIGHT) {
 593       // Fast-locking does not use the 'lock' argument.
 594       if (mark.is_fast_locked()) {
 595         markWord unlocked_mark = mark.set_unlocked();
 596         markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
 597         if (old_mark != mark) {
 598           // Another thread won the CAS, it must have inflated the monitor.
 599           // It can only have installed an anonymously locked monitor at this point.
 600           // Fetch that monitor, set owner correctly to this thread, and
 601           // exit it (allowing waiting threads to enter).
 602           assert(old_mark.has_monitor(), "must have monitor");
 603           ObjectMonitor* monitor = old_mark.monitor();
 604           assert(monitor->is_owner_anonymous(), "must be anonymous owner");
 605           monitor->set_owner_from_anonymous(current);
 606           monitor->exit(current);
 607         }
 608         LockStack& lock_stack = current->lock_stack();
 609         lock_stack.remove(object);
 610         return;
 611       }

 657   // The ObjectMonitor* can't be async deflated until ownership is
 658   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 659   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 660   if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
 661     // It must be owned by us. Pop lock object from lock stack.
 662     LockStack& lock_stack = current->lock_stack();
 663     oop popped = lock_stack.pop();
 664     assert(popped == object, "must be owned by this thread");
 665     monitor->set_owner_from_anonymous(current);
 666   }
 667   monitor->exit(current);
 668 }
 669 
 670 // -----------------------------------------------------------------------------
 671 // JNI locks on java objects
 672 // NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  // Value-based classes get the diagnostic treatment before locking.
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }
  // Valhalla: JNI MonitorEnter on an inline type throws IMSE.
  CHECK_THROW_NOSYNC_IMSE(obj);

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
    if (monitor->enter(current)) {
      // Record one JNI-held monitor (second argument flags JNI flavor).
      current->inc_held_monitor_count(1, true);
      break;
    }
  }
  // Restore the default: subsequent pending-monitor state is from Java code.
  current->set_current_pending_monitor_is_from_java(true);
}
 693 
 694 // NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;
  // Valhalla: JNI MonitorExit on an inline type throws IMSE.
  CHECK_THROW_NOSYNC_IMSE(obj);

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
    // Drop the JNI-held monitor accounting taken in jni_enter().
    current->dec_held_monitor_count(1, true);
  }
}
 710 
 711 // -----------------------------------------------------------------------------
 712 // Internal VM locks on java objects
 713 // standard constructor, allows locking failures
 714 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 715   _thread = thread;
 716   _thread->check_for_valid_safepoint_state();
 717   _obj = obj;
 718 
 719   if (_obj() != nullptr) {
 720     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 721   }
 722 }
 723 
 724 ObjectLocker::~ObjectLocker() {
 725   if (_obj() != nullptr) {
 726     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 727   }
 728 }
 729 
 730 
 731 // -----------------------------------------------------------------------------
 732 //  Wait/Notify/NotifyAll
 733 // NOTE: must use heavy weight monitor to handle wait()
 734 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 735   JavaThread* current = THREAD;
 736   CHECK_THROW_NOSYNC_IMSE_0(obj);
 737   if (millis < 0) {
 738     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 739   }
 740   // The ObjectMonitor* can't be async deflated because the _waiters
 741   // field is incremented before ownership is dropped and decremented
 742   // after ownership is regained.
 743   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 744 
 745   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 746   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 747 
 748   // This dummy call is in place to get around dtrace bug 6254741.  Once
 749   // that's fixed we can uncomment the following line, remove the call
 750   // and change this function back into a "void" func.
 751   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 752   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 753   return ret_code;
 754 }
 755 
 756 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 757   JavaThread* current = THREAD;
 758   CHECK_THROW_NOSYNC_IMSE(obj);
 759 
 760   markWord mark = obj->mark();
 761   if (LockingMode == LM_LIGHTWEIGHT) {
 762     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 763       // Not inflated so there can't be any waiters to notify.
 764       return;
 765     }
 766   } else if (LockingMode == LM_LEGACY) {
 767     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 768       // Not inflated so there can't be any waiters to notify.
 769       return;
 770     }
 771   }
 772   // The ObjectMonitor* can't be async deflated until ownership is
 773   // dropped by the calling thread.
 774   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 775   monitor->notify(CHECK);
 776 }
 777 
 778 // NOTE: see comment of notify()
 779 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 780   JavaThread* current = THREAD;
 781   CHECK_THROW_NOSYNC_IMSE(obj);
 782 
 783   markWord mark = obj->mark();
 784   if (LockingMode == LM_LIGHTWEIGHT) {
 785     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 786       // Not inflated so there can't be any waiters to notify.
 787       return;
 788     }
 789   } else if (LockingMode == LM_LEGACY) {
 790     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 791       // Not inflated so there can't be any waiters to notify.
 792       return;
 793     }
 794   }
 795   // The ObjectMonitor* can't be async deflated until ownership is
 796   // dropped by the calling thread.
 797   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 798   monitor->notifyAll(CHECK);
 799 }
 800 
 801 // -----------------------------------------------------------------------------

 923     unsigned v = current->_hashStateW;
 924     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 925     current->_hashStateW = v;
 926     value = v;
 927   }
 928 
 929   value &= markWord::hash_mask;
 930   if (value == 0) value = 0xBAD;
 931   assert(value != markWord::no_hash, "invariant");
 932   return value;
 933 }
 934 
 935 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
 936 // calculations as part of JVM/TI tagging.
 937 static bool is_lock_owned(Thread* thread, oop obj) {
 938   assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
 939   return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
 940 }
 941 
 942 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 943   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 944     // VM should be calling bootstrap method
 945     ShouldNotReachHere();
 946   }
 947 
 948   while (true) {
 949     ObjectMonitor* monitor = nullptr;
 950     markWord temp, test;
 951     intptr_t hash;
 952     markWord mark = read_stable_mark(obj);
 953     if (VerifyHeavyMonitors) {
 954       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
 955       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 956     }
 957     if (mark.is_neutral()) {               // if this is a normal header
 958       hash = mark.hash();
 959       if (hash != 0) {                     // if it has a hash, just return it
 960         return hash;
 961       }
 962       hash = get_next_hash(current, obj);  // get a new hash
 963       temp = mark.copy_set_hash(hash);     // merge the hash into header
 964                                            // try to install the hash
 965       test = obj->cas_set_mark(temp, mark);
 966       if (test == mark) {                  // if the hash was installed, return it

1047         hash = test.hash();
1048         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1049         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1050       }
1051       if (monitor->is_being_async_deflated()) {
1052         // If we detect that async deflation has occurred, then we
1053         // attempt to restore the header/dmw to the object's header
1054         // so that we only retry once if the deflater thread happens
1055         // to be slow.
1056         monitor->install_displaced_markword_in_object(obj);
1057         continue;
1058       }
1059     }
1060     // We finally get the hash.
1061     return hash;
1062   }
1063 }
1064 
1065 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1066                                                    Handle h_obj) {
1067   if (EnableValhalla && h_obj->mark().is_inline_type()) {
1068     return false;
1069   }
1070   assert(current == JavaThread::current(), "Can only be called on current thread");
1071   oop obj = h_obj();
1072 
1073   markWord mark = read_stable_mark(obj);
1074 
1075   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1076     // stack-locked case, header points into owner's stack
1077     return current->is_lock_owned((address)mark.locker());
1078   }
1079 
1080   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1081     // fast-locking case, see if lock is in current's lock stack
1082     return current->lock_stack().contains(h_obj());
1083   }
1084 
1085   if (mark.has_monitor()) {
1086     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1087     // The first stage of async deflation does not affect any field
1088     // used by this comparison so the ObjectMonitor* is usable here.
1089     ObjectMonitor* monitor = mark.monitor();

1328   event->set_monitorClass(obj->klass());
1329   event->set_address((uintptr_t)(void*)obj);
1330   event->set_cause((u1)cause);
1331   event->commit();
1332 }
1333 
1334 // Fast path code shared by multiple functions
1335 void ObjectSynchronizer::inflate_helper(oop obj) {
1336   markWord mark = obj->mark_acquire();
1337   if (mark.has_monitor()) {
1338     ObjectMonitor* monitor = mark.monitor();
1339     markWord dmw = monitor->header();
1340     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1341     return;
1342   }
1343   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1344 }
1345 
1346 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1347                                            const InflateCause cause) {
1348   if (EnableValhalla) {
1349     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1350   }
1351 
1352   EventJavaMonitorInflate event;
1353 
1354   for (;;) {
1355     const markWord mark = object->mark_acquire();
1356 
1357     // The mark can be in one of the following states:
1358     // *  inflated     - Just return if using stack-locking.
1359     //                   If using fast-locking and the ObjectMonitor owner
1360     //                   is anonymous and the current thread owns the
1361     //                   object lock, then we make the current thread the
1362     //                   ObjectMonitor owner and remove the lock from the
1363     //                   current thread's lock stack.
1364     // *  fast-locked  - Coerce it to inflated from fast-locked.
1365     // *  stack-locked - Coerce it to inflated from stack-locked.
1366     // *  INFLATING    - Busy wait for conversion from stack-locked to
1367     //                   inflated.
1368     // *  neutral      - Aggressively inflate the object.
1369 
1370     // CASE: inflated
1371     if (mark.has_monitor()) {
< prev index next >