281 // removed from the system.
282 //
283 // Note: If the _in_use_list max exceeds the ceiling, then
284 // monitors_used_above_threshold() will use the in_use_list max instead
285 // of the thread count derived ceiling because we have used more
286 // ObjectMonitors than the estimated average.
287 //
288 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
289 // no-progress async monitor deflation cycles in a row, then the ceiling
290 // is adjusted upwards by monitors_used_above_threshold().
291 //
292 // Start the ceiling with the estimate for one thread in initialize()
293 // which is called after cmd line options are processed.
// Ceiling on the in-use ObjectMonitor list; starts at 0 and is set in
// initialize() after command-line processing (see the comment block above).
294 static size_t _in_use_list_ceiling = 0;
295 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
296 bool volatile ObjectSynchronizer::_is_final_audit = false;
297 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// State for the no-progress deflation heuristic described above — presumably
// maintained by deflate_idle_monitors(); TODO confirm (not visible in chunk).
298 static uintx _no_progress_cnt = 0;
299 static bool _no_progress_skip_increment = false;
300
301 // =====================> Quick functions
302
303 // The quick_* forms are special fast-path variants used to improve
304 // performance. In the simplest case, a "quick_*" implementation could
305 // simply return false, in which case the caller will perform the necessary
306 // state transitions and call the slow-path form.
307 // The fast-path is designed to handle frequently arising cases in an efficient
308 // manner and is just a degenerate "optimistic" variant of the slow-path.
309 // returns true -- to indicate the call was satisfied.
310 // returns false -- to indicate the call needs the services of the slow-path.
311 // A no-loitering ordinance is in effect for code in the quick_* family
312 // operators: safepoints or indefinite blocking (blocking that might span a
313 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
314 // entry.
315 //
316 // Consider: An interesting optimization is to have the JIT recognize the
317 // following common idiom:
318 // synchronized (someobj) { .... ; notify(); }
319 // That is, we find a notify() or notifyAll() call that immediately precedes
320 // the monitorexit operation. In that case the JIT could fuse the operations
321 // into a single notifyAndExit() runtime primitive.
322
// Fast-path notify/notifyAll. Returns true when the operation was fully
// handled here (no safepoint, no blocking); false routes the caller to the
// slow path, including the obj == nullptr case and ownership violations.
323 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
324 assert(current->thread_state() == _thread_in_Java, "invariant");
325 NoSafepointVerifier nsv;
326 if (obj == nullptr) return false; // slow-path for invalid obj
327 const markWord mark = obj->mark();
328 
329 if (LockingMode == LM_LIGHTWEIGHT) {
330 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
331 // Degenerate notify
332 // fast-locked by caller so by definition the implied waitset is empty.
333 return true;
334 }
335 } else if (LockingMode == LM_LEGACY) {
336 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
337 // Degenerate notify
338 // stack-locked by caller so by definition the implied waitset is empty.
339 return true;
340 }
341 }
342 
343 if (mark.has_monitor()) {
344 ObjectMonitor* const mon = mark.monitor();
345 assert(mon->object() == oop(obj), "invariant");
346 if (mon->owner() != current) return false; // slow-path for IMS exception
// NOTE(review): original lines 347-362 (the actual notify dispatch on the
// owned monitor) are not visible in this chunk.
363 }
364 return true;
365 }
366 
367 // other IMS exception states take the slow-path
368 return false;
369 }
370
371
372 // The LockNode emitted directly at the synchronization site would have
373 // been too big if it were to have included support for the cases of inflated
374 // recursive enter and exit, so they go here instead.
375 // Note that we can't safely call AsyncPrintJavaStack() from within
376 // quick_enter() as our thread state remains _in_Java.
377
// Fast-path monitor enter. Returns false whenever the slow path is needed:
// null receiver (caller throws NPE), value-based class, or a race lost to
// async deflation/GC.
378 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
379 BasicLock * lock) {
380 assert(current->thread_state() == _thread_in_Java, "invariant");
381 NoSafepointVerifier nsv;
382 if (obj == nullptr) return false; // Need to throw NPE
383 
384 if (obj->klass()->is_value_based()) {
385 return false;
386 }
387 
388 const markWord mark = obj->mark();
389 
390 if (mark.has_monitor()) {
391 ObjectMonitor* const m = mark.monitor();
392 // An async deflation or GC can race us before we manage to make
393 // the ObjectMonitor busy by setting the owner below. If we detect
394 // that race we just bail out to the slow-path here.
395 if (m->object_peek() == nullptr) {
396 return false;
397 }
398 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
399 
400 // Lock contention and Transactional Lock Elision (TLE) diagnostics
401 // and observability
402 // Case: light contention possibly amenable to TLE
// NOTE(review): original lines 403-483 are missing from this chunk; the
// lines below are the tail of a different (bcp-adjusting) function.
484 if (bcp_was_adjusted) {
485 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
486 }
487 }
488
// Returns true when every monitor operation must take the heavyweight
// (inflated) path, i.e. LockingMode == LM_MONITOR on a supported platform.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  const bool heavy_only = (LockingMode == LM_MONITOR);
  return heavy_only;
#else
  // Platforms without LM_MONITOR support never force the heavyweight path.
  return false;
#endif
}
496
497 // -----------------------------------------------------------------------------
498 // Monitor Enter/Exit
499 // The interpreter and compiler assembly code tries to lock using the fast path
500 // of this algorithm. Make sure to update that code if the following function is
501 // changed. The implementation is extremely sensitive to race condition. Be careful.
502
// Slow-path monitor enter: fast-lock/stack-lock when possible, otherwise
// inflate and enter the heavyweight monitor (retrying on async deflation).
503 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
504 if (obj->klass()->is_value_based()) {
505 handle_sync_on_value_based_class(obj, current);
506 }
507 
508 current->inc_held_monitor_count();
509 
510 if (!useHeavyMonitors()) {
511 if (LockingMode == LM_LIGHTWEIGHT) {
512 // Fast-locking does not use the 'lock' argument.
513 LockStack& lock_stack = current->lock_stack();
514 if (lock_stack.can_push()) {
515 markWord mark = obj()->mark_acquire();
516 if (mark.is_neutral()) {
517 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
518 // Try to swing into 'fast-locked' state.
519 markWord locked_mark = mark.set_fast_locked();
520 markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
521 if (old_mark == mark) {
522 // Successfully fast-locked, push object to lock-stack and return.
523 lock_stack.push(obj());
// NOTE(review): original lines 524-552 (remainder of the fast/stack-lock
// paths) are not visible in this chunk.
553 } else if (VerifyHeavyMonitors) {
554 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
555 }
556 
557 // An async deflation can race after the inflate() call and before
558 // enter() can make the ObjectMonitor busy. enter() returns false if
559 // we have lost the race to async deflation and we simply try again.
560 while (true) {
561 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
562 if (monitor->enter(current)) {
563 return;
564 }
565 }
566 }
567
// Slow-path monitor exit: undoes fast/stack locking when possible; otherwise
// exits via the inflated ObjectMonitor.
568 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
569 current->dec_held_monitor_count();
570 
571 if (!useHeavyMonitors()) {
572 markWord mark = object->mark();
573 if (LockingMode == LM_LIGHTWEIGHT) {
574 // Fast-locking does not use the 'lock' argument.
575 if (mark.is_fast_locked()) {
576 markWord unlocked_mark = mark.set_unlocked();
577 markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
578 if (old_mark != mark) {
579 // Another thread won the CAS, it must have inflated the monitor.
580 // It can only have installed an anonymously locked monitor at this point.
581 // Fetch that monitor, set owner correctly to this thread, and
582 // exit it (allowing waiting threads to enter).
583 assert(old_mark.has_monitor(), "must have monitor");
584 ObjectMonitor* monitor = old_mark.monitor();
585 assert(monitor->is_owner_anonymous(), "must be anonymous owner");
586 monitor->set_owner_from_anonymous(current);
587 monitor->exit(current);
588 }
589 LockStack& lock_stack = current->lock_stack();
590 lock_stack.remove(object);
591 return;
592 }
// NOTE(review): original lines 593-637 (the LM_LEGACY stack-lock exit path)
// are not visible in this chunk.
638 // The ObjectMonitor* can't be async deflated until ownership is
639 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
640 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
641 if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
642 // It must be owned by us. Pop lock object from lock stack.
643 LockStack& lock_stack = current->lock_stack();
644 oop popped = lock_stack.pop();
645 assert(popped == object, "must be owned by this thread");
646 monitor->set_owner_from_anonymous(current);
647 }
648 monitor->exit(current);
649 }
650
651 // -----------------------------------------------------------------------------
652 // JNI locks on java objects
653 // NOTE: must use heavy weight monitor to handle jni monitor enter
654 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
655 if (obj->klass()->is_value_based()) {
656 handle_sync_on_value_based_class(obj, current);
657 }
658
659 // the current locking is from JNI instead of Java code
660 current->set_current_pending_monitor_is_from_java(false);
661 // An async deflation can race after the inflate() call and before
662 // enter() can make the ObjectMonitor busy. enter() returns false if
663 // we have lost the race to async deflation and we simply try again.
664 while (true) {
665 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
666 if (monitor->enter(current)) {
667 current->inc_held_monitor_count(1, true);
668 break;
669 }
670 }
671 current->set_current_pending_monitor_is_from_java(true);
672 }
673
674 // NOTE: must use heavy weight monitor to handle jni monitor exit
675 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
676 JavaThread* current = THREAD;
677
678 // The ObjectMonitor* can't be async deflated until ownership is
679 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
680 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
681 // If this thread has locked the object, exit the monitor. We
682 // intentionally do not use CHECK on check_owner because we must exit the
683 // monitor even if an exception was already pending.
684 if (monitor->check_owner(THREAD)) {
685 monitor->exit(current);
686 current->dec_held_monitor_count(1, true);
687 }
688 }
689
690 // -----------------------------------------------------------------------------
691 // Internal VM locks on java objects
692 // standard constructor, allows locking failures
693 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
694 _thread = thread;
695 _thread->check_for_valid_safepoint_state();
696 _obj = obj;
697
698 if (_obj() != nullptr) {
699 ObjectSynchronizer::enter(_obj, &_lock, _thread);
700 }
701 }
702
703 ObjectLocker::~ObjectLocker() {
704 if (_obj() != nullptr) {
705 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
706 }
707 }
708
709
710 // -----------------------------------------------------------------------------
711 // Wait/Notify/NotifyAll
712 // NOTE: must use heavy weight monitor to handle wait()
713 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
714 JavaThread* current = THREAD;
715 if (millis < 0) {
716 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
717 }
718 // The ObjectMonitor* can't be async deflated because the _waiters
719 // field is incremented before ownership is dropped and decremented
720 // after ownership is regained.
721 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
722
723 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
724 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
725
726 // This dummy call is in place to get around dtrace bug 6254741. Once
727 // that's fixed we can uncomment the following line, remove the call
728 // and change this function back into a "void" func.
729 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
730 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
731 return ret_code;
732 }
733
734 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
735 JavaThread* current = THREAD;
736
737 markWord mark = obj->mark();
738 if (LockingMode == LM_LIGHTWEIGHT) {
739 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
740 // Not inflated so there can't be any waiters to notify.
741 return;
742 }
743 } else if (LockingMode == LM_LEGACY) {
744 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
745 // Not inflated so there can't be any waiters to notify.
746 return;
747 }
748 }
749 // The ObjectMonitor* can't be async deflated until ownership is
750 // dropped by the calling thread.
751 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
752 monitor->notify(CHECK);
753 }
754
755 // NOTE: see comment of notify()
756 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
757 JavaThread* current = THREAD;
758
759 markWord mark = obj->mark();
760 if (LockingMode == LM_LIGHTWEIGHT) {
761 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
762 // Not inflated so there can't be any waiters to notify.
763 return;
764 }
765 } else if (LockingMode == LM_LEGACY) {
766 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
767 // Not inflated so there can't be any waiters to notify.
768 return;
769 }
770 }
771 // The ObjectMonitor* can't be async deflated until ownership is
772 // dropped by the calling thread.
773 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
774 monitor->notifyAll(CHECK);
775 }
776
777 // -----------------------------------------------------------------------------
// NOTE(review): tail of get_next_hash(); the head (original lines before 899)
// is not visible in this chunk. This looks like the final mixing step of a
// thread-local xor-shift RNG — TODO confirm against the full function.
899 unsigned v = current->_hashStateW;
900 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
901 current->_hashStateW = v;
902 value = v;
903 }
904 
905 value &= markWord::hash_mask;
// 0 means "no hash"; substitute a fixed non-zero value instead.
906 if (value == 0) value = 0xBAD;
907 assert(value != markWord::no_hash, "invariant");
908 return value;
909 }
910
911 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
912 // calculations as part of JVM/TI tagging.
913 static bool is_lock_owned(Thread* thread, oop obj) {
914 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
915 return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
916 }
917
// Computes (and installs, via CAS) the identity hash for obj, retrying when
// racing with other hashers or async deflation.
918 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
919 
920 while (true) {
921 ObjectMonitor* monitor = nullptr;
922 markWord temp, test;
923 intptr_t hash;
924 markWord mark = read_stable_mark(obj);
925 if (VerifyHeavyMonitors) {
926 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
927 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
928 }
929 if (mark.is_neutral()) { // if this is a normal header
930 hash = mark.hash();
931 if (hash != 0) { // if it has a hash, just return it
932 return hash;
933 }
934 hash = get_next_hash(current, obj); // get a new hash
935 temp = mark.copy_set_hash(hash); // merge the hash into header
936 // try to install the hash
937 test = obj->cas_set_mark(temp, mark);
938 if (test == mark) { // if the hash was installed, return it
// NOTE(review): original lines 939-1018 (locked/inflated cases) are not
// visible in this chunk; the lines below handle the lost-CAS and async
// deflation recovery paths.
1019 hash = test.hash();
1020 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1021 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1022 }
1023 if (monitor->is_being_async_deflated()) {
1024 // If we detect that async deflation has occurred, then we
1025 // attempt to restore the header/dmw to the object's header
1026 // so that we only retry once if the deflater thread happens
1027 // to be slow.
1028 monitor->install_displaced_markword_in_object(obj);
1029 continue;
1030 }
1031 }
1032 // We finally get the hash.
1033 return hash;
1034 }
1035 }
1036
// Returns whether 'current' owns the lock on h_obj, for any locking mode.
1037 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1038 Handle h_obj) {
1039 assert(current == JavaThread::current(), "Can only be called on current thread")
;
1040 oop obj = h_obj();
1041 
1042 markWord mark = read_stable_mark(obj);
1043 
1044 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1045 // stack-locked case, header points into owner's stack
1046 return current->is_lock_owned((address)mark.locker());
1047 }
1048 
1049 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1050 // fast-locking case, see if lock is in current's lock stack
1051 return current->lock_stack().contains(h_obj());
1052 }
1053 
1054 if (mark.has_monitor()) {
1055 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1056 // The first stage of async deflation does not affect any field
1057 // used by this comparison so the ObjectMonitor* is usable here.
1058 ObjectMonitor* monitor = mark.monitor();
// NOTE(review): original lines 1059-1296 are missing from this chunk; the
// lines below are the tail of a JFR monitor-inflate event posting function.
1297 event->set_monitorClass(obj->klass());
1298 event->set_address((uintptr_t)(void*)obj);
1299 event->set_cause((u1)cause);
1300 event->commit();
1301 }
1302
1303 // Fast path code shared by multiple functions
1304 void ObjectSynchronizer::inflate_helper(oop obj) {
1305 markWord mark = obj->mark_acquire();
1306 if (mark.has_monitor()) {
1307 ObjectMonitor* monitor = mark.monitor();
1308 markWord dmw = monitor->header();
1309 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1310 return;
1311 }
1312 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1313 }
1314
// Inflates the lock on 'object' to a heavyweight ObjectMonitor and returns
// it. NOTE(review): only the head of this function is visible in this chunk.
1315 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1316 const InflateCause cause) {
1317 EventJavaMonitorInflate event;
1318 
1319 for (;;) {
1320 const markWord mark = object->mark_acquire();
1321 
1322 // The mark can be in one of the following states:
1323 // * inflated - Just return if using stack-locking.
1324 // If using fast-locking and the ObjectMonitor owner
1325 // is anonymous and the current thread owns the
1326 // object lock, then we make the current thread the
1327 // ObjectMonitor owner and remove the lock from the
1328 // current thread's lock stack.
1329 // * fast-locked - Coerce it to inflated from fast-locked.
1330 // * stack-locked - Coerce it to inflated from stack-locked.
1331 // * INFLATING - Busy wait for conversion from stack-locked to
1332 // inflated.
1333 // * neutral - Aggressively inflate the object.
1334 
1335 // CASE: inflated
1336 if (mark.has_monitor()) {
// NOTE(review): tail of the async monitor deflation cycle; the head of the
// function is not visible in this chunk. Also note the style inconsistency
// below: two 'ls != NULL' checks where the rest of the function uses
// 'ls != nullptr' — should be normalized to nullptr.
1715 // safely read the mark-word and look-through to the object-monitor, without
1716 // being afraid that the object-monitor is going away.
1717 VM_RendezvousGCThreads sync_gc;
1718 VMThread::execute(&sync_gc);
1719 
1720 if (ls != nullptr) {
1721 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1722 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1723 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1724 timer.start();
1725 }
1726 } else {
1727 // This is not a monitor deflation thread.
1728 // No handshake or rendezvous is needed when we are already at safepoint.
1729 assert_at_safepoint();
1730 }
1731 
1732 // After the handshake, safely free the ObjectMonitors that were
1733 // deflated and unlinked in this cycle.
1734 if (current->is_Java_thread()) {
1735 if (ls != NULL) {
1736 timer.stop();
1737 ls->print_cr("before setting blocked: unlinked_count=" SIZE_FORMAT
1738 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1739 SIZE_FORMAT ", max=" SIZE_FORMAT,
1740 unlinked_count, in_use_list_ceiling(),
1741 _in_use_list.count(), _in_use_list.max());
1742 }
1743 // Mark the calling JavaThread blocked (safepoint safe) while we free
1744 // the ObjectMonitors so we don't delay safepoints whilst doing that.
1745 ThreadBlockInVM tbivm(JavaThread::cast(current));
1746 if (ls != NULL) {
1747 ls->print_cr("after setting blocked: in_use_list stats: ceiling="
1748 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1749 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1750 timer.start();
1751 }
1752 deleted_count = delete_monitors(&delete_list);
1753 // ThreadBlockInVM is destroyed here
1754 } else {
1755 // A non-JavaThread can just free the ObjectMonitors:
1756 deleted_count = delete_monitors(&delete_list);
1757 }
1758 assert(unlinked_count == deleted_count, "must be");
1759 }
1760 
1761 if (ls != nullptr) {
1762 timer.stop();
1763 if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) {
1764 ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
1765 deflated_count, unlinked_count, timer.seconds());
1766 }
|
281 // removed from the system.
282 //
283 // Note: If the _in_use_list max exceeds the ceiling, then
284 // monitors_used_above_threshold() will use the in_use_list max instead
285 // of the thread count derived ceiling because we have used more
286 // ObjectMonitors than the estimated average.
287 //
288 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
289 // no-progress async monitor deflation cycles in a row, then the ceiling
290 // is adjusted upwards by monitors_used_above_threshold().
291 //
292 // Start the ceiling with the estimate for one thread in initialize()
293 // which is called after cmd line options are processed.
// Ceiling on the in-use ObjectMonitor list; starts at 0 and is set in
// initialize() after command-line processing (see the comment block above).
294 static size_t _in_use_list_ceiling = 0;
295 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
296 bool volatile ObjectSynchronizer::_is_final_audit = false;
297 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// State for the no-progress deflation heuristic described above — presumably
// maintained by deflate_idle_monitors(); TODO confirm (not visible in chunk).
298 static uintx _no_progress_cnt = 0;
299 static bool _no_progress_skip_increment = false;
300
// Throws IllegalMonitorStateException when EnableValhalla is on and obj is an
// inline (value) type, which cannot be synchronized on. Requires a variable
// named 'current' (a JavaThread*) in scope at the expansion site.
301 #define CHECK_THROW_NOSYNC_IMSE(obj) \
302 if (EnableValhalla && (obj)->mark().is_inline_type()) { \
303 JavaThread* THREAD = current; \
304 ResourceMark rm(THREAD); \
305 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
306 }
307 
// Variant for value-returning functions: THROW_MSG_0 returns 0 to the caller.
308 #define CHECK_THROW_NOSYNC_IMSE_0(obj) \
309 if (EnableValhalla && (obj)->mark().is_inline_type()) { \
310 JavaThread* THREAD = current; \
311 ResourceMark rm(THREAD); \
312 THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
313 }
314
315 // =====================> Quick functions
316
317 // The quick_* forms are special fast-path variants used to improve
318 // performance. In the simplest case, a "quick_*" implementation could
319 // simply return false, in which case the caller will perform the necessary
320 // state transitions and call the slow-path form.
321 // The fast-path is designed to handle frequently arising cases in an efficient
322 // manner and is just a degenerate "optimistic" variant of the slow-path.
323 // returns true -- to indicate the call was satisfied.
324 // returns false -- to indicate the call needs the services of the slow-path.
325 // A no-loitering ordinance is in effect for code in the quick_* family
326 // operators: safepoints or indefinite blocking (blocking that might span a
327 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
328 // entry.
329 //
330 // Consider: An interesting optimization is to have the JIT recognize the
331 // following common idiom:
332 // synchronized (someobj) { .... ; notify(); }
333 // That is, we find a notify() or notifyAll() call that immediately precedes
334 // the monitorexit operation. In that case the JIT could fuse the operations
335 // into a single notifyAndExit() runtime primitive.
336
// Fast-path notify/notifyAll (Valhalla variant: inline types asserted
// against). Returns true when fully handled here; false routes to the slow
// path.
337 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
338 assert(current->thread_state() == _thread_in_Java, "invariant");
339 NoSafepointVerifier nsv;
340 if (obj == nullptr) return false; // slow-path for invalid obj
341 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
342 const markWord mark = obj->mark();
343 
344 if (LockingMode == LM_LIGHTWEIGHT) {
345 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
346 // Degenerate notify
347 // fast-locked by caller so by definition the implied waitset is empty.
348 return true;
349 }
350 } else if (LockingMode == LM_LEGACY) {
351 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
352 // Degenerate notify
353 // stack-locked by caller so by definition the implied waitset is empty.
354 return true;
355 }
356 }
357 
358 if (mark.has_monitor()) {
359 ObjectMonitor* const mon = mark.monitor();
360 assert(mon->object() == oop(obj), "invariant");
361 if (mon->owner() != current) return false; // slow-path for IMS exception
// NOTE(review): original lines 362-377 (the actual notify dispatch on the
// owned monitor) are not visible in this chunk.
378 }
379 return true;
380 }
381 
382 // other IMS exception states take the slow-path
383 return false;
384 }
385
386
387 // The LockNode emitted directly at the synchronization site would have
388 // been too big if it were to have included support for the cases of inflated
389 // recursive enter and exit, so they go here instead.
390 // Note that we can't safely call AsyncPrintJavaStack() from within
391 // quick_enter() as our thread state remains _in_Java.
392
// Fast-path monitor enter (Valhalla variant). Returns false whenever the
// slow path is needed: null receiver, value-based class, or a race lost to
// async deflation/GC.
393 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
394 BasicLock * lock) {
395 assert(current->thread_state() == _thread_in_Java, "invariant");
396 NoSafepointVerifier nsv;
397 if (obj == nullptr) return false; // Need to throw NPE
398 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
399 
400 if (obj->klass()->is_value_based()) {
401 return false;
402 }
403 
404 const markWord mark = obj->mark();
405 
406 if (mark.has_monitor()) {
407 ObjectMonitor* const m = mark.monitor();
408 // An async deflation or GC can race us before we manage to make
409 // the ObjectMonitor busy by setting the owner below. If we detect
410 // that race we just bail out to the slow-path here.
411 if (m->object_peek() == nullptr) {
412 return false;
413 }
414 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
415 
416 // Lock contention and Transactional Lock Elision (TLE) diagnostics
417 // and observability
418 // Case: light contention possibly amenable to TLE
// NOTE(review): original lines 419-499 are missing from this chunk; the
// lines below are the tail of a different (bcp-adjusting) function.
500 if (bcp_was_adjusted) {
501 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
502 }
503 }
504
// True when monitor operations must always use inflated (heavyweight)
// monitors; only LM_MONITOR on a supported platform enables this.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LM_MONITOR == LockingMode;
#else
  return false;
#endif
}
512
513 // -----------------------------------------------------------------------------
514 // Monitor Enter/Exit
515 // The interpreter and compiler assembly code tries to lock using the fast path
516 // of this algorithm. Make sure to update that code if the following function is
517 // changed. The implementation is extremely sensitive to race condition. Be careful.
518
// Slow-path monitor enter (Valhalla variant: throws IMSE for inline types).
519 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
520 CHECK_THROW_NOSYNC_IMSE(obj);
521 if (obj->klass()->is_value_based()) {
522 handle_sync_on_value_based_class(obj, current);
523 }
524 
525 current->inc_held_monitor_count();
526 
527 if (!useHeavyMonitors()) {
528 if (LockingMode == LM_LIGHTWEIGHT) {
529 // Fast-locking does not use the 'lock' argument.
530 LockStack& lock_stack = current->lock_stack();
531 if (lock_stack.can_push()) {
532 markWord mark = obj()->mark_acquire();
533 if (mark.is_neutral()) {
534 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
535 // Try to swing into 'fast-locked' state.
536 markWord locked_mark = mark.set_fast_locked();
537 markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
538 if (old_mark == mark) {
539 // Successfully fast-locked, push object to lock-stack and return.
540 lock_stack.push(obj());
// NOTE(review): original lines 541-569 (remainder of the fast/stack-lock
// paths) are not visible in this chunk.
570 } else if (VerifyHeavyMonitors) {
571 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
572 }
573 
574 // An async deflation can race after the inflate() call and before
575 // enter() can make the ObjectMonitor busy. enter() returns false if
576 // we have lost the race to async deflation and we simply try again.
577 while (true) {
578 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
579 if (monitor->enter(current)) {
580 return;
581 }
582 }
583 }
584
// Slow-path monitor exit (Valhalla variant: exiting an inline type is a
// no-op after the held count decrement).
585 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
586 current->dec_held_monitor_count();
587 
588 if (!useHeavyMonitors()) {
589 markWord mark = object->mark();
590 if (EnableValhalla && mark.is_inline_type()) {
591 return;
592 }
593 if (LockingMode == LM_LIGHTWEIGHT) {
594 // Fast-locking does not use the 'lock' argument.
595 if (mark.is_fast_locked()) {
596 markWord unlocked_mark = mark.set_unlocked();
597 markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
598 if (old_mark != mark) {
599 // Another thread won the CAS, it must have inflated the monitor.
600 // It can only have installed an anonymously locked monitor at this point.
601 // Fetch that monitor, set owner correctly to this thread, and
602 // exit it (allowing waiting threads to enter).
603 assert(old_mark.has_monitor(), "must have monitor");
604 ObjectMonitor* monitor = old_mark.monitor();
605 assert(monitor->is_owner_anonymous(), "must be anonymous owner");
606 monitor->set_owner_from_anonymous(current);
607 monitor->exit(current);
608 }
609 LockStack& lock_stack = current->lock_stack();
610 lock_stack.remove(object);
611 return;
612 }
// NOTE(review): original lines 613-657 (the LM_LEGACY stack-lock exit path)
// are not visible in this chunk.
658 // The ObjectMonitor* can't be async deflated until ownership is
659 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
660 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
661 if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
662 // It must be owned by us. Pop lock object from lock stack.
663 LockStack& lock_stack = current->lock_stack();
664 oop popped = lock_stack.pop();
665 assert(popped == object, "must be owned by this thread");
666 monitor->set_owner_from_anonymous(current);
667 }
668 monitor->exit(current);
669 }
670
671 // -----------------------------------------------------------------------------
672 // JNI locks on java objects
673 // NOTE: must use heavy weight monitor to handle jni monitor enter
674 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
675 if (obj->klass()->is_value_based()) {
676 handle_sync_on_value_based_class(obj, current);
677 }
678 CHECK_THROW_NOSYNC_IMSE(obj);
679
680 // the current locking is from JNI instead of Java code
681 current->set_current_pending_monitor_is_from_java(false);
682 // An async deflation can race after the inflate() call and before
683 // enter() can make the ObjectMonitor busy. enter() returns false if
684 // we have lost the race to async deflation and we simply try again.
685 while (true) {
686 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
687 if (monitor->enter(current)) {
688 current->inc_held_monitor_count(1, true);
689 break;
690 }
691 }
692 current->set_current_pending_monitor_is_from_java(true);
693 }
694
695 // NOTE: must use heavy weight monitor to handle jni monitor exit
696 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
697 JavaThread* current = THREAD;
698 CHECK_THROW_NOSYNC_IMSE(obj);
699
700 // The ObjectMonitor* can't be async deflated until ownership is
701 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
702 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
703 // If this thread has locked the object, exit the monitor. We
704 // intentionally do not use CHECK on check_owner because we must exit the
705 // monitor even if an exception was already pending.
706 if (monitor->check_owner(THREAD)) {
707 monitor->exit(current);
708 current->dec_held_monitor_count(1, true);
709 }
710 }
711
712 // -----------------------------------------------------------------------------
713 // Internal VM locks on java objects
714 // standard constructor, allows locking failures
715 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
716 _thread = thread;
717 _thread->check_for_valid_safepoint_state();
718 _obj = obj;
719
720 if (_obj() != nullptr) {
721 ObjectSynchronizer::enter(_obj, &_lock, _thread);
722 }
723 }
724
725 ObjectLocker::~ObjectLocker() {
726 if (_obj() != nullptr) {
727 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
728 }
729 }
730
731
732 // -----------------------------------------------------------------------------
733 // Wait/Notify/NotifyAll
734 // NOTE: must use heavy weight monitor to handle wait()
735 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
736 JavaThread* current = THREAD;
737 CHECK_THROW_NOSYNC_IMSE_0(obj);
738 if (millis < 0) {
739 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
740 }
741 // The ObjectMonitor* can't be async deflated because the _waiters
742 // field is incremented before ownership is dropped and decremented
743 // after ownership is regained.
744 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
745
746 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
747 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
748
749 // This dummy call is in place to get around dtrace bug 6254741. Once
750 // that's fixed we can uncomment the following line, remove the call
751 // and change this function back into a "void" func.
752 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
753 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
754 return ret_code;
755 }
756
757 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
758 JavaThread* current = THREAD;
759 CHECK_THROW_NOSYNC_IMSE(obj);
760
761 markWord mark = obj->mark();
762 if (LockingMode == LM_LIGHTWEIGHT) {
763 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
764 // Not inflated so there can't be any waiters to notify.
765 return;
766 }
767 } else if (LockingMode == LM_LEGACY) {
768 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
769 // Not inflated so there can't be any waiters to notify.
770 return;
771 }
772 }
773 // The ObjectMonitor* can't be async deflated until ownership is
774 // dropped by the calling thread.
775 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
776 monitor->notify(CHECK);
777 }
778
779 // NOTE: see comment of notify()
780 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
781 JavaThread* current = THREAD;
782 CHECK_THROW_NOSYNC_IMSE(obj);
783
784 markWord mark = obj->mark();
785 if (LockingMode == LM_LIGHTWEIGHT) {
786 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
787 // Not inflated so there can't be any waiters to notify.
788 return;
789 }
790 } else if (LockingMode == LM_LEGACY) {
791 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
792 // Not inflated so there can't be any waiters to notify.
793 return;
794 }
795 }
796 // The ObjectMonitor* can't be async deflated until ownership is
797 // dropped by the calling thread.
798 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
799 monitor->notifyAll(CHECK);
800 }
801
802 // -----------------------------------------------------------------------------
924 unsigned v = current->_hashStateW;
925 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
926 current->_hashStateW = v;
927 value = v;
928 }
929
930 value &= markWord::hash_mask;
931 if (value == 0) value = 0xBAD;
932 assert(value != markWord::no_hash, "invariant");
933 return value;
934 }
935
936 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
937 // calculations as part of JVM/TI tagging.
938 static bool is_lock_owned(Thread* thread, oop obj) {
939 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
940 return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
941 }
942
943 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
944 if (EnableValhalla && obj->klass()->is_inline_klass()) {
945 // VM should be calling bootstrap method
946 ShouldNotReachHere();
947 }
948
949 while (true) {
950 ObjectMonitor* monitor = nullptr;
951 markWord temp, test;
952 intptr_t hash;
953 markWord mark = read_stable_mark(obj);
954 if (VerifyHeavyMonitors) {
955 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
956 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
957 }
958 if (mark.is_neutral()) { // if this is a normal header
959 hash = mark.hash();
960 if (hash != 0) { // if it has a hash, just return it
961 return hash;
962 }
963 hash = get_next_hash(current, obj); // get a new hash
964 temp = mark.copy_set_hash(hash); // merge the hash into header
965 // try to install the hash
966 test = obj->cas_set_mark(temp, mark);
967 if (test == mark) { // if the hash was installed, return it
1048 hash = test.hash();
1049 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1050 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1051 }
1052 if (monitor->is_being_async_deflated()) {
1053 // If we detect that async deflation has occurred, then we
1054 // attempt to restore the header/dmw to the object's header
1055 // so that we only retry once if the deflater thread happens
1056 // to be slow.
1057 monitor->install_displaced_markword_in_object(obj);
1058 continue;
1059 }
1060 }
1061 // We finally get the hash.
1062 return hash;
1063 }
1064 }
1065
1066 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1067 Handle h_obj) {
1068 if (EnableValhalla && h_obj->mark().is_inline_type()) {
1069 return false;
1070 }
1071 assert(current == JavaThread::current(), "Can only be called on current thread");
1072 oop obj = h_obj();
1073
1074 markWord mark = read_stable_mark(obj);
1075
1076 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1077 // stack-locked case, header points into owner's stack
1078 return current->is_lock_owned((address)mark.locker());
1079 }
1080
1081 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1082 // fast-locking case, see if lock is in current's lock stack
1083 return current->lock_stack().contains(h_obj());
1084 }
1085
1086 if (mark.has_monitor()) {
1087 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1088 // The first stage of async deflation does not affect any field
1089 // used by this comparison so the ObjectMonitor* is usable here.
1090 ObjectMonitor* monitor = mark.monitor();
1329 event->set_monitorClass(obj->klass());
1330 event->set_address((uintptr_t)(void*)obj);
1331 event->set_cause((u1)cause);
1332 event->commit();
1333 }
1334
1335 // Fast path code shared by multiple functions
1336 void ObjectSynchronizer::inflate_helper(oop obj) {
1337 markWord mark = obj->mark_acquire();
1338 if (mark.has_monitor()) {
1339 ObjectMonitor* monitor = mark.monitor();
1340 markWord dmw = monitor->header();
1341 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1342 return;
1343 }
1344 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1345 }
1346
1347 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1348 const InflateCause cause) {
1349 if (EnableValhalla) {
1350 guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1351 }
1352
1353 EventJavaMonitorInflate event;
1354
1355 for (;;) {
1356 const markWord mark = object->mark_acquire();
1357
1358 // The mark can be in one of the following states:
1359 // * inflated - Just return if using stack-locking.
1360 // If using fast-locking and the ObjectMonitor owner
1361 // is anonymous and the current thread owns the
1362 // object lock, then we make the current thread the
1363 // ObjectMonitor owner and remove the lock from the
1364 // current thread's lock stack.
1365 // * fast-locked - Coerce it to inflated from fast-locked.
1366 // * stack-locked - Coerce it to inflated from stack-locked.
1367 // * INFLATING - Busy wait for conversion from stack-locked to
1368 // inflated.
1369 // * neutral - Aggressively inflate the object.
1370
1371 // CASE: inflated
1372 if (mark.has_monitor()) {
1751 // safely read the mark-word and look-through to the object-monitor, without
1752 // being afraid that the object-monitor is going away.
1753 VM_RendezvousGCThreads sync_gc;
1754 VMThread::execute(&sync_gc);
1755
1756 if (ls != nullptr) {
1757 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1758 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1759 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1760 timer.start();
1761 }
1762 } else {
1763 // This is not a monitor deflation thread.
1764 // No handshake or rendezvous is needed when we are already at safepoint.
1765 assert_at_safepoint();
1766 }
1767
1768 // After the handshake, safely free the ObjectMonitors that were
1769 // deflated and unlinked in this cycle.
1770 if (current->is_Java_thread()) {
1771 if (ls != nullptr) {
1772 timer.stop();
1773 ls->print_cr("before setting blocked: unlinked_count=" SIZE_FORMAT
1774 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1775 SIZE_FORMAT ", max=" SIZE_FORMAT,
1776 unlinked_count, in_use_list_ceiling(),
1777 _in_use_list.count(), _in_use_list.max());
1778 }
1779 // Mark the calling JavaThread blocked (safepoint safe) while we free
1780 // the ObjectMonitors so we don't delay safepoints whilst doing that.
1781 ThreadBlockInVM tbivm(JavaThread::cast(current));
1782 if (ls != nullptr) {
1783 ls->print_cr("after setting blocked: in_use_list stats: ceiling="
1784 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1785 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1786 timer.start();
1787 }
1788 deleted_count = delete_monitors(&delete_list);
1789 // ThreadBlockInVM is destroyed here
1790 } else {
1791 // A non-JavaThread can just free the ObjectMonitors:
1792 deleted_count = delete_monitors(&delete_list);
1793 }
1794 assert(unlinked_count == deleted_count, "must be");
1795 }
1796
1797 if (ls != nullptr) {
1798 timer.stop();
1799 if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) {
1800 ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
1801 deflated_count, unlinked_count, timer.seconds());
1802 }
|