242 // removed from the system.
243 //
244 // Note: If the _in_use_list max exceeds the ceiling, then
245 // monitors_used_above_threshold() will use the in_use_list max instead
246 // of the thread count derived ceiling because we have used more
247 // ObjectMonitors than the estimated average.
248 //
249 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
250 // no-progress async monitor deflation cycles in a row, then the ceiling
251 // is adjusted upwards by monitors_used_above_threshold().
252 //
253 // Start the ceiling with the estimate for one thread in initialize()
254 // which is called after cmd line options are processed.
// Ceiling for the in-use ObjectMonitor list; see the Note comments above
// for how monitors_used_above_threshold() consults and adjusts it.
static size_t _in_use_list_ceiling = 0;
// Flag signaling that an async monitor-deflation cycle has been requested.
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// Flag recording that the final monitor audit has been performed.
bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the most recent async deflation cycle.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Consecutive no-progress deflation cycles; compared against
// NoAsyncDeflationProgressMax (see the Note above).
static uintx _no_progress_cnt = 0;
// When set, the next no-progress cycle does not bump _no_progress_cnt.
static bool _no_progress_skip_increment = false;
261
262 // =====================> Quick functions
263
264 // The quick_* forms are special fast-path variants used to improve
265 // performance. In the simplest case, a "quick_*" implementation could
266 // simply return false, in which case the caller will perform the necessary
267 // state transitions and call the slow-path form.
268 // The fast-path is designed to handle frequently arising cases in an efficient
269 // manner and is just a degenerate "optimistic" variant of the slow-path.
270 // returns true -- to indicate the call was satisfied.
271 // returns false -- to indicate the call needs the services of the slow-path.
272 // A no-loitering ordinance is in effect for code in the quick_* family
273 // operators: safepoints or indefinite blocking (blocking that might span a
274 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
275 // entry.
276 //
277 // Consider: An interesting optimization is to have the JIT recognize the
278 // following common idiom:
279 // synchronized (someobj) { .... ; notify(); }
280 // That is, we find a notify() or notifyAll() call that immediately precedes
281 // the monitorexit operation. In that case the JIT could fuse the operations
282 // into a single notifyAndExit() runtime primitive.
283
284 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
285 assert(current->thread_state() == _thread_in_Java, "invariant");
286 NoSafepointVerifier nsv;
287 if (obj == nullptr) return false; // slow-path for invalid obj
288 const markWord mark = obj->mark();
289
290 if (LockingMode == LM_LIGHTWEIGHT) {
291 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
292 // Degenerate notify
293 // fast-locked by caller so by definition the implied waitset is empty.
294 return true;
295 }
296 } else if (LockingMode == LM_LEGACY) {
297 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
298 // Degenerate notify
299 // stack-locked by caller so by definition the implied waitset is empty.
300 return true;
301 }
302 }
303
304 if (mark.has_monitor()) {
305 ObjectMonitor* const mon = mark.monitor();
306 assert(mon->object() == oop(obj), "invariant");
307 if (mon->owner() != current) return false; // slow-path for IMS exception
324 }
325 return true;
326 }
327
328 // other IMS exception states take the slow-path
329 return false;
330 }
331
332
333 // The LockNode emitted directly at the synchronization site would have
334 // been too big if it were to have included support for the cases of inflated
335 // recursive enter and exit, so they go here instead.
336 // Note that we can't safely call AsyncPrintJavaStack() from within
337 // quick_enter() as our thread state remains _in_Java.
338
339 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
340 BasicLock * lock) {
341 assert(current->thread_state() == _thread_in_Java, "invariant");
342 NoSafepointVerifier nsv;
343 if (obj == nullptr) return false; // Need to throw NPE
344
345 if (obj->klass()->is_value_based()) {
346 return false;
347 }
348
349 const markWord mark = obj->mark();
350
351 if (mark.has_monitor()) {
352 ObjectMonitor* const m = mark.monitor();
353 // An async deflation or GC can race us before we manage to make
354 // the ObjectMonitor busy by setting the owner below. If we detect
355 // that race we just bail out to the slow-path here.
356 if (m->object_peek() == nullptr) {
357 return false;
358 }
359 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
360
361 // Lock contention and Transactional Lock Elision (TLE) diagnostics
362 // and observability
363 // Case: light contention possibly amenable to TLE
445 if (bcp_was_adjusted) {
446 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
447 }
448 }
449
// Returns true when heavyweight (always-inflated) monitors are in effect.
// LM_MONITOR is only honored on platforms with the needed assembly support.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  const bool heavy = (LockingMode == LM_MONITOR);
#else
  const bool heavy = false;
#endif
  return heavy;
}
457
458 // -----------------------------------------------------------------------------
459 // Monitor Enter/Exit
460 // The interpreter and compiler assembly code tries to lock using the fast path
461 // of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
463
464 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
465 if (obj->klass()->is_value_based()) {
466 handle_sync_on_value_based_class(obj, current);
467 }
468
469 current->inc_held_monitor_count();
470
471 if (!useHeavyMonitors()) {
472 if (LockingMode == LM_LIGHTWEIGHT) {
473 // Fast-locking does not use the 'lock' argument.
474 LockStack& lock_stack = current->lock_stack();
475 if (lock_stack.can_push()) {
476 markWord mark = obj()->mark_acquire();
477 if (mark.is_neutral()) {
478 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
479 // Try to swing into 'fast-locked' state.
480 markWord locked_mark = mark.set_fast_locked();
481 markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
482 if (old_mark == mark) {
483 // Successfully fast-locked, push object to lock-stack and return.
484 lock_stack.push(obj());
514 } else if (VerifyHeavyMonitors) {
515 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
516 }
517
518 // An async deflation can race after the inflate() call and before
519 // enter() can make the ObjectMonitor busy. enter() returns false if
520 // we have lost the race to async deflation and we simply try again.
521 while (true) {
522 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
523 if (monitor->enter(current)) {
524 return;
525 }
526 }
527 }
528
529 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
530 current->dec_held_monitor_count();
531
532 if (!useHeavyMonitors()) {
533 markWord mark = object->mark();
534 if (LockingMode == LM_LIGHTWEIGHT) {
535 // Fast-locking does not use the 'lock' argument.
536 if (mark.is_fast_locked()) {
537 markWord unlocked_mark = mark.set_unlocked();
538 markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
539 if (old_mark != mark) {
540 // Another thread won the CAS, it must have inflated the monitor.
541 // It can only have installed an anonymously locked monitor at this point.
542 // Fetch that monitor, set owner correctly to this thread, and
543 // exit it (allowing waiting threads to enter).
544 assert(old_mark.has_monitor(), "must have monitor");
545 ObjectMonitor* monitor = old_mark.monitor();
546 assert(monitor->is_owner_anonymous(), "must be anonymous owner");
547 monitor->set_owner_from_anonymous(current);
548 monitor->exit(current);
549 }
550 LockStack& lock_stack = current->lock_stack();
551 lock_stack.remove(object);
552 return;
553 }
599 // The ObjectMonitor* can't be async deflated until ownership is
600 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
601 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
602 if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
603 // It must be owned by us. Pop lock object from lock stack.
604 LockStack& lock_stack = current->lock_stack();
605 oop popped = lock_stack.pop();
606 assert(popped == object, "must be owned by this thread");
607 monitor->set_owner_from_anonymous(current);
608 }
609 monitor->exit(current);
610 }
611
612 // -----------------------------------------------------------------------------
613 // JNI locks on java objects
614 // NOTE: must use heavy weight monitor to handle jni monitor enter
615 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
616 if (obj->klass()->is_value_based()) {
617 handle_sync_on_value_based_class(obj, current);
618 }
619
620 // the current locking is from JNI instead of Java code
621 current->set_current_pending_monitor_is_from_java(false);
622 // An async deflation can race after the inflate() call and before
623 // enter() can make the ObjectMonitor busy. enter() returns false if
624 // we have lost the race to async deflation and we simply try again.
625 while (true) {
626 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
627 if (monitor->enter(current)) {
628 current->inc_held_monitor_count(1, true);
629 break;
630 }
631 }
632 current->set_current_pending_monitor_is_from_java(true);
633 }
634
635 // NOTE: must use heavy weight monitor to handle jni monitor exit
636 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
637 JavaThread* current = THREAD;
638
639 // The ObjectMonitor* can't be async deflated until ownership is
640 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
641 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
642 // If this thread has locked the object, exit the monitor. We
643 // intentionally do not use CHECK on check_owner because we must exit the
644 // monitor even if an exception was already pending.
645 if (monitor->check_owner(THREAD)) {
646 monitor->exit(current);
647 current->dec_held_monitor_count(1, true);
648 }
649 }
650
651 // -----------------------------------------------------------------------------
652 // Internal VM locks on java objects
653 // standard constructor, allows locking failures
654 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
655 _thread = thread;
656 _thread->check_for_valid_safepoint_state();
657 _obj = obj;
658
659 if (_obj() != nullptr) {
660 ObjectSynchronizer::enter(_obj, &_lock, _thread);
661 }
662 }
663
664 ObjectLocker::~ObjectLocker() {
665 if (_obj() != nullptr) {
666 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
667 }
668 }
669
670
671 // -----------------------------------------------------------------------------
672 // Wait/Notify/NotifyAll
673 // NOTE: must use heavy weight monitor to handle wait()
674 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
675 JavaThread* current = THREAD;
676 if (millis < 0) {
677 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
678 }
679 // The ObjectMonitor* can't be async deflated because the _waiters
680 // field is incremented before ownership is dropped and decremented
681 // after ownership is regained.
682 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
683
684 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
685 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
686
687 // This dummy call is in place to get around dtrace bug 6254741. Once
688 // that's fixed we can uncomment the following line, remove the call
689 // and change this function back into a "void" func.
690 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
691 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
692 return ret_code;
693 }
694
695 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
696 JavaThread* current = THREAD;
697
698 markWord mark = obj->mark();
699 if (LockingMode == LM_LIGHTWEIGHT) {
700 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
701 // Not inflated so there can't be any waiters to notify.
702 return;
703 }
704 } else if (LockingMode == LM_LEGACY) {
705 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
706 // Not inflated so there can't be any waiters to notify.
707 return;
708 }
709 }
710 // The ObjectMonitor* can't be async deflated until ownership is
711 // dropped by the calling thread.
712 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
713 monitor->notify(CHECK);
714 }
715
716 // NOTE: see comment of notify()
717 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
718 JavaThread* current = THREAD;
719
720 markWord mark = obj->mark();
721 if (LockingMode == LM_LIGHTWEIGHT) {
722 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
723 // Not inflated so there can't be any waiters to notify.
724 return;
725 }
726 } else if (LockingMode == LM_LEGACY) {
727 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
728 // Not inflated so there can't be any waiters to notify.
729 return;
730 }
731 }
732 // The ObjectMonitor* can't be async deflated until ownership is
733 // dropped by the calling thread.
734 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
735 monitor->notifyAll(CHECK);
736 }
737
738 // -----------------------------------------------------------------------------
860 unsigned v = current->_hashStateW;
861 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
862 current->_hashStateW = v;
863 value = v;
864 }
865
866 value &= markWord::hash_mask;
867 if (value == 0) value = 0xBAD;
868 assert(value != markWord::no_hash, "invariant");
869 return value;
870 }
871
872 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
873 // calculations as part of JVM/TI tagging.
874 static bool is_lock_owned(Thread* thread, oop obj) {
875 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
876 return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
877 }
878
879 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
880
881 while (true) {
882 ObjectMonitor* monitor = nullptr;
883 markWord temp, test;
884 intptr_t hash;
885 markWord mark = read_stable_mark(obj);
886 if (VerifyHeavyMonitors) {
887 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
888 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
889 }
890 if (mark.is_neutral()) { // if this is a normal header
891 hash = mark.hash();
892 if (hash != 0) { // if it has a hash, just return it
893 return hash;
894 }
895 hash = get_next_hash(current, obj); // get a new hash
896 temp = mark.copy_set_hash(hash); // merge the hash into header
897 // try to install the hash
898 test = obj->cas_set_mark(temp, mark);
899 if (test == mark) { // if the hash was installed, return it
980 hash = test.hash();
981 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
982 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
983 }
984 if (monitor->is_being_async_deflated()) {
985 // If we detect that async deflation has occurred, then we
986 // attempt to restore the header/dmw to the object's header
987 // so that we only retry once if the deflater thread happens
988 // to be slow.
989 monitor->install_displaced_markword_in_object(obj);
990 continue;
991 }
992 }
993 // We finally get the hash.
994 return hash;
995 }
996 }
997
998 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
999 Handle h_obj) {
1000 assert(current == JavaThread::current(), "Can only be called on current thread");
1001 oop obj = h_obj();
1002
1003 markWord mark = read_stable_mark(obj);
1004
1005 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1006 // stack-locked case, header points into owner's stack
1007 return current->is_lock_owned((address)mark.locker());
1008 }
1009
1010 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1011 // fast-locking case, see if lock is in current's lock stack
1012 return current->lock_stack().contains(h_obj());
1013 }
1014
1015 if (mark.has_monitor()) {
1016 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1017 // The first stage of async deflation does not affect any field
1018 // used by this comparison so the ObjectMonitor* is usable here.
1019 ObjectMonitor* monitor = mark.monitor();
1252 event->set_monitorClass(obj->klass());
1253 event->set_address((uintptr_t)(void*)obj);
1254 event->set_cause((u1)cause);
1255 event->commit();
1256 }
1257
1258 // Fast path code shared by multiple functions
1259 void ObjectSynchronizer::inflate_helper(oop obj) {
1260 markWord mark = obj->mark_acquire();
1261 if (mark.has_monitor()) {
1262 ObjectMonitor* monitor = mark.monitor();
1263 markWord dmw = monitor->header();
1264 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1265 return;
1266 }
1267 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1268 }
1269
1270 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1271 const InflateCause cause) {
1272 EventJavaMonitorInflate event;
1273
1274 for (;;) {
1275 const markWord mark = object->mark_acquire();
1276
1277 // The mark can be in one of the following states:
1278 // * inflated - Just return if using stack-locking.
1279 // If using fast-locking and the ObjectMonitor owner
1280 // is anonymous and the current thread owns the
1281 // object lock, then we make the current thread the
1282 // ObjectMonitor owner and remove the lock from the
1283 // current thread's lock stack.
1284 // * fast-locked - Coerce it to inflated from fast-locked.
1285 // * stack-locked - Coerce it to inflated from stack-locked.
1286 // * INFLATING - Busy wait for conversion from stack-locked to
1287 // inflated.
1288 // * neutral - Aggressively inflate the object.
1289
1290 // CASE: inflated
1291 if (mark.has_monitor()) {
1646 // safely read the mark-word and look-through to the object-monitor, without
1647 // being afraid that the object-monitor is going away.
1648 VM_RendezvousGCThreads sync_gc;
1649 VMThread::execute(&sync_gc);
1650
1651 if (ls != nullptr) {
1652 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1653 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1654 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1655 timer.start();
1656 }
1657 } else {
1658 // This is not a monitor deflation thread.
1659 // No handshake or rendezvous is needed when we are already at safepoint.
1660 assert_at_safepoint();
1661 }
1662
1663 // After the handshake, safely free the ObjectMonitors that were
1664 // deflated and unlinked in this cycle.
1665 if (current->is_Java_thread()) {
1666 if (ls != NULL) {
1667 timer.stop();
1668 ls->print_cr("before setting blocked: unlinked_count=" SIZE_FORMAT
1669 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1670 SIZE_FORMAT ", max=" SIZE_FORMAT,
1671 unlinked_count, in_use_list_ceiling(),
1672 _in_use_list.count(), _in_use_list.max());
1673 }
1674 // Mark the calling JavaThread blocked (safepoint safe) while we free
1675 // the ObjectMonitors so we don't delay safepoints whilst doing that.
1676 ThreadBlockInVM tbivm(JavaThread::cast(current));
1677 if (ls != NULL) {
1678 ls->print_cr("after setting blocked: in_use_list stats: ceiling="
1679 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1680 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1681 timer.start();
1682 }
1683 deleted_count = delete_monitors(&delete_list);
1684 // ThreadBlockInVM is destroyed here
1685 } else {
1686 // A non-JavaThread can just free the ObjectMonitors:
1687 deleted_count = delete_monitors(&delete_list);
1688 }
1689 assert(unlinked_count == deleted_count, "must be");
1690 }
1691
1692 if (ls != nullptr) {
1693 timer.stop();
1694 if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) {
1695 ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
1696 deflated_count, unlinked_count, timer.seconds());
1697 }
|
242 // removed from the system.
243 //
244 // Note: If the _in_use_list max exceeds the ceiling, then
245 // monitors_used_above_threshold() will use the in_use_list max instead
246 // of the thread count derived ceiling because we have used more
247 // ObjectMonitors than the estimated average.
248 //
249 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
250 // no-progress async monitor deflation cycles in a row, then the ceiling
251 // is adjusted upwards by monitors_used_above_threshold().
252 //
253 // Start the ceiling with the estimate for one thread in initialize()
254 // which is called after cmd line options are processed.
// Ceiling for the in-use ObjectMonitor list; see the Note comments above
// for how monitors_used_above_threshold() consults and adjusts it.
static size_t _in_use_list_ceiling = 0;
// Flag signaling that an async monitor-deflation cycle has been requested.
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// Flag recording that the final monitor audit has been performed.
bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the most recent async deflation cycle.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Consecutive no-progress deflation cycles; compared against
// NoAsyncDeflationProgressMax (see the Note above).
static uintx _no_progress_cnt = 0;
// When set, the next no-progress cycle does not bump _no_progress_cnt.
static bool _no_progress_skip_increment = false;
261
// Valhalla: inline types cannot be synchronized on. Throws
// IllegalMonitorStateException (and returns from the enclosing void
// function) when obj is an inline type. Expansion sites must have a
// JavaThread* named 'current' in scope.
#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current; \
    ResourceMark rm(THREAD); \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
268
// Variant of CHECK_THROW_NOSYNC_IMSE for enclosing functions that return a
// value: uses THROW_MSG_0, so the function returns 0 on throw. Expansion
// sites must have a JavaThread* named 'current' in scope.
#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current; \
    ResourceMark rm(THREAD); \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
275
276 // =====================> Quick functions
277
278 // The quick_* forms are special fast-path variants used to improve
279 // performance. In the simplest case, a "quick_*" implementation could
280 // simply return false, in which case the caller will perform the necessary
281 // state transitions and call the slow-path form.
282 // The fast-path is designed to handle frequently arising cases in an efficient
283 // manner and is just a degenerate "optimistic" variant of the slow-path.
284 // returns true -- to indicate the call was satisfied.
285 // returns false -- to indicate the call needs the services of the slow-path.
286 // A no-loitering ordinance is in effect for code in the quick_* family
287 // operators: safepoints or indefinite blocking (blocking that might span a
288 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
289 // entry.
290 //
291 // Consider: An interesting optimization is to have the JIT recognize the
292 // following common idiom:
293 // synchronized (someobj) { .... ; notify(); }
294 // That is, we find a notify() or notifyAll() call that immediately precedes
295 // the monitorexit operation. In that case the JIT could fuse the operations
296 // into a single notifyAndExit() runtime primitive.
297
298 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
299 assert(current->thread_state() == _thread_in_Java, "invariant");
300 NoSafepointVerifier nsv;
301 if (obj == nullptr) return false; // slow-path for invalid obj
302 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
303 const markWord mark = obj->mark();
304
305 if (LockingMode == LM_LIGHTWEIGHT) {
306 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
307 // Degenerate notify
308 // fast-locked by caller so by definition the implied waitset is empty.
309 return true;
310 }
311 } else if (LockingMode == LM_LEGACY) {
312 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
313 // Degenerate notify
314 // stack-locked by caller so by definition the implied waitset is empty.
315 return true;
316 }
317 }
318
319 if (mark.has_monitor()) {
320 ObjectMonitor* const mon = mark.monitor();
321 assert(mon->object() == oop(obj), "invariant");
322 if (mon->owner() != current) return false; // slow-path for IMS exception
339 }
340 return true;
341 }
342
343 // other IMS exception states take the slow-path
344 return false;
345 }
346
347
348 // The LockNode emitted directly at the synchronization site would have
349 // been too big if it were to have included support for the cases of inflated
350 // recursive enter and exit, so they go here instead.
351 // Note that we can't safely call AsyncPrintJavaStack() from within
352 // quick_enter() as our thread state remains _in_Java.
353
354 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
355 BasicLock * lock) {
356 assert(current->thread_state() == _thread_in_Java, "invariant");
357 NoSafepointVerifier nsv;
358 if (obj == nullptr) return false; // Need to throw NPE
359 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
360
361 if (obj->klass()->is_value_based()) {
362 return false;
363 }
364
365 const markWord mark = obj->mark();
366
367 if (mark.has_monitor()) {
368 ObjectMonitor* const m = mark.monitor();
369 // An async deflation or GC can race us before we manage to make
370 // the ObjectMonitor busy by setting the owner below. If we detect
371 // that race we just bail out to the slow-path here.
372 if (m->object_peek() == nullptr) {
373 return false;
374 }
375 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
376
377 // Lock contention and Transactional Lock Elision (TLE) diagnostics
378 // and observability
379 // Case: light contention possibly amenable to TLE
461 if (bcp_was_adjusted) {
462 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
463 }
464 }
465
// Returns true when heavyweight (always-inflated) monitors are in effect.
// LM_MONITOR is only honored on platforms with the needed assembly support.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  const bool heavy = (LockingMode == LM_MONITOR);
#else
  const bool heavy = false;
#endif
  return heavy;
}
473
474 // -----------------------------------------------------------------------------
475 // Monitor Enter/Exit
476 // The interpreter and compiler assembly code tries to lock using the fast path
477 // of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
479
480 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
481 CHECK_THROW_NOSYNC_IMSE(obj);
482 if (obj->klass()->is_value_based()) {
483 handle_sync_on_value_based_class(obj, current);
484 }
485
486 current->inc_held_monitor_count();
487
488 if (!useHeavyMonitors()) {
489 if (LockingMode == LM_LIGHTWEIGHT) {
490 // Fast-locking does not use the 'lock' argument.
491 LockStack& lock_stack = current->lock_stack();
492 if (lock_stack.can_push()) {
493 markWord mark = obj()->mark_acquire();
494 if (mark.is_neutral()) {
495 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
496 // Try to swing into 'fast-locked' state.
497 markWord locked_mark = mark.set_fast_locked();
498 markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
499 if (old_mark == mark) {
500 // Successfully fast-locked, push object to lock-stack and return.
501 lock_stack.push(obj());
531 } else if (VerifyHeavyMonitors) {
532 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
533 }
534
535 // An async deflation can race after the inflate() call and before
536 // enter() can make the ObjectMonitor busy. enter() returns false if
537 // we have lost the race to async deflation and we simply try again.
538 while (true) {
539 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
540 if (monitor->enter(current)) {
541 return;
542 }
543 }
544 }
545
546 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
547 current->dec_held_monitor_count();
548
549 if (!useHeavyMonitors()) {
550 markWord mark = object->mark();
551 if (EnableValhalla && mark.is_inline_type()) {
552 return;
553 }
554 if (LockingMode == LM_LIGHTWEIGHT) {
555 // Fast-locking does not use the 'lock' argument.
556 if (mark.is_fast_locked()) {
557 markWord unlocked_mark = mark.set_unlocked();
558 markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
559 if (old_mark != mark) {
560 // Another thread won the CAS, it must have inflated the monitor.
561 // It can only have installed an anonymously locked monitor at this point.
562 // Fetch that monitor, set owner correctly to this thread, and
563 // exit it (allowing waiting threads to enter).
564 assert(old_mark.has_monitor(), "must have monitor");
565 ObjectMonitor* monitor = old_mark.monitor();
566 assert(monitor->is_owner_anonymous(), "must be anonymous owner");
567 monitor->set_owner_from_anonymous(current);
568 monitor->exit(current);
569 }
570 LockStack& lock_stack = current->lock_stack();
571 lock_stack.remove(object);
572 return;
573 }
619 // The ObjectMonitor* can't be async deflated until ownership is
620 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
621 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
622 if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
623 // It must be owned by us. Pop lock object from lock stack.
624 LockStack& lock_stack = current->lock_stack();
625 oop popped = lock_stack.pop();
626 assert(popped == object, "must be owned by this thread");
627 monitor->set_owner_from_anonymous(current);
628 }
629 monitor->exit(current);
630 }
631
632 // -----------------------------------------------------------------------------
633 // JNI locks on java objects
634 // NOTE: must use heavy weight monitor to handle jni monitor enter
635 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
636 if (obj->klass()->is_value_based()) {
637 handle_sync_on_value_based_class(obj, current);
638 }
639 CHECK_THROW_NOSYNC_IMSE(obj);
640
641 // the current locking is from JNI instead of Java code
642 current->set_current_pending_monitor_is_from_java(false);
643 // An async deflation can race after the inflate() call and before
644 // enter() can make the ObjectMonitor busy. enter() returns false if
645 // we have lost the race to async deflation and we simply try again.
646 while (true) {
647 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
648 if (monitor->enter(current)) {
649 current->inc_held_monitor_count(1, true);
650 break;
651 }
652 }
653 current->set_current_pending_monitor_is_from_java(true);
654 }
655
656 // NOTE: must use heavy weight monitor to handle jni monitor exit
657 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
658 JavaThread* current = THREAD;
659 CHECK_THROW_NOSYNC_IMSE(obj);
660
661 // The ObjectMonitor* can't be async deflated until ownership is
662 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
663 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
664 // If this thread has locked the object, exit the monitor. We
665 // intentionally do not use CHECK on check_owner because we must exit the
666 // monitor even if an exception was already pending.
667 if (monitor->check_owner(THREAD)) {
668 monitor->exit(current);
669 current->dec_held_monitor_count(1, true);
670 }
671 }
672
673 // -----------------------------------------------------------------------------
674 // Internal VM locks on java objects
675 // standard constructor, allows locking failures
676 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
677 _thread = thread;
678 _thread->check_for_valid_safepoint_state();
679 _obj = obj;
680
681 if (_obj() != nullptr) {
682 ObjectSynchronizer::enter(_obj, &_lock, _thread);
683 }
684 }
685
686 ObjectLocker::~ObjectLocker() {
687 if (_obj() != nullptr) {
688 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
689 }
690 }
691
692
693 // -----------------------------------------------------------------------------
694 // Wait/Notify/NotifyAll
695 // NOTE: must use heavy weight monitor to handle wait()
// Inflates the monitor for obj and waits on it for up to millis.
// A negative timeout raises IllegalArgumentException. Returns the
// result of the dtrace "waited" probe call (see workaround below).
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE_0(obj);
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  // Not CHECK: the dtrace probe below must run even if wait() left an
  // exception (e.g. InterruptedException) pending.
  monitor->wait(millis, true, THREAD); // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}
717
718 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
719 JavaThread* current = THREAD;
720 CHECK_THROW_NOSYNC_IMSE(obj);
721
722 markWord mark = obj->mark();
723 if (LockingMode == LM_LIGHTWEIGHT) {
724 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
725 // Not inflated so there can't be any waiters to notify.
726 return;
727 }
728 } else if (LockingMode == LM_LEGACY) {
729 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
730 // Not inflated so there can't be any waiters to notify.
731 return;
732 }
733 }
734 // The ObjectMonitor* can't be async deflated until ownership is
735 // dropped by the calling thread.
736 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
737 monitor->notify(CHECK);
738 }
739
740 // NOTE: see comment of notify()
741 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
742 JavaThread* current = THREAD;
743 CHECK_THROW_NOSYNC_IMSE(obj);
744
745 markWord mark = obj->mark();
746 if (LockingMode == LM_LIGHTWEIGHT) {
747 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
748 // Not inflated so there can't be any waiters to notify.
749 return;
750 }
751 } else if (LockingMode == LM_LEGACY) {
752 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
753 // Not inflated so there can't be any waiters to notify.
754 return;
755 }
756 }
757 // The ObjectMonitor* can't be async deflated until ownership is
758 // dropped by the calling thread.
759 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
760 monitor->notifyAll(CHECK);
761 }
762
763 // -----------------------------------------------------------------------------
885 unsigned v = current->_hashStateW;
886 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
887 current->_hashStateW = v;
888 value = v;
889 }
890
891 value &= markWord::hash_mask;
892 if (value == 0) value = 0xBAD;
893 assert(value != markWord::no_hash, "invariant");
894 return value;
895 }
896
897 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
898 // calculations as part of JVM/TI tagging.
899 static bool is_lock_owned(Thread* thread, oop obj) {
900 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
901 return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
902 }
903
904 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
905 if (EnableValhalla && obj->klass()->is_inline_klass()) {
906 // VM should be calling bootstrap method
907 ShouldNotReachHere();
908 }
909
910 while (true) {
911 ObjectMonitor* monitor = nullptr;
912 markWord temp, test;
913 intptr_t hash;
914 markWord mark = read_stable_mark(obj);
915 if (VerifyHeavyMonitors) {
916 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
917 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
918 }
919 if (mark.is_neutral()) { // if this is a normal header
920 hash = mark.hash();
921 if (hash != 0) { // if it has a hash, just return it
922 return hash;
923 }
924 hash = get_next_hash(current, obj); // get a new hash
925 temp = mark.copy_set_hash(hash); // merge the hash into header
926 // try to install the hash
927 test = obj->cas_set_mark(temp, mark);
928 if (test == mark) { // if the hash was installed, return it
1009 hash = test.hash();
1010 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1011 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1012 }
1013 if (monitor->is_being_async_deflated()) {
1014 // If we detect that async deflation has occurred, then we
1015 // attempt to restore the header/dmw to the object's header
1016 // so that we only retry once if the deflater thread happens
1017 // to be slow.
1018 monitor->install_displaced_markword_in_object(obj);
1019 continue;
1020 }
1021 }
1022 // We finally get the hash.
1023 return hash;
1024 }
1025 }
1026
1027 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1028 Handle h_obj) {
1029 if (EnableValhalla && h_obj->mark().is_inline_type()) {
1030 return false;
1031 }
1032 assert(current == JavaThread::current(), "Can only be called on current thread");
1033 oop obj = h_obj();
1034
1035 markWord mark = read_stable_mark(obj);
1036
1037 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1038 // stack-locked case, header points into owner's stack
1039 return current->is_lock_owned((address)mark.locker());
1040 }
1041
1042 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1043 // fast-locking case, see if lock is in current's lock stack
1044 return current->lock_stack().contains(h_obj());
1045 }
1046
1047 if (mark.has_monitor()) {
1048 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1049 // The first stage of async deflation does not affect any field
1050 // used by this comparison so the ObjectMonitor* is usable here.
1051 ObjectMonitor* monitor = mark.monitor();
1284 event->set_monitorClass(obj->klass());
1285 event->set_address((uintptr_t)(void*)obj);
1286 event->set_cause((u1)cause);
1287 event->commit();
1288 }
1289
1290 // Fast path code shared by multiple functions
1291 void ObjectSynchronizer::inflate_helper(oop obj) {
1292 markWord mark = obj->mark_acquire();
1293 if (mark.has_monitor()) {
1294 ObjectMonitor* monitor = mark.monitor();
1295 markWord dmw = monitor->header();
1296 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1297 return;
1298 }
1299 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1300 }
1301
1302 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1303 const InflateCause cause) {
1304 if (EnableValhalla) {
1305 guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1306 }
1307
1308 EventJavaMonitorInflate event;
1309
1310 for (;;) {
1311 const markWord mark = object->mark_acquire();
1312
1313 // The mark can be in one of the following states:
1314 // * inflated - Just return if using stack-locking.
1315 // If using fast-locking and the ObjectMonitor owner
1316 // is anonymous and the current thread owns the
1317 // object lock, then we make the current thread the
1318 // ObjectMonitor owner and remove the lock from the
1319 // current thread's lock stack.
1320 // * fast-locked - Coerce it to inflated from fast-locked.
1321 // * stack-locked - Coerce it to inflated from stack-locked.
1322 // * INFLATING - Busy wait for conversion from stack-locked to
1323 // inflated.
1324 // * neutral - Aggressively inflate the object.
1325
1326 // CASE: inflated
1327 if (mark.has_monitor()) {
1682 // safely read the mark-word and look-through to the object-monitor, without
1683 // being afraid that the object-monitor is going away.
1684 VM_RendezvousGCThreads sync_gc;
1685 VMThread::execute(&sync_gc);
1686
1687 if (ls != nullptr) {
1688 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1689 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1690 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1691 timer.start();
1692 }
1693 } else {
1694 // This is not a monitor deflation thread.
1695 // No handshake or rendezvous is needed when we are already at safepoint.
1696 assert_at_safepoint();
1697 }
1698
1699 // After the handshake, safely free the ObjectMonitors that were
1700 // deflated and unlinked in this cycle.
1701 if (current->is_Java_thread()) {
1702 if (ls != nullptr) {
1703 timer.stop();
1704 ls->print_cr("before setting blocked: unlinked_count=" SIZE_FORMAT
1705 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1706 SIZE_FORMAT ", max=" SIZE_FORMAT,
1707 unlinked_count, in_use_list_ceiling(),
1708 _in_use_list.count(), _in_use_list.max());
1709 }
1710 // Mark the calling JavaThread blocked (safepoint safe) while we free
1711 // the ObjectMonitors so we don't delay safepoints whilst doing that.
1712 ThreadBlockInVM tbivm(JavaThread::cast(current));
1713 if (ls != nullptr) {
1714 ls->print_cr("after setting blocked: in_use_list stats: ceiling="
1715 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1716 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1717 timer.start();
1718 }
1719 deleted_count = delete_monitors(&delete_list);
1720 // ThreadBlockInVM is destroyed here
1721 } else {
1722 // A non-JavaThread can just free the ObjectMonitors:
1723 deleted_count = delete_monitors(&delete_list);
1724 }
1725 assert(unlinked_count == deleted_count, "must be");
1726 }
1727
1728 if (ls != nullptr) {
1729 timer.stop();
1730 if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) {
1731 ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
1732 deflated_count, unlinked_count, timer.seconds());
1733 }
|