334 if (obj == nullptr) return false; // slow-path for invalid obj
335 const markWord mark = obj->mark();
336
337 if (LockingMode == LM_LIGHTWEIGHT) {
338 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
339 // Degenerate notify
340 // fast-locked by caller so by definition the implied waitset is empty.
341 return true;
342 }
343 } else if (LockingMode == LM_LEGACY) {
344 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
345 // Degenerate notify
346 // stack-locked by caller so by definition the implied waitset is empty.
347 return true;
348 }
349 }
350
351 if (mark.has_monitor()) {
352 ObjectMonitor* const mon = mark.monitor();
353 assert(mon->object() == oop(obj), "invariant");
354 if (mon->owner() != current) return false; // slow-path for IMS exception
355
356 if (mon->first_waiter() != nullptr) {
357 // We have one or more waiters. Since this is an inflated monitor
358 // that we own, we can transfer one or more threads from the waitset
359 // to the entrylist here and now, avoiding the slow-path.
360 if (all) {
361 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
362 } else {
363 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
364 }
365 int free_count = 0;
366 do {
367 mon->INotify(current);
368 ++free_count;
369 } while (mon->first_waiter() != nullptr && all);
370 OM_PERFDATA_OP(Notifications, inc(free_count));
371 }
372 return true;
373 }
374
384 // quick_enter() as our thread state remains _in_Java.
385
// Constant-time fast-path monitor enter attempt. Called while the thread
// state remains _thread_in_Java, so we must neither block nor reach a
// safepoint (enforced by the NoSafepointVerifier below). Returns true if
// the lock was acquired here; false sends the caller to the slow path.
bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
                                     BasicLock * lock) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false; // Need to throw NPE

  if (obj->klass()->is_value_based()) {
    // Synchronizing on a value-based class is diagnosed in the slow path.
    return false;
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    LockStack& lock_stack = current->lock_stack();
    if (lock_stack.is_full()) {
      // Always go into runtime if the lock stack is full.
      return false;
    }
    if (lock_stack.try_recursive_enter(obj)) {
      // Recursive lock successful.
      current->inc_held_monitor_count();
      return true;
    }
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }
    JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == current) {
      // Recursive enter on an inflated monitor we already own; a plain
      // increment suffices since no other thread can be the owner.
      m->_recursions++;
      current->inc_held_monitor_count();
      return true;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-null so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // stack-locking in the object's header, the second check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());
    }

    // Uncontended case: try to grab ownership with a single CAS.
    if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
      assert(m->_recursions == 0, "invariant");
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}
460
461 // Handle notifications when synchronizing on value based classes
462 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
463 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
464 frame last_frame = locking_thread->last_frame();
465 bool bcp_was_adjusted = false;
466 // Don't decrement bcp if it points to the frame's first instruction. This happens when
467 // handle_sync_on_value_based_class() is called because of a synchronized method. There
507 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
508 }
509 }
510
// True when every monitor must be a heavyweight (inflated) monitor.
// LM_MONITOR is only supported on the platforms listed below; on all other
// platforms this is unconditionally false.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  const bool heavy_only = (LockingMode == LM_MONITOR);
  return heavy_only;
#else
  return false;
#endif
}
518
519 // -----------------------------------------------------------------------------
520 // Monitor Enter/Exit
521
522 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
523 // When called with locking_thread != Thread::current() some mechanism must synchronize
524 // the locking_thread with respect to the current thread. Currently only used when
525 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
526 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
527 if (!enter_fast_impl(obj, lock, locking_thread)) {
528 // Inflated ObjectMonitor::enter_for is required
529
530 // An async deflation can race after the inflate_for() call and before
531 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
532 // if we have lost the race to async deflation and we simply try again.
533 while (true) {
534 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
535 if (monitor->enter_for(locking_thread)) {
536 return;
537 }
538 assert(monitor->is_being_async_deflated(), "must be");
539 }
540 }
541 }
542
543 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
544 assert(current == Thread::current(), "must be");
545 if (!enter_fast_impl(obj, lock, current)) {
546 // Inflated ObjectMonitor::enter is required
547
548 // An async deflation can race after the inflate() call and before
549 // enter() can make the ObjectMonitor busy. enter() returns false if
550 // we have lost the race to async deflation and we simply try again.
551 while (true) {
552 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
553 if (monitor->enter(current)) {
554 return;
555 }
556 }
557 }
558 }
559
560 // The interpreter and compiler assembly code tries to lock using the fast path
561 // of this algorithm. Make sure to update that code if the following function is
562 // changed. The implementation is extremely sensitive to race condition. Be careful.
// Fast-path lock acquisition shared by enter() and enter_for(). Returns true
// when the lock was acquired without inflating; false means the caller must
// perform an inflated ObjectMonitor enter.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {

  if (obj->klass()->is_value_based()) {
    // Diagnostics/handling for synchronizing on a value-based class instance.
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  // Counted up-front: the count covers this lock whether it is acquired here
  // or, after we return false, via the caller's inflated slow path.
  locking_thread->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.is_full()) {
        // We unconditionally make room on the lock stack by inflating
        // the least recently locked object on the lock stack.

        // About the choice to inflate least recently locked object.
        // First we must choose to inflate a lock, either some lock on
        // the lock-stack or the lock that is currently being entered
        // (which may or may not be on the lock-stack).
        // Second the best lock to inflate is a lock which is entered
        // in a control flow where there are only a very few locks being
        // used, as the costly part of inflated locking is inflation,
        // not locking. But this property is entirely program dependent.
        // Third inflating the lock currently being entered on when it
        // is not present on the lock-stack will result in a still full
        // lock-stack. This creates a scenario where every deeper nested
        // monitorenter must call into the runtime.
        // The rationale here is as follows:
        // Because we cannot (currently) figure out the second, and want
        // to avoid the third, we inflate a lock on the lock-stack.
        // The least recently locked lock is chosen as it is the lock
        // with the longest critical section.

        log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
        ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
        assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
               p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
        assert(!lock_stack.is_full(), "must have made room here");
      }

      markWord mark = obj()->mark_acquire();
      while (mark.is_unlocked()) {
        // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
        // Try to swing into 'fast-locked' state.
        assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
        const markWord locked_mark = mark.set_fast_locked();
        const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
        if (old_mark == mark) {
          // Successfully fast-locked, push object to lock-stack and return.
          lock_stack.push(obj());
          return true;
        }
        mark = old_mark;
      }

      if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
        // Recursive lock successful.
        return true;
      }

      // Failed to fast lock.
      return false;
    } else if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_unlocked()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address) mark.locker())) {
        // Recursive stack-lock: the mark stays pointing at the outer
        // BasicLock; this (inner) BasicLock gets a null displaced header.
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return true;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}
658
659 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
660 current->dec_held_monitor_count();
661
662 if (!useHeavyMonitors()) {
663 markWord mark = object->mark();
664 if (LockingMode == LM_LIGHTWEIGHT) {
665 // Fast-locking does not use the 'lock' argument.
666 LockStack& lock_stack = current->lock_stack();
667 if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
668 // Recursively unlocked.
669 return;
670 }
671
672 if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
673 // This lock is recursive but is not at the top of the lock stack so we're
674 // doing an unbalanced exit. We have to fall thru to inflation below and
675 // let ObjectMonitor::exit() do the unlock.
676 } else {
677 while (mark.is_fast_locked()) {
678 // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
679 const markWord unlocked_mark = mark.set_unlocked();
680 const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
705 // This is a special case where the Java Monitor was inflated
706 // after this thread entered the stack-lock recursively. When a
707 // Java Monitor is inflated, we cannot safely walk the Java
708 // Monitor owner's stack and update the BasicLocks because a
709 // Java Monitor can be asynchronously inflated by a thread that
710 // does not own the Java Monitor.
711 ObjectMonitor* m = mark.monitor();
712 assert(m->object()->mark() == mark, "invariant");
713 assert(m->is_entered(current), "invariant");
714 }
715 }
716 #endif
717 return;
718 }
719
720 if (mark == markWord::from_pointer(lock)) {
721 // If the object is stack-locked by the current thread, try to
722 // swing the displaced header from the BasicLock back to the mark.
723 assert(dhw.is_neutral(), "invariant");
724 if (object->cas_set_mark(dhw, mark) == mark) {
725 return;
726 }
727 }
728 }
729 } else if (VerifyHeavyMonitors) {
730 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
731 }
732
733 // We have to take the slow-path of possible inflation and then exit.
734 // The ObjectMonitor* can't be async deflated until ownership is
735 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
736 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
737 assert(!monitor->is_owner_anonymous(), "must not be");
738 monitor->exit(current);
739 }
740
741 // -----------------------------------------------------------------------------
742 // JNI locks on java objects
743 // NOTE: must use heavy weight monitor to handle jni monitor enter
744 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
766 JavaThread* current = THREAD;
767
768 // The ObjectMonitor* can't be async deflated until ownership is
769 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
770 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
771 // If this thread has locked the object, exit the monitor. We
772 // intentionally do not use CHECK on check_owner because we must exit the
773 // monitor even if an exception was already pending.
774 if (monitor->check_owner(THREAD)) {
775 monitor->exit(current);
776 current->dec_held_monitor_count(1, true);
777 }
778 }
779
780 // -----------------------------------------------------------------------------
781 // Internal VM locks on java objects
782 // standard constructor, allows locking failures
783 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
784 _thread = thread;
785 _thread->check_for_valid_safepoint_state();
786 _obj = obj;
787
788 if (_obj() != nullptr) {
789 ObjectSynchronizer::enter(_obj, &_lock, _thread);
790 }
791 }
792
793 ObjectLocker::~ObjectLocker() {
794 if (_obj() != nullptr) {
795 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
796 }
797 }
798
799
800 // -----------------------------------------------------------------------------
801 // Wait/Notify/NotifyAll
802 // NOTE: must use heavy weight monitor to handle wait()
803 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
804 JavaThread* current = THREAD;
805 if (millis < 0) {
806 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
807 }
808 // The ObjectMonitor* can't be async deflated because the _waiters
809 // field is incremented before ownership is dropped and decremented
810 // after ownership is regained.
811 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
812
813 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
1147
1148 if (mark.has_monitor()) {
1149 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1150 // The first stage of async deflation does not affect any field
1151 // used by this comparison so the ObjectMonitor* is usable here.
1152 ObjectMonitor* monitor = mark.monitor();
1153 return monitor->is_entered(current) != 0;
1154 }
1155 // Unlocked case, header in place
1156 assert(mark.is_unlocked(), "sanity check");
1157 return false;
1158 }
1159
1160 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1161 oop obj = h_obj();
1162 markWord mark = read_stable_mark(obj);
1163
1164 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1165 // stack-locked so header points into owner's stack.
1166 // owning_thread_from_monitor_owner() may also return null here:
1167 return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1168 }
1169
1170 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1171 // fast-locked so get owner from the object.
1172 // owning_thread_from_object() may also return null here:
1173 return Threads::owning_thread_from_object(t_list, h_obj());
1174 }
1175
1176 if (mark.has_monitor()) {
1177 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1178 // The first stage of async deflation does not affect any field
1179 // used by this comparison so the ObjectMonitor* is usable here.
1180 ObjectMonitor* monitor = mark.monitor();
1181 assert(monitor != nullptr, "monitor should be non-null");
1182 // owning_thread_from_monitor() may also return null here:
1183 return Threads::owning_thread_from_monitor(t_list, monitor);
1184 }
1185
1186 // Unlocked case, header in place
1187 // Cannot have assertion since this object may have been
1197 template <typename Function>
1198 void ObjectSynchronizer::monitors_iterate(Function function) {
1199 MonitorList::Iterator iter = _in_use_list.iterator();
1200 while (iter.has_next()) {
1201 ObjectMonitor* monitor = iter.next();
1202 function(monitor);
1203 }
1204 }
1205
1206 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
1207 // returns true.
1208 template <typename OwnerFilter>
1209 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
1210 monitors_iterate([&](ObjectMonitor* monitor) {
1211 // This function is only called at a safepoint or when the
1212 // target thread is suspended or when the target thread is
1213 // operating on itself. The current closures in use today are
1214 // only interested in an owned ObjectMonitor and ownership
1215 // cannot be dropped under the calling contexts so the
1216 // ObjectMonitor cannot be async deflated.
1217 if (monitor->has_owner() && filter(monitor->owner_raw())) {
1218 assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
1219
1220 closure->do_monitor(monitor);
1221 }
1222 });
1223 }
1224
1225 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1226 // ObjectMonitors where owner is set to a stack-lock address in thread.
1227 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1228 auto thread_filter = [&](void* owner) { return owner == thread; };
1229 return owned_monitors_iterate_filtered(closure, thread_filter);
1230 }
1231
1232 // Iterate ObjectMonitors owned by any thread.
1233 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
1234 auto all_filter = [&](void* owner) { return true; };
1235 return owned_monitors_iterate_filtered(closure, all_filter);
1236 }
1237
1238 static bool monitors_used_above_threshold(MonitorList* list) {
1239 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
1240 return false;
1241 }
1242 // Start with ceiling based on a per-thread estimate:
1243 size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1244 size_t old_ceiling = ceiling;
1245 if (ceiling < list->max()) {
1246 // The max used by the system has exceeded the ceiling so use that:
1247 ceiling = list->max();
1248 }
1249 size_t monitors_used = list->count();
1250 if (monitors_used == 0) { // empty list is easy
1251 return false;
1252 }
1253 if (NoAsyncDeflationProgressMax != 0 &&
1254 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1384 event->set_monitorClass(obj->klass());
1385 event->set_address((uintptr_t)(void*)obj);
1386 event->set_cause((u1)cause);
1387 event->commit();
1388 }
1389
1390 // Fast path code shared by multiple functions
1391 void ObjectSynchronizer::inflate_helper(oop obj) {
1392 markWord mark = obj->mark_acquire();
1393 if (mark.has_monitor()) {
1394 ObjectMonitor* monitor = mark.monitor();
1395 markWord dmw = monitor->header();
1396 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1397 return;
1398 }
1399 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1400 }
1401
1402 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1403 assert(current == Thread::current(), "must be");
1404 if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1405 return inflate_impl(JavaThread::cast(current), obj, cause);
1406 }
1407 return inflate_impl(nullptr, obj, cause);
1408 }
1409
// Inflate obj's lock on behalf of `thread`, which must be the current thread
// or a thread suspended for object deoptimization (see enter_for()).
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  return inflate_impl(thread, obj, cause);
}
1414
1415 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1416 // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1417 // that the inflating_thread == Thread::current() or is suspended throughout the call by
1418 // some other mechanism.
1419 // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
1420 // JavaThread. (As may still be the case from FastHashCode). However it is only
1421 // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
1422 // is set when called from ObjectSynchronizer::enter from the owning thread,
1423 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1424 EventJavaMonitorInflate event;
1425
1426 for (;;) {
1427 const markWord mark = object->mark_acquire();
1428
1429 // The mark can be in one of the following states:
1430 // * inflated - Just return if using stack-locking.
1431 // If using fast-locking and the ObjectMonitor owner
1432 // is anonymous and the inflating_thread owns the
1433 // object lock, then we make the inflating_thread
1434 // the ObjectMonitor owner and remove the lock from
1435 // the inflating_thread's lock stack.
1436 // * fast-locked - Coerce it to inflated from fast-locked.
1437 // * stack-locked - Coerce it to inflated from stack-locked.
1438 // * INFLATING - Busy wait for conversion from stack-locked to
1439 // inflated.
1440 // * unlocked - Aggressively inflate the object.
1441
1442 // CASE: inflated
1443 if (mark.has_monitor()) {
1444 ObjectMonitor* inf = mark.monitor();
1445 markWord dmw = inf->header();
1446 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1447 if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
1448 inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
1449 inf->set_owner_from_anonymous(inflating_thread);
1450 size_t removed = inflating_thread->lock_stack().remove(object);
1451 inf->set_recursions(removed - 1);
1452 }
1453 return inf;
1454 }
1455
1456 if (LockingMode != LM_LIGHTWEIGHT) {
1457 // New lightweight locking does not use INFLATING.
1458 // CASE: inflation in progress - inflating over a stack-lock.
1459 // Some other thread is converting from stack-locked to inflated.
1460 // Only that thread can complete inflation -- other threads must wait.
1461 // The INFLATING value is transient.
1462 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1463 // We could always eliminate polling by parking the thread on some auxiliary list.
1464 if (mark == markWord::INFLATING()) {
1465 read_stable_mark(object);
1466 continue;
1467 }
1468 }
1469
1470 // CASE: fast-locked
1471 // Could be fast-locked either by the inflating_thread or by some other thread.
1565 // the 0 causes the owner to stall if the owner happens to try to
1566 // drop the lock (restoring the header from the BasicLock to the object)
// while inflation is in-progress. This protocol avoids races that
// would otherwise permit hashCode values to change or "flicker" for an object.
1569 // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1570 // 0 serves as a "BUSY" inflate-in-progress indicator.
1571
1572
1573 // fetch the displaced mark from the owner's stack.
1574 // The owner can't die or unwind past the lock while our INFLATING
1575 // object is in the mark. Furthermore the owner can't complete
1576 // an unlock on the object, either.
1577 markWord dmw = mark.displaced_mark_helper();
1578 // Catch if the object's header is not neutral (not locked and
1579 // not marked is what we care about here).
1580 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1581
1582 // Setup monitor fields to proper values -- prepare the monitor
1583 m->set_header(dmw);
1584
1585 // Optimization: if the mark.locker stack address is associated
1586 // with this thread we could simply set m->_owner = current.
1587 // Note that a thread can inflate an object
1588 // that it has stack-locked -- as might happen in wait() -- directly
1589 // with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
1590 m->set_owner_from(nullptr, mark.locker());
1591 // TODO-FIXME: assert BasicLock->dhw != 0.
1592
1593 // Must preserve store ordering. The monitor state must
1594 // be stable at the time of publishing the monitor address.
1595 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1596 // Release semantics so that above set_object() is seen first.
1597 object->release_set_mark(markWord::encode(m));
1598
1599 // Once ObjectMonitor is configured and the object is associated
1600 // with the ObjectMonitor, it is safe to allow async deflation:
1601 _in_use_list.add(m);
1602
1603 // Hopefully the performance counters are allocated on distinct cache lines
1604 // to avoid false sharing on MP systems ...
1605 OM_PERFDATA_OP(Inflations, inc());
1606 if (log_is_enabled(Trace, monitorinflation)) {
1607 ResourceMark rm;
1608 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1609 INTPTR_FORMAT ", type='%s'", p2i(object),
1610 object->mark().value(), object->klass()->external_name());
1868 } else if (_no_progress_skip_increment) {
1869 _no_progress_skip_increment = false;
1870 } else {
1871 _no_progress_cnt++;
1872 }
1873
1874 return deflated_count;
1875 }
1876
1877 // Monitor cleanup on JavaThread::exit
1878
1879 // Iterate through monitor cache and attempt to release thread's monitors
1880 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1881 private:
1882 JavaThread* _thread;
1883
1884 public:
1885 ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1886 void do_monitor(ObjectMonitor* mid) {
1887 intx rec = mid->complete_exit(_thread);
1888 _thread->dec_held_monitor_count(rec + 1);
1889 }
1890 };
1891
1892 // Release all inflated monitors owned by current thread. Lightweight monitors are
1893 // ignored. This is meant to be called during JNI thread detach which assumes
1894 // all remaining monitors are heavyweight. All exceptions are swallowed.
1895 // Scanning the extant monitor list can be time consuming.
1896 // A simple optimization is to add a per-thread flag that indicates a thread
1897 // called jni_monitorenter() during its lifetime.
1898 //
1899 // Instead of NoSafepointVerifier it might be cheaper to
1900 // use an idiom of the form:
1901 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1902 // <code that must not run at safepoint>
1903 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1904 // Since the tests are extremely cheap we could leave them enabled
1905 // for normal product builds.
1906
// Release all inflated monitors still owned by `current`; called during JNI
// thread detach (see the comment block above). Exceptions are not expected
// here, and the held-monitor and JNI-monitor counts must end at zero.
void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
  assert(current == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  // The closure fully exits (complete_exit) every monitor owned by current.
  ReleaseJavaMonitorsClosure rjmc(current);
  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
  assert(!current->has_pending_exception(), "Should not be possible");
  current->clear_pending_exception();
  assert(current->held_monitor_count() == 0, "Should not be possible");
  // All monitors (including entered via JNI) have been unlocked above, so we need to clear jni count.
  current->clear_jni_monitor_count();
}
1918
1919 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1920 switch (cause) {
1921 case inflate_cause_vm_internal: return "VM Internal";
1922 case inflate_cause_monitor_enter: return "Monitor Enter";
1923 case inflate_cause_wait: return "Monitor Wait";
1924 case inflate_cause_notify: return "Monitor Notify";
1925 case inflate_cause_hash_code: return "Monitor Hash Code";
1926 case inflate_cause_jni_enter: return "JNI Monitor Enter";
1927 case inflate_cause_jni_exit: return "JNI Monitor Exit";
1928 default:
1929 ShouldNotReachHere();
1930 }
1931 return "Unknown";
1932 }
1933
1934 //------------------------------------------------------------------------------
1935 // Debugging code
1936
1937 u_char* ObjectSynchronizer::get_gvars_addr() {
1938 return (u_char*)&GVars;
1939 }
1940
1941 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1942 return (u_char*)&GVars.hc_sequence;
1943 }
1944
// Size in bytes of the shared globals block.
size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}
|
334 if (obj == nullptr) return false; // slow-path for invalid obj
335 const markWord mark = obj->mark();
336
337 if (LockingMode == LM_LIGHTWEIGHT) {
338 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
339 // Degenerate notify
340 // fast-locked by caller so by definition the implied waitset is empty.
341 return true;
342 }
343 } else if (LockingMode == LM_LEGACY) {
344 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
345 // Degenerate notify
346 // stack-locked by caller so by definition the implied waitset is empty.
347 return true;
348 }
349 }
350
351 if (mark.has_monitor()) {
352 ObjectMonitor* const mon = mark.monitor();
353 assert(mon->object() == oop(obj), "invariant");
354 if (!mon->is_owner(current)) return false; // slow-path for IMS exception
355
356 if (mon->first_waiter() != nullptr) {
357 // We have one or more waiters. Since this is an inflated monitor
358 // that we own, we can transfer one or more threads from the waitset
359 // to the entrylist here and now, avoiding the slow-path.
360 if (all) {
361 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
362 } else {
363 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
364 }
365 int free_count = 0;
366 do {
367 mon->INotify(current);
368 ++free_count;
369 } while (mon->first_waiter() != nullptr && all);
370 OM_PERFDATA_OP(Notifications, inc(free_count));
371 }
372 return true;
373 }
374
384 // quick_enter() as our thread state remains _in_Java.
385
bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
                                     BasicLock * lock) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false; // Need to throw NPE

  // Value-based classes always take the slow path so the diagnostic
  // handling for synchronizing on them can be applied there.
  if (obj->klass()->is_value_based()) {
    return false;
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    LockStack& lock_stack = current->lock_stack();
    if (lock_stack.is_full()) {
      // Always go into runtime if the lock stack is full.
      return false;
    }
    if (lock_stack.try_recursive_enter(obj)) {
      // Recursive lock successful.
      NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
      return true;
    }
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    // Recursive enter by the current owner: just bump the recursion count.
    if (m->is_owner(current)) {
      m->_recursions++;
      NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
      return true;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-null so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // stack-locking in the object's header, the second check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());
    }

    // Uncontended case: try to take ownership of the monitor with a CAS
    // from nullptr to the current thread.
    if (!m->has_owner() && m->try_set_owner_from(nullptr, current) == nullptr) {
      assert(m->_recursions == 0, "invariant");
      NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- block indefinitely, or
  // -- reach a safepoint

  return false; // revert to slow-path
}
459
460 // Handle notifications when synchronizing on value based classes
461 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
462 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
463 frame last_frame = locking_thread->last_frame();
464 bool bcp_was_adjusted = false;
465 // Don't decrement bcp if it points to the frame's first instruction. This happens when
466 // handle_sync_on_value_based_class() is called because of a synchronized method. There
506 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
507 }
508 }
509
// Returns true when every monitor operation must use a full ObjectMonitor
// (LM_MONITOR); only the platforms listed below support that mode here.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  // Remaining platforms: heavy-monitor-only mode is not taken on this path.
  return false;
#endif
}
517
518 // -----------------------------------------------------------------------------
519 // Monitor Enter/Exit
520
// Enter the monitor of obj on behalf of locking_thread, which may differ
// from the current thread (e.g. during deoptimization re-locking).
void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must synchronize
  // the locking_thread with respect to the current thread. Currently only used when
  // deoptimizing and re-locking locks. See Deoptimization::relock_objects
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

  NOT_LOOM_MONITOR_SUPPORT(locking_thread->inc_held_monitor_count();)

  // Try the fast paths (fast-lock / stack-lock) first.
  if (!enter_fast_impl(obj, lock, locking_thread)) {
    // Inflated ObjectMonitor::enter_for is required

    // An async deflation can race after the inflate_for() call and before
    // enter_for() can make the ObjectMonitor busy. enter_for() returns false
    // if we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
      if (monitor->enter_for(locking_thread)) {
        return;
      }
      assert(monitor->is_being_async_deflated(), "must be");
    }
  }
}
544
// Enter the monitor of obj for the current thread.
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(current == Thread::current(), "must be");

  NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)

  // Try the fast paths (fast-lock / stack-lock) first.
  if (!enter_fast_impl(obj, lock, current)) {
    // Inflated ObjectMonitor::enter is required

    // An async deflation can race after the inflate() call and before
    // enter() can make the ObjectMonitor busy. enter() returns false if
    // we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
      if (monitor->enter(current)) {
        return;
      }
    }
  }
}
564
565 // The interpreter and compiler assembly code tries to lock using the fast path
566 // of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
// Fast-path monitor enter. Returns true when the lock was acquired via
// fast-locking (LM_LIGHTWEIGHT) or stack-locking (LM_LEGACY); returns
// false when the caller must fall back to the inflated slow path.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.is_full()) {
        // We unconditionally make room on the lock stack by inflating
        // the least recently locked object on the lock stack.

        // About the choice to inflate least recently locked object.
        // First we must choose to inflate a lock, either some lock on
        // the lock-stack or the lock that is currently being entered
        // (which may or may not be on the lock-stack).
        // Second the best lock to inflate is a lock which is entered
        // in a control flow where there are only a very few locks being
        // used, as the costly part of inflated locking is inflation,
        // not locking. But this property is entirely program dependent.
        // Third inflating the lock currently being entered on when it
        // is not present on the lock-stack will result in a still full
        // lock-stack. This creates a scenario where every deeper nested
        // monitorenter must call into the runtime.
        // The rationale here is as follows:
        // Because we cannot (currently) figure out the second, and want
        // to avoid the third, we inflate a lock on the lock-stack.
        // The least recently locked lock is chosen as it is the lock
        // with the longest critical section.

        log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
        ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
        assert(monitor->is_owner(JavaThread::current()), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
               p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
        assert(!lock_stack.is_full(), "must have made room here");
      }

      markWord mark = obj()->mark_acquire();
      while (mark.is_unlocked()) {
        // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
        // Try to swing into 'fast-locked' state.
        assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
        const markWord locked_mark = mark.set_fast_locked();
        const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
        if (old_mark == mark) {
          // Successfully fast-locked, push object to lock-stack and return.
          lock_stack.push(obj());
          return true;
        }
        mark = old_mark;
      }

      if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
        // Recursive lock successful.
        return true;
      }

      // Failed to fast lock.
      return false;
    } else if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_unlocked()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          LOOM_MONITOR_SUPPORT_ONLY(locking_thread->inc_held_monitor_count();)
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address) mark.locker())) {
        // Recursive stack-lock by this thread.
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return true;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}
661
662 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
663 NOT_LOOM_MONITOR_SUPPORT(current->dec_held_monitor_count();)
664
665 if (!useHeavyMonitors()) {
666 markWord mark = object->mark();
667 if (LockingMode == LM_LIGHTWEIGHT) {
668 // Fast-locking does not use the 'lock' argument.
669 LockStack& lock_stack = current->lock_stack();
670 if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
671 // Recursively unlocked.
672 return;
673 }
674
675 if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
676 // This lock is recursive but is not at the top of the lock stack so we're
677 // doing an unbalanced exit. We have to fall thru to inflation below and
678 // let ObjectMonitor::exit() do the unlock.
679 } else {
680 while (mark.is_fast_locked()) {
681 // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
682 const markWord unlocked_mark = mark.set_unlocked();
683 const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
708 // This is a special case where the Java Monitor was inflated
709 // after this thread entered the stack-lock recursively. When a
710 // Java Monitor is inflated, we cannot safely walk the Java
711 // Monitor owner's stack and update the BasicLocks because a
712 // Java Monitor can be asynchronously inflated by a thread that
713 // does not own the Java Monitor.
714 ObjectMonitor* m = mark.monitor();
715 assert(m->object()->mark() == mark, "invariant");
716 assert(m->is_entered(current), "invariant");
717 }
718 }
719 #endif
720 return;
721 }
722
723 if (mark == markWord::from_pointer(lock)) {
724 // If the object is stack-locked by the current thread, try to
725 // swing the displaced header from the BasicLock back to the mark.
726 assert(dhw.is_neutral(), "invariant");
727 if (object->cas_set_mark(dhw, mark) == mark) {
728 LOOM_MONITOR_SUPPORT_ONLY(current->dec_held_monitor_count();)
729 return;
730 }
731 }
732 }
733 } else if (VerifyHeavyMonitors) {
734 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
735 }
736
737 // We have to take the slow-path of possible inflation and then exit.
738 // The ObjectMonitor* can't be async deflated until ownership is
739 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
740 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
741 assert(!monitor->is_owner_anonymous(), "must not be");
742 monitor->exit(current);
743 }
744
745 // -----------------------------------------------------------------------------
746 // JNI locks on java objects
747 // NOTE: must use heavy weight monitor to handle jni monitor enter
748 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
770 JavaThread* current = THREAD;
771
772 // The ObjectMonitor* can't be async deflated until ownership is
773 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
774 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
775 // If this thread has locked the object, exit the monitor. We
776 // intentionally do not use CHECK on check_owner because we must exit the
777 // monitor even if an exception was already pending.
778 if (monitor->check_owner(THREAD)) {
779 monitor->exit(current);
780 current->dec_held_monitor_count(1, true);
781 }
782 }
783
784 // -----------------------------------------------------------------------------
785 // Internal VM locks on java objects
786 // standard constructor, allows locking failures
787 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
788 _thread = thread;
789 _thread->check_for_valid_safepoint_state();
790 DEBUG_ONLY(_thread->inc_obj_locker_count();)
791 _obj = obj;
792
793 if (_obj() != nullptr) {
794 ObjectSynchronizer::enter(_obj, &_lock, _thread);
795 }
796 }
797
798 ObjectLocker::~ObjectLocker() {
799 DEBUG_ONLY(_thread->dec_obj_locker_count();)
800 if (_obj() != nullptr) {
801 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
802 }
803 }
804
805
806 // -----------------------------------------------------------------------------
807 // Wait/Notify/NotifyAll
808 // NOTE: must use heavy weight monitor to handle wait()
809 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
810 JavaThread* current = THREAD;
811 if (millis < 0) {
812 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
813 }
814 // The ObjectMonitor* can't be async deflated because the _waiters
815 // field is incremented before ownership is dropped and decremented
816 // after ownership is regained.
817 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
818
819 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
1153
1154 if (mark.has_monitor()) {
1155 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1156 // The first stage of async deflation does not affect any field
1157 // used by this comparison so the ObjectMonitor* is usable here.
1158 ObjectMonitor* monitor = mark.monitor();
1159 return monitor->is_entered(current) != 0;
1160 }
1161 // Unlocked case, header in place
1162 assert(mark.is_unlocked(), "sanity check");
1163 return false;
1164 }
1165
1166 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1167 oop obj = h_obj();
1168 markWord mark = read_stable_mark(obj);
1169
1170 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1171 // stack-locked so header points into owner's stack.
1172 // owning_thread_from_monitor_owner() may also return null here:
1173 return Threads::owning_thread_from_stacklock(t_list, (address) mark.locker());
1174 }
1175
1176 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1177 // fast-locked so get owner from the object.
1178 // owning_thread_from_object() may also return null here:
1179 return Threads::owning_thread_from_object(t_list, h_obj());
1180 }
1181
1182 if (mark.has_monitor()) {
1183 // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1184 // The first stage of async deflation does not affect any field
1185 // used by this comparison so the ObjectMonitor* is usable here.
1186 ObjectMonitor* monitor = mark.monitor();
1187 assert(monitor != nullptr, "monitor should be non-null");
1188 // owning_thread_from_monitor() may also return null here:
1189 return Threads::owning_thread_from_monitor(t_list, monitor);
1190 }
1191
1192 // Unlocked case, header in place
1193 // Cannot have assertion since this object may have been
1203 template <typename Function>
1204 void ObjectSynchronizer::monitors_iterate(Function function) {
1205 MonitorList::Iterator iter = _in_use_list.iterator();
1206 while (iter.has_next()) {
1207 ObjectMonitor* monitor = iter.next();
1208 function(monitor);
1209 }
1210 }
1211
1212 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
1213 // returns true.
1214 template <typename OwnerFilter>
1215 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
1216 monitors_iterate([&](ObjectMonitor* monitor) {
1217 // This function is only called at a safepoint or when the
1218 // target thread is suspended or when the target thread is
1219 // operating on itself. The current closures in use today are
1220 // only interested in an owned ObjectMonitor and ownership
1221 // cannot be dropped under the calling contexts so the
1222 // ObjectMonitor cannot be async deflated.
1223 if (monitor->has_owner() && filter(monitor)) {
1224 assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
1225
1226 closure->do_monitor(monitor);
1227 }
1228 });
1229 }
1230
1231 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1232 // ObjectMonitors where owner is set to a stack-lock address in thread.
1233 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1234 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->is_owner(thread); };
1235 return owned_monitors_iterate_filtered(closure, thread_filter);
1236 }
1237
1238 // Iterate ObjectMonitors owned by any thread.
1239 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
1240 auto all_filter = [&](ObjectMonitor* monitor) { return true; };
1241 return owned_monitors_iterate_filtered(closure, all_filter);
1242 }
1243
1244 static bool monitors_used_above_threshold(MonitorList* list) {
1245 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
1246 return false;
1247 }
1248 // Start with ceiling based on a per-thread estimate:
1249 size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1250 size_t old_ceiling = ceiling;
1251 if (ceiling < list->max()) {
1252 // The max used by the system has exceeded the ceiling so use that:
1253 ceiling = list->max();
1254 }
1255 size_t monitors_used = list->count();
1256 if (monitors_used == 0) { // empty list is easy
1257 return false;
1258 }
1259 if (NoAsyncDeflationProgressMax != 0 &&
1260 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1390 event->set_monitorClass(obj->klass());
1391 event->set_address((uintptr_t)(void*)obj);
1392 event->set_cause((u1)cause);
1393 event->commit();
1394 }
1395
1396 // Fast path code shared by multiple functions
1397 void ObjectSynchronizer::inflate_helper(oop obj) {
1398 markWord mark = obj->mark_acquire();
1399 if (mark.has_monitor()) {
1400 ObjectMonitor* monitor = mark.monitor();
1401 markWord dmw = monitor->header();
1402 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1403 return;
1404 }
1405 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1406 }
1407
1408 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1409 assert(current == Thread::current(), "must be");
1410 return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
1411 }
1412
// Inflate obj's monitor on behalf of `thread`, which may differ from the
// caller (e.g. when deoptimization re-locks objects for a suspended thread).
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  // The target thread must be us, or be kept suspended for the duration.
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  return inflate_impl(thread, obj, cause);
}
1417
1418 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1419 // The JavaThread* inflating_thread requires that the inflating_thread == Thread::current() or
1420 // is suspended throughout the call by some other mechanism.
1421 // The thread might be nullptr when called from a non JavaThread. (As may still be
1422 // the case from FastHashCode). However it is only important for correctness that the
1423 // thread is set when called from ObjectSynchronizer::enter from the owning thread,
1424 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1425 EventJavaMonitorInflate event;
1426
1427 for (;;) {
1428 const markWord mark = object->mark_acquire();
1429
1430 // The mark can be in one of the following states:
1431 // * inflated - If the ObjectMonitor owner is anonymous and the
1432 // inflating_thread owns the object lock, then we
1433 // make the inflating_thread the ObjectMonitor owner.
1434 // For LM_LIGHTWEIGHT we also remove the lock from
1435 // the inflating_thread's lock stack.
1436 // * fast-locked - Coerce it to inflated from fast-locked.
1437 // * stack-locked - Coerce it to inflated from stack-locked.
1438 // * INFLATING - Busy wait for conversion from stack-locked to
1439 // inflated.
1440 // * unlocked - Aggressively inflate the object.
1441
1442 // CASE: inflated
1443 if (mark.has_monitor()) {
1444 ObjectMonitor* inf = mark.monitor();
1445 markWord dmw = inf->header();
1446 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1447 if (inf->is_owner_anonymous() && inflating_thread != nullptr) {
1448 if (LockingMode == LM_LIGHTWEIGHT) {
1449 if (inflating_thread->lock_stack().contains(object)) {
1450 inf->set_owner_from_anonymous(inflating_thread);
1451 size_t removed = inflating_thread->lock_stack().remove(object);
1452 inf->set_recursions(removed - 1);
1453 }
1454 } else {
1455 assert(LockingMode == LM_LEGACY, "invariant");
1456 if (inflating_thread->is_lock_owned((address)inf->stack_locker())) {
1457 inf->set_owner_from_BasicLock(inflating_thread);
1458 // Decrement monitor count now since this monitor is okay for freezing
1459 LOOM_MONITOR_SUPPORT_ONLY(inflating_thread->dec_held_monitor_count();)
1460 }
1461 }
1462 }
1463 return inf;
1464 }
1465
1466 if (LockingMode != LM_LIGHTWEIGHT) {
1467 // New lightweight locking does not use INFLATING.
1468 // CASE: inflation in progress - inflating over a stack-lock.
1469 // Some other thread is converting from stack-locked to inflated.
1470 // Only that thread can complete inflation -- other threads must wait.
1471 // The INFLATING value is transient.
1472 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1473 // We could always eliminate polling by parking the thread on some auxiliary list.
1474 if (mark == markWord::INFLATING()) {
1475 read_stable_mark(object);
1476 continue;
1477 }
1478 }
1479
1480 // CASE: fast-locked
1481 // Could be fast-locked either by the inflating_thread or by some other thread.
1575 // the 0 causes the owner to stall if the owner happens to try to
1576 // drop the lock (restoring the header from the BasicLock to the object)
1577 // while inflation is in-progress. This protocol avoids races that might
1578 // would otherwise permit hashCode values to change or "flicker" for an object.
1579 // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1580 // 0 serves as a "BUSY" inflate-in-progress indicator.
1581
1582
1583 // fetch the displaced mark from the owner's stack.
1584 // The owner can't die or unwind past the lock while our INFLATING
1585 // object is in the mark. Furthermore the owner can't complete
1586 // an unlock on the object, either.
1587 markWord dmw = mark.displaced_mark_helper();
1588 // Catch if the object's header is not neutral (not locked and
1589 // not marked is what we care about here).
1590 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1591
1592 // Setup monitor fields to proper values -- prepare the monitor
1593 m->set_header(dmw);
1594
1595 // Note that a thread can inflate an object
1596 // that it has stack-locked -- as might happen in wait() -- directly
1597 // with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
1598 if (inflating_thread != nullptr && inflating_thread->is_lock_owned((address)mark.locker())) {
1599 m->set_owner_from(nullptr, inflating_thread);
1600 // Decrement monitor count now since this monitor is okay for freezing
1601 LOOM_MONITOR_SUPPORT_ONLY(inflating_thread->dec_held_monitor_count();)
1602 } else {
1603 // Use ANONYMOUS_OWNER to indicate that the owner is the BasicLock on the stack,
1604 // and set the stack locker field in the monitor.
1605 m->set_stack_locker(mark.locker());
1606 m->set_owner_anonymous(); // second
1607 }
1608 // TODO-FIXME: assert BasicLock->dhw != 0.
1609
1610 // Must preserve store ordering. The monitor state must
1611 // be stable at the time of publishing the monitor address.
1612 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1613 // Release semantics so that above set_object() is seen first.
1614 object->release_set_mark(markWord::encode(m));
1615
1616 // Once ObjectMonitor is configured and the object is associated
1617 // with the ObjectMonitor, it is safe to allow async deflation:
1618 _in_use_list.add(m);
1619
1620 // Hopefully the performance counters are allocated on distinct cache lines
1621 // to avoid false sharing on MP systems ...
1622 OM_PERFDATA_OP(Inflations, inc());
1623 if (log_is_enabled(Trace, monitorinflation)) {
1624 ResourceMark rm;
1625 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1626 INTPTR_FORMAT ", type='%s'", p2i(object),
1627 object->mark().value(), object->klass()->external_name());
1885 } else if (_no_progress_skip_increment) {
1886 _no_progress_skip_increment = false;
1887 } else {
1888 _no_progress_cnt++;
1889 }
1890
1891 return deflated_count;
1892 }
1893
1894 // Monitor cleanup on JavaThread::exit
1895
1896 // Iterate through monitor cache and attempt to release thread's monitors
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  JavaThread* _thread;  // The exiting thread whose monitors are released.

 public:
  ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    // complete_exit() fully exits the monitor regardless of recursion
    // depth and returns the recursion count that was dropped.
    intx rec = mid->complete_exit(_thread);
    // Without Loom monitor support, adjust the held-count by the full
    // lock depth (recursions + the base lock).
    _thread->dec_held_monitor_count(NOT_LOOM_MONITOR_SUPPORT((rec + 1)));
  }
};
1908
1909 // Release all inflated monitors owned by current thread. Lightweight monitors are
1910 // ignored. This is meant to be called during JNI thread detach which assumes
1911 // all remaining monitors are heavyweight. All exceptions are swallowed.
1912 // Scanning the extant monitor list can be time consuming.
1913 // A simple optimization is to add a per-thread flag that indicates a thread
1914 // called jni_monitorenter() during its lifetime.
1915 //
1916 // Instead of NoSafepointVerifier it might be cheaper to
1917 // use an idiom of the form:
1918 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1919 // <code that must not run at safepoint>
1920 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1921 // Since the tests are extremely cheap we could leave them enabled
1922 // for normal product builds.
1923
// Release every inflated monitor still owned by `current` (called during
// thread detach/exit); see the commentary above for the rationale.
void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
  assert(current == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(current);
  // Visit only monitors whose owner is exactly `current`.
  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
  assert(!current->has_pending_exception(), "Should not be possible");
  current->clear_pending_exception();
  assert(current->held_monitor_count() == 0, "Should not be possible");
  // All monitors (including entered via JNI) have been unlocked above, so we need to clear jni count.
  current->clear_jni_monitor_count();
}
1935
1936 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1937 switch (cause) {
1938 case inflate_cause_vm_internal: return "VM Internal";
1939 case inflate_cause_monitor_enter: return "Monitor Enter";
1940 case inflate_cause_wait: return "Monitor Wait";
1941 case inflate_cause_notify: return "Monitor Notify";
1942 case inflate_cause_hash_code: return "Monitor Hash Code";
1943 case inflate_cause_jni_enter: return "JNI Monitor Enter";
1944 case inflate_cause_jni_exit: return "JNI Monitor Exit";
1945 case inflate_cause_cont_freeze: return "Continuation Freeze";
1946 default:
1947 ShouldNotReachHere();
1948 }
1949 return "Unknown";
1950 }
1951
1952 //------------------------------------------------------------------------------
1953 // Debugging code
1954
1955 u_char* ObjectSynchronizer::get_gvars_addr() {
1956 return (u_char*)&GVars;
1957 }
1958
1959 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1960 return (u_char*)&GVars.hc_sequence;
1961 }
1962
// Size in bytes of the shared globals block returned by get_gvars_addr().
size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}
|