src/hotspot/share/runtime/synchronizer.cpp (original version; the patched version follows below)


 334   if (obj == nullptr) return false;  // slow-path for invalid obj
 335   const markWord mark = obj->mark();
 336 
 337   if (LockingMode == LM_LIGHTWEIGHT) {
 338     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 339       // Degenerate notify
 340       // fast-locked by caller so by definition the implied waitset is empty.
 341       return true;
 342     }
 343   } else if (LockingMode == LM_LEGACY) {
 344     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 345       // Degenerate notify
 346       // stack-locked by caller so by definition the implied waitset is empty.
 347       return true;
 348     }
 349   }
 350 
 351   if (mark.has_monitor()) {
 352     ObjectMonitor* const mon = mark.monitor();
 353     assert(mon->object() == oop(obj), "invariant");
 354     if (mon->owner() != current) return false;  // slow-path for IMS exception
 355 
 356     if (mon->first_waiter() != nullptr) {
 357       // We have one or more waiters. Since this is an inflated monitor
 358       // that we own, we can transfer one or more threads from the waitset
 359       // to the entrylist here and now, avoiding the slow-path.
 360       if (all) {
 361         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 362       } else {
 363         DTRACE_MONITOR_PROBE(notify, mon, obj, current);
 364       }
 365       int free_count = 0;
 366       do {
 367         mon->INotify(current);
 368         ++free_count;
 369       } while (mon->first_waiter() != nullptr && all);
 370       OM_PERFDATA_OP(Notifications, inc(free_count));
 371     }
 372     return true;
 373   }
 374 
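
The waitset-to-entrylist transfer above can be pictured with a minimal standalone sketch (plain C++ with a hypothetical ToyMonitor type, not HotSpot code): notify moves one waiter, notifyAll drains the waitset, matching the shape of the INotify do/while loop.

    #include <cstdio>
    #include <deque>

    struct ToyMonitor {
      std::deque<int> waitset;    // threads blocked in wait()
      std::deque<int> entrylist;  // threads eligible to re-contend for the lock

      // Transfer at least one waiter, and keep going while 'all' is set and
      // waiters remain -- the same loop shape as the code above.
      int notify(bool all) {
        int free_count = 0;
        if (waitset.empty()) return free_count;
        do {
          entrylist.push_back(waitset.front());
          waitset.pop_front();
          ++free_count;
        } while (!waitset.empty() && all);
        return free_count;
      }
    };

    int main() {
      ToyMonitor m;
      m.waitset = {1, 2, 3};
      std::printf("notify moved %d\n", m.notify(false));    // prints 1
      std::printf("notifyAll moved %d\n", m.notify(true));  // prints 2
    }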

 384 // quick_enter() as our thread state remains _in_Java.
 385 
 386 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 387                                      BasicLock * lock) {
 388   assert(current->thread_state() == _thread_in_Java, "invariant");
 389   NoSafepointVerifier nsv;
 390   if (obj == nullptr) return false;       // Need to throw NPE
 391 
 392   if (obj->klass()->is_value_based()) {
 393     return false;
 394   }
 395 
 396   if (LockingMode == LM_LIGHTWEIGHT) {
 397     LockStack& lock_stack = current->lock_stack();
 398     if (lock_stack.is_full()) {
 399       // Always go into runtime if the lock stack is full.
 400       return false;
 401     }
 402     if (lock_stack.try_recursive_enter(obj)) {
 403       // Recursive lock successful.
 404       current->inc_held_monitor_count();
 405       return true;
 406     }
 407   }
 408 
 409   const markWord mark = obj->mark();
 410 
 411   if (mark.has_monitor()) {
 412     ObjectMonitor* const m = mark.monitor();
 413     // An async deflation or GC can race us before we manage to make
 414     // the ObjectMonitor busy by setting the owner below. If we detect
 415     // that race we just bail out to the slow-path here.
 416     if (m->object_peek() == nullptr) {
 417       return false;
 418     }
 419     JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
 420 
 421     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 422     // and observability
 423     // Case: light contention possibly amenable to TLE
 424     // Case: TLE inimical operations such as nested/recursive synchronization
 425 
 426     if (owner == current) {
 427       m->_recursions++;
 428       current->inc_held_monitor_count();
 429       return true;
 430     }
 431 
 432     if (LockingMode != LM_LIGHTWEIGHT) {
 433       // This Java Monitor is inflated so obj's header will never be
 434       // displaced to this thread's BasicLock. Make the displaced header
 435       // non-null so this BasicLock is not seen as recursive nor as
 436       // being locked. We do this unconditionally so that this thread's
 437       // BasicLock cannot be mis-interpreted by any stack walkers. For
 438       // performance reasons, stack walkers generally first check for
 439       // stack-locking in the object's header, the second check is for
 440       // recursive stack-locking in the displaced header in the BasicLock,
 441       // and last are the inflated Java Monitor (ObjectMonitor) checks.
 442       lock->set_displaced_header(markWord::unused_mark());
 443     }
 444 
 445     if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
 446       assert(m->_recursions == 0, "invariant");
 447       current->inc_held_monitor_count();
 448       return true;
 449     }
 450   }
 451 
 452   // Note that we could inflate in quick_enter.
 453   // This is likely a useful optimization.
 454   // Critically, in quick_enter() we must not:
 455   // -- block indefinitely, or
 456   // -- reach a safepoint
 457 
 458   return false;        // revert to slow-path
 459 }
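
The uncontended acquisition above (an owner check followed by try_set_owner_from(nullptr, current)) is a test-and-test-and-set CAS. A minimal sketch of the same shape, assuming a hypothetical Thread type and std::atomic in place of the ObjectMonitor owner field:

    #include <atomic>

    struct Thread {};

    struct ToyObjectMonitor {
      std::atomic<Thread*> owner{nullptr};
      int recursions = 0;

      // Returns true on a fast acquisition, false if the caller must take a
      // slow path. Re-entry bumps the recursion count; an unowned monitor is
      // claimed with a single CAS from nullptr to 'current'.
      bool quick_enter(Thread* current) {
        Thread* o = owner.load(std::memory_order_relaxed);
        if (o == current) { ++recursions; return true; }
        if (o == nullptr) {
          Thread* expected = nullptr;
          return owner.compare_exchange_strong(expected, current,
                                               std::memory_order_acquire);
        }
        return false;  // contended by another thread
      }
    };

    int main() {
      Thread t;
      ToyObjectMonitor m;
      return m.quick_enter(&t) && m.quick_enter(&t) ? 0 : 1;  // exits 0
    }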
 460 
 461 // Handle notifications when synchronizing on value based classes
 462 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
 463   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 464   frame last_frame = locking_thread->last_frame();
 465   bool bcp_was_adjusted = false;
 466   // Don't decrement bcp if it points to the frame's first instruction.  This happens when
 467   // handle_sync_on_value_based_class() is called because of a synchronized method.  There

 507     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 508   }
 509 }
 510 
 511 static bool useHeavyMonitors() {
 512 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 513   return LockingMode == LM_MONITOR;
 514 #else
 515   return false;
 516 #endif
 517 }
 518 
 519 // -----------------------------------------------------------------------------
 520 // Monitor Enter/Exit
 521 
 522 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 523   // When called with locking_thread != Thread::current() some mechanism must synchronize
 524   // the locking_thread with respect to the current thread. Currently only used when
 525   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 526   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 527   if (!enter_fast_impl(obj, lock, locking_thread)) {
 528     // Inflated ObjectMonitor::enter_for is required
 529 
 530     // An async deflation can race after the inflate_for() call and before
 531     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 532     // if we have lost the race to async deflation and we simply try again.
 533     while (true) {
 534       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 535       if (monitor->enter_for(locking_thread)) {
 536         return;
 537       }
 538       assert(monitor->is_being_async_deflated(), "must be");
 539     }
 540   }
 541 }
 542 
 543 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 544   assert(current == Thread::current(), "must be");
 545   if (!enter_fast_impl(obj, lock, current)) {
 546     // Inflated ObjectMonitor::enter is required
 547 
 548     // An async deflation can race after the inflate() call and before
 549     // enter() can make the ObjectMonitor busy. enter() returns false if
 550     // we have lost the race to async deflation and we simply try again.
 551     while (true) {
 552       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 553       if (monitor->enter(current)) {
 554         return;
 555       }
 556     }
 557   }
 558 }
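
Both enter paths share the same retry discipline: inflate() may hand back an ObjectMonitor that async deflation is concurrently tearing down, in which case enter() fails and the loop simply re-inflates. A standalone sketch of that loop, with a hypothetical 'deflated' flag standing in for the real is_being_async_deflated() state:

    #include <atomic>

    struct ToyMonitor {
      std::atomic<bool> deflated{false};
      // Fails if a deflater won the race for this monitor.
      bool enter() { return !deflated.load(std::memory_order_acquire); }
    };

    ToyMonitor* inflate() {
      static ToyMonitor m;  // stand-in for looking up / creating the monitor
      return &m;
    }

    // Mirrors the shape of ObjectSynchronizer::enter(): on losing the race
    // to async deflation, re-inflate and try again.
    void enter_with_retry() {
      while (true) {
        ToyMonitor* m = inflate();
        if (m->enter()) return;
      }
    }

    int main() { enter_with_retry(); }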
 559 
 560 // The interpreter and compiler assembly code tries to lock using the fast path
 561 // of this algorithm. Make sure to update that code if the following function is
 562 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 563 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 564 
 565   if (obj->klass()->is_value_based()) {
 566     handle_sync_on_value_based_class(obj, locking_thread);
 567   }
 568 
 569   locking_thread->inc_held_monitor_count();
 570 
 571   if (!useHeavyMonitors()) {
 572     if (LockingMode == LM_LIGHTWEIGHT) {
 573       // Fast-locking does not use the 'lock' argument.
 574       LockStack& lock_stack = locking_thread->lock_stack();
 575       if (lock_stack.is_full()) {
 576         // We unconditionally make room on the lock stack by inflating
 577         // the least recently locked object on the lock stack.
 578 
 579         // About the choice to inflate the least recently locked object.
 580         // First, we must choose to inflate a lock: either some lock on
 581         // the lock-stack or the lock that is currently being entered
 582         // (which may or may not be on the lock-stack).
 583         // Second, the best lock to inflate is a lock which is entered
 584         // in a control flow where there are only a few locks being
 585         // used, as the costly part of inflated locking is inflation,
 586         // not locking. But this property is entirely program dependent.
 587         // Third, inflating the lock currently being entered when it
 588         // is not present on the lock-stack will result in a still-full
 589         // lock-stack. This creates a scenario where every deeper nested
 590         // monitorenter must call into the runtime.
 591         // The rationale here is as follows:
 592         // Because we cannot (currently) figure out the second, and want
 593         // to avoid the third, we inflate a lock on the lock-stack.
 594         // The least recently locked lock is chosen as it is the lock
 595         // with the longest critical section.
 596 
 597         log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
 598         ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
 599         assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
 600                p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
 601         assert(!lock_stack.is_full(), "must have made room here");
 602       }
 603 
 604       markWord mark = obj()->mark_acquire();
 605       while (mark.is_neutral()) {
 606         // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
 607         // Try to swing into 'fast-locked' state.
 608         assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
 609         const markWord locked_mark = mark.set_fast_locked();
 610         const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
 611         if (old_mark == mark) {
 612           // Successfully fast-locked, push object to lock-stack and return.
 613           lock_stack.push(obj());
 614           return true;
 615         }
 616         mark = old_mark;
 617       }
 618 
 619       if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
 620         // Recursive lock successful.
 621         return true;
 622       }
 623 
 624       // Failed to fast lock.
 625       return false;
 626     } else if (LockingMode == LM_LEGACY) {
 627       markWord mark = obj->mark();
 628       if (mark.is_neutral()) {
 629         // Anticipate successful CAS -- the ST of the displaced mark must
 630         // be visible <= the ST performed by the CAS.
 631         lock->set_displaced_header(mark);
 632         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 633           return true;
 634         }
 635       } else if (mark.has_locker() &&
 636                  locking_thread->is_lock_owned((address) mark.locker())) {
 637         assert(lock != mark.locker(), "must not re-lock the same lock");
 638         assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
 639         lock->set_displaced_header(markWord::from_pointer(nullptr));
 640         return true;
 641       }
 642 
 643       // The object header will never be displaced to this lock,
 644       // so it does not matter what the value is, except that it
 645       // must be non-zero to avoid looking like a re-entrant lock,
 646       // and must not look locked either.
 647       lock->set_displaced_header(markWord::unused_mark());
 648 
 649       // Failed to fast lock.
 650       return false;
 651     }
 652   } else if (VerifyHeavyMonitors) {
 653     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 654   }
 655 
 656   return false;
 657 }
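
The LM_LIGHTWEIGHT branch above leans on a small per-thread lock stack: a full stack forces inflation of the bottom (least recently locked) entry, and a recursive enter only succeeds when the object is already on top. A toy fixed-capacity version (hypothetical, far simpler than the real LockStack):

    #include <cstddef>

    struct Obj {};

    struct ToyLockStack {
      static const std::size_t CAP = 8;
      Obj* entries[CAP];
      std::size_t top = 0;

      bool is_full() const { return top == CAP; }
      Obj* bottom() const { return entries[0]; }  // least recently locked

      void push(Obj* o) { entries[top++] = o; }   // caller checks is_full()

      // Recursive enter only when 'o' is the current top entry.
      bool try_recursive_enter(Obj* o) {
        if (is_full() || top == 0 || entries[top - 1] != o) return false;
        entries[top++] = o;
        return true;
      }

      // Recursive exit only when the top two entries are both 'o'.
      bool try_recursive_exit(Obj* o) {
        if (top < 2 || entries[top - 1] != o || entries[top - 2] != o) return false;
        --top;
        return true;
      }
    };

    int main() {
      Obj a;
      ToyLockStack s;
      s.push(&a);
      return s.try_recursive_enter(&a) && s.try_recursive_exit(&a) ? 0 : 1;
    }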
 658 
 659 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 660   current->dec_held_monitor_count();
 661 
 662   if (!useHeavyMonitors()) {
 663     markWord mark = object->mark();
 664     if (LockingMode == LM_LIGHTWEIGHT) {
 665       // Fast-locking does not use the 'lock' argument.
 666       LockStack& lock_stack = current->lock_stack();
 667       if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
 668         // Recursively unlocked.
 669         return;
 670       }
 671 
 672       if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
 673         // This lock is recursive but is not at the top of the lock stack so we're
 674         // doing an unbalanced exit. We have to fall thru to inflation below and
 675         // let ObjectMonitor::exit() do the unlock.
 676       } else {
 677         while (mark.is_fast_locked()) {
 678           // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
 679           const markWord unlocked_mark = mark.set_unlocked();
 680           const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);

 705             // This is a special case where the Java Monitor was inflated
 706             // after this thread entered the stack-lock recursively. When a
 707             // Java Monitor is inflated, we cannot safely walk the Java
 708             // Monitor owner's stack and update the BasicLocks because a
 709             // Java Monitor can be asynchronously inflated by a thread that
 710             // does not own the Java Monitor.
 711             ObjectMonitor* m = mark.monitor();
 712             assert(m->object()->mark() == mark, "invariant");
 713             assert(m->is_entered(current), "invariant");
 714           }
 715         }
 716 #endif
 717         return;
 718       }
 719 
 720       if (mark == markWord::from_pointer(lock)) {
 721         // If the object is stack-locked by the current thread, try to
 722         // swing the displaced header from the BasicLock back to the mark.
 723         assert(dhw.is_neutral(), "invariant");
 724         if (object->cas_set_mark(dhw, mark) == mark) {
 725           return;
 726         }
 727       }
 728     }
 729   } else if (VerifyHeavyMonitors) {
 730     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 731   }
 732 
 733   // We have to take the slow-path of possible inflation and then exit.
 734   // The ObjectMonitor* can't be async deflated until ownership is
 735   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 736   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 737   assert(!monitor->is_owner_anonymous(), "must not be");
 738   monitor->exit(current);
 739 }
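
The fast-lock and fast-unlock transitions in enter_fast_impl() and exit() are CAS loops over the low lock bits of the header word; the loop retries because the CAS can also fail when unrelated (non-lock) bits change underneath it. A standalone sketch with an assumed two-bit encoding (unlocked = 0b01, fast-locked = 0b00, loosely modeled on markWord; not HotSpot code):

    #include <atomic>
    #include <cstdint>

    constexpr std::uintptr_t LOCK_MASK   = 0b11;
    constexpr std::uintptr_t UNLOCKED    = 0b01;  // "neutral"
    constexpr std::uintptr_t FAST_LOCKED = 0b00;

    bool fast_lock(std::atomic<std::uintptr_t>& mark_word) {
      std::uintptr_t mark = mark_word.load(std::memory_order_acquire);
      while ((mark & LOCK_MASK) == UNLOCKED) {
        std::uintptr_t locked = (mark & ~LOCK_MASK) | FAST_LOCKED;
        // On failure 'mark' is reloaded; retry until a lock-state change
        // (not just a non-lock-bit change) is observed.
        if (mark_word.compare_exchange_weak(mark, locked,
                                            std::memory_order_acquire)) {
          return true;
        }
      }
      return false;  // not neutral: recursive or slow path
    }

    bool fast_unlock(std::atomic<std::uintptr_t>& mark_word) {
      std::uintptr_t mark = mark_word.load(std::memory_order_acquire);
      while ((mark & LOCK_MASK) == FAST_LOCKED) {
        std::uintptr_t unlocked = (mark & ~LOCK_MASK) | UNLOCKED;
        if (mark_word.compare_exchange_weak(mark, unlocked,
                                            std::memory_order_release)) {
          return true;
        }
      }
      return false;  // inflated meanwhile: monitor exit path
    }

    int main() {
      std::atomic<std::uintptr_t> mw{UNLOCKED};
      return fast_lock(mw) && fast_unlock(mw) ? 0 : 1;
    }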
 740 
 741 // -----------------------------------------------------------------------------
 742 // JNI locks on java objects
 743 // NOTE: must use heavy weight monitor to handle jni monitor enter
 744 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {

 766   JavaThread* current = THREAD;
 767 
 768   // The ObjectMonitor* can't be async deflated until ownership is
 769   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 770   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 771   // If this thread has locked the object, exit the monitor. We
 772   // intentionally do not use CHECK on check_owner because we must exit the
 773   // monitor even if an exception was already pending.
 774   if (monitor->check_owner(THREAD)) {
 775     monitor->exit(current);
 776     current->dec_held_monitor_count(1, true);
 777   }
 778 }
 779 
 780 // -----------------------------------------------------------------------------
 781 // Internal VM locks on java objects
 782 // standard constructor, allows locking failures
 783 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 784   _thread = thread;
 785   _thread->check_for_valid_safepoint_state();
 786   _obj = obj;
 787 
 788   if (_obj() != nullptr) {
 789     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 790   }
 791 }
 792 
 793 ObjectLocker::~ObjectLocker() {
 794   if (_obj() != nullptr) {
 795     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 796   }
 797 }
 798 
 799 
 800 // -----------------------------------------------------------------------------
 801 //  Wait/Notify/NotifyAll
 802 // NOTE: must use heavy weight monitor to handle wait()
 803 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 804   JavaThread* current = THREAD;
 805   if (millis < 0) {
 806     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 807   }
 808   // The ObjectMonitor* can't be async deflated because the _waiters
 809   // field is incremented before ownership is dropped and decremented
 810   // after ownership is regained.
 811   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 812 
 813   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);

1133 
1134   if (mark.has_monitor()) {
1135     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1136     // The first stage of async deflation does not affect any field
1137     // used by this comparison so the ObjectMonitor* is usable here.
1138     ObjectMonitor* monitor = mark.monitor();
1139     return monitor->is_entered(current) != 0;
1140   }
1141   // Unlocked case, header in place
1142   assert(mark.is_neutral(), "sanity check");
1143   return false;
1144 }
1145 
1146 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1147   oop obj = h_obj();
1148   markWord mark = read_stable_mark(obj);
1149 
1150   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1151     // stack-locked so header points into owner's stack.
1152     // owning_thread_from_monitor_owner() may also return null here:
1153     return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1154   }
1155 
1156   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1157     // fast-locked so get owner from the object.
1158     // owning_thread_from_object() may also return null here:
1159     return Threads::owning_thread_from_object(t_list, h_obj());
1160   }
1161 
1162   if (mark.has_monitor()) {
1163     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1164     // The first stage of async deflation does not affect any field
1165     // used by this comparison so the ObjectMonitor* is usable here.
1166     ObjectMonitor* monitor = mark.monitor();
1167     assert(monitor != nullptr, "monitor should be non-null");
1168     // owning_thread_from_monitor() may also return null here:
1169     return Threads::owning_thread_from_monitor(t_list, monitor);
1170   }
1171 
1172   // Unlocked case, header in place
1173   // Cannot have assertion since this object may have been

1183 template <typename Function>
1184 void ObjectSynchronizer::monitors_iterate(Function function) {
1185   MonitorList::Iterator iter = _in_use_list.iterator();
1186   while (iter.has_next()) {
1187     ObjectMonitor* monitor = iter.next();
1188     function(monitor);
1189   }
1190 }
1191 
1192 // Iterate ObjectMonitors owned by any thread and for which the owner
1193 // `filter` returns true.
1194 template <typename OwnerFilter>
1195 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
1196   monitors_iterate([&](ObjectMonitor* monitor) {
1197     // This function is only called at a safepoint or when the
1198     // target thread is suspended or when the target thread is
1199     // operating on itself. The current closures in use today are
1200     // only interested in an owned ObjectMonitor and ownership
1201     // cannot be dropped under the calling contexts so the
1202     // ObjectMonitor cannot be async deflated.
1203     if (monitor->has_owner() && filter(monitor->owner_raw())) {
1204       assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
1205 
1206       closure->do_monitor(monitor);
1207     }
1208   });
1209 }
1210 
1211 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1212 // ObjectMonitors where owner is set to a stack-lock address in thread.
1213 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1214   auto thread_filter = [&](void* owner) { return owner == thread; };
1215   return owned_monitors_iterate_filtered(closure, thread_filter);
1216 }
1217 
1218 // Iterate ObjectMonitors owned by any thread.
1219 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
1220   auto all_filter = [&](void* owner) { return true; };
1221   return owned_monitors_iterate_filtered(closure, all_filter);
1222 }
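
All the ownership queries above funnel through one templated traversal that is specialized with a lambda filter, which keeps the signature of the filter a local detail of the two small wrappers. The pattern in miniature, with hypothetical toy types (the toy filter receives the whole monitor for simplicity):

    #include <vector>

    struct ToyMonitor { void* owner = nullptr; };

    // One traversal; callers choose behavior via the Filter and Action
    // lambdas, as owned_monitors_iterate_filtered() does with OwnerFilter.
    template <typename Filter, typename Action>
    void for_each_owned(std::vector<ToyMonitor>& monitors,
                        Filter filter, Action action) {
      for (ToyMonitor& m : monitors) {
        if (m.owner != nullptr && filter(m)) action(m);
      }
    }

    int main() {
      int me = 0;
      std::vector<ToyMonitor> monitors(3);
      monitors[1].owner = &me;
      int count = 0;
      for_each_owned(monitors,
                     [&](ToyMonitor& m) { return m.owner == &me; },
                     [&](ToyMonitor&)   { ++count; });
      return count == 1 ? 0 : 1;
    }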
1223 
1224 static bool monitors_used_above_threshold(MonitorList* list) {
1225   if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
1226     return false;
1227   }
1228   // Start with ceiling based on a per-thread estimate:
1229   size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1230   size_t old_ceiling = ceiling;
1231   if (ceiling < list->max()) {
1232     // The max used by the system has exceeded the ceiling so use that:
1233     ceiling = list->max();
1234   }
1235   size_t monitors_used = list->count();
1236   if (monitors_used == 0) {  // empty list is easy
1237     return false;
1238   }
1239   if (NoAsyncDeflationProgressMax != 0 &&
1240       _no_progress_cnt >= NoAsyncDeflationProgressMax) {

1370   event->set_monitorClass(obj->klass());
1371   event->set_address((uintptr_t)(void*)obj);
1372   event->set_cause((u1)cause);
1373   event->commit();
1374 }
1375 
1376 // Fast path code shared by multiple functions
1377 void ObjectSynchronizer::inflate_helper(oop obj) {
1378   markWord mark = obj->mark_acquire();
1379   if (mark.has_monitor()) {
1380     ObjectMonitor* monitor = mark.monitor();
1381     markWord dmw = monitor->header();
1382     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1383     return;
1384   }
1385   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1386 }
1387 
1388 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1389   assert(current == Thread::current(), "must be");
1390   if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1391     return inflate_impl(JavaThread::cast(current), obj, cause);
1392   }
1393   return inflate_impl(nullptr, obj, cause);
1394 }
1395 
1396 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1397   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1398   return inflate_impl(thread, obj, cause);
1399 }
1400 
1401 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1402   // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1403   // that the inflating_thread == Thread::current() or is suspended throughout the call by
1404   // some other mechanism.
1405   // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
1406   // non-JavaThread. (As may still be the case from FastHashCode.) However, it is only
1407   // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
1408   // is set when called from ObjectSynchronizer::enter from the owning thread,
1409   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1410   EventJavaMonitorInflate event;
1411 
1412   for (;;) {
1413     const markWord mark = object->mark_acquire();
1414 
1415     // The mark can be in one of the following states:
1416     // *  inflated     - Just return if using stack-locking.
1417     //                   If using fast-locking and the ObjectMonitor owner
1418     //                   is anonymous and the inflating_thread owns the
1419     //                   object lock, then we make the inflating_thread
1420     //                   the ObjectMonitor owner and remove the lock from
1421     //                   the inflating_thread's lock stack.
1422     // *  fast-locked  - Coerce it to inflated from fast-locked.
1423     // *  stack-locked - Coerce it to inflated from stack-locked.
1424     // *  INFLATING    - Busy wait for conversion from stack-locked to
1425     //                   inflated.
1426     // *  neutral      - Aggressively inflate the object.
1427 
1428     // CASE: inflated
1429     if (mark.has_monitor()) {
1430       ObjectMonitor* inf = mark.monitor();
1431       markWord dmw = inf->header();
1432       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1433       if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
1434           inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
1435         inf->set_owner_from_anonymous(inflating_thread);
1436         size_t removed = inflating_thread->lock_stack().remove(object);
1437         inf->set_recursions(removed - 1);
1438       }
1439       return inf;
1440     }
1441 
1442     if (LockingMode != LM_LIGHTWEIGHT) {
1443       // New lightweight locking does not use INFLATING.
1444       // CASE: inflation in progress - inflating over a stack-lock.
1445       // Some other thread is converting from stack-locked to inflated.
1446       // Only that thread can complete inflation -- other threads must wait.
1447       // The INFLATING value is transient.
1448       // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1449       // We could always eliminate polling by parking the thread on some auxiliary list.
1450       if (mark == markWord::INFLATING()) {
1451         read_stable_mark(object);
1452         continue;
1453       }
1454     }
1455 
1456     // CASE: fast-locked
1457     // Could be fast-locked either by the inflating_thread or by some other thread.

1551       // the 0 causes the owner to stall if the owner happens to try to
1552       // drop the lock (restoring the header from the BasicLock to the object)
1553       // while inflation is in-progress.  This protocol avoids races that
1554       // would otherwise permit hashCode values to change or "flicker" for an object.
1555       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1556       // 0 serves as a "BUSY" inflate-in-progress indicator.
1557 
1558 
1559       // fetch the displaced mark from the owner's stack.
1560       // The owner can't die or unwind past the lock while our INFLATING
1561       // object is in the mark.  Furthermore the owner can't complete
1562       // an unlock on the object, either.
1563       markWord dmw = mark.displaced_mark_helper();
1564       // Catch if the object's header is not neutral (not locked and
1565       // not marked is what we care about here).
1566       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1567 
1568       // Setup monitor fields to proper values -- prepare the monitor
1569       m->set_header(dmw);
1570 
1571       // Optimization: if the mark.locker stack address is associated
1572       // with this thread we could simply set m->_owner = current.
1573       // Note that a thread can inflate an object
1574       // that it has stack-locked -- as might happen in wait() -- directly
1575       // with CAS.  That is, we can avoid the xchg-nullptr .... ST idiom.
1576       m->set_owner_from(nullptr, mark.locker());
1577       // TODO-FIXME: assert BasicLock->dhw != 0.
1578 
1579       // Must preserve store ordering. The monitor state must
1580       // be stable at the time of publishing the monitor address.
1581       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1582       // Release semantics so that above set_object() is seen first.
1583       object->release_set_mark(markWord::encode(m));
1584 
1585       // Once ObjectMonitor is configured and the object is associated
1586       // with the ObjectMonitor, it is safe to allow async deflation:
1587       _in_use_list.add(m);
1588 
1589       // Hopefully the performance counters are allocated on distinct cache lines
1590       // to avoid false sharing on MP systems ...
1591       OM_PERFDATA_OP(Inflations, inc());
1592       if (log_is_enabled(Trace, monitorinflation)) {
1593         ResourceMark rm;
1594         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1595                      INTPTR_FORMAT ", type='%s'", p2i(object),
1596                      object->mark().value(), object->klass()->external_name());

1856   } else if (_no_progress_skip_increment) {
1857     _no_progress_skip_increment = false;
1858   } else {
1859     _no_progress_cnt++;
1860   }
1861 
1862   return deflated_count;
1863 }
1864 
1865 // Monitor cleanup on JavaThread::exit
1866 
1867 // Iterate through monitor cache and attempt to release thread's monitors
1868 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1869  private:
1870   JavaThread* _thread;
1871 
1872  public:
1873   ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1874   void do_monitor(ObjectMonitor* mid) {
1875     intx rec = mid->complete_exit(_thread);
1876     _thread->dec_held_monitor_count(rec + 1);
1877   }
1878 };
1879 
1880 // Release all inflated monitors owned by current thread.  Lightweight monitors are
1881 // ignored.  This is meant to be called during JNI thread detach which assumes
1882 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1883 // Scanning the extant monitor list can be time consuming.
1884 // A simple optimization is to add a per-thread flag that indicates a thread
1885 // called jni_monitorenter() during its lifetime.
1886 //
1887 // Instead of NoSafepointVerifier it might be cheaper to
1888 // use an idiom of the form:
1889 //   int tmp = SafepointSynchronize::_safepoint_counter;
1890 //   <code that must not run at safepoint>
1891 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1892 // Since the tests are extremely cheap we could leave them enabled
1893 // for normal product builds.
1894 
1895 void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1896   assert(current == JavaThread::current(), "must be current Java thread");
1897   NoSafepointVerifier nsv;
1898   ReleaseJavaMonitorsClosure rjmc(current);
1899   ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
1900   assert(!current->has_pending_exception(), "Should not be possible");
1901   current->clear_pending_exception();
1902   assert(current->held_monitor_count() == 0, "Should not be possible");
1903   // All monitors (including entered via JNI) have been unlocked above, so we need to clear jni count.
1904   current->clear_jni_monitor_count();
1905 }
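
The detach cleanup walks every monitor owned by the exiting thread and force-releases it; complete_exit() reports the outstanding recursion count so the held-monitor counter can be settled in one subtraction per monitor. A standalone sketch of that contract (toy types, not the real ObjectMonitor):

    #include <vector>

    struct ToyMonitor {
      void* owner = nullptr;
      int recursions = 0;
      // Drop the lock entirely, returning the recursion count that was
      // outstanding (mirrors the complete_exit() usage above).
      int complete_exit() {
        int rec = recursions;
        recursions = 0;
        owner = nullptr;
        return rec;
      }
    };

    // Release every monitor owned by 'thread' and settle its held count,
    // as release_monitors_owned_by_thread() does via the closure.
    void release_all(std::vector<ToyMonitor>& monitors, void* thread,
                     long& held_count) {
      for (ToyMonitor& m : monitors) {
        if (m.owner == thread) {
          held_count -= m.complete_exit() + 1;
        }
      }
    }

    int main() {
      int me = 0;
      long held = 3;  // one monitor entered three times (recursions == 2)
      std::vector<ToyMonitor> ms(2);
      ms[0].owner = &me;
      ms[0].recursions = 2;
      release_all(ms, &me, held);
      return held == 0 ? 0 : 1;
    }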
1906 
1907 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1908   switch (cause) {
1909     case inflate_cause_vm_internal:    return "VM Internal";
1910     case inflate_cause_monitor_enter:  return "Monitor Enter";
1911     case inflate_cause_wait:           return "Monitor Wait";
1912     case inflate_cause_notify:         return "Monitor Notify";
1913     case inflate_cause_hash_code:      return "Monitor Hash Code";
1914     case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1915     case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1916     default:
1917       ShouldNotReachHere();
1918   }
1919   return "Unknown";
1920 }
1921 
1922 //------------------------------------------------------------------------------
1923 // Debugging code
1924 
1925 u_char* ObjectSynchronizer::get_gvars_addr() {
1926   return (u_char*)&GVars;
1927 }
1928 
1929 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1930   return (u_char*)&GVars.hc_sequence;
1931 }
1932 
1933 size_t ObjectSynchronizer::get_gvars_size() {
1934   return sizeof(SharedGlobals);
1935 }

src/hotspot/share/runtime/synchronizer.cpp (patched version)

 334   if (obj == nullptr) return false;  // slow-path for invalid obj
 335   const markWord mark = obj->mark();
 336 
 337   if (LockingMode == LM_LIGHTWEIGHT) {
 338     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 339       // Degenerate notify
 340       // fast-locked by caller so by definition the implied waitset is empty.
 341       return true;
 342     }
 343   } else if (LockingMode == LM_LEGACY) {
 344     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 345       // Degenerate notify
 346       // stack-locked by caller so by definition the implied waitset is empty.
 347       return true;
 348     }
 349   }
 350 
 351   if (mark.has_monitor()) {
 352     ObjectMonitor* const mon = mark.monitor();
 353     assert(mon->object() == oop(obj), "invariant");
 354     if (!mon->is_owner(current)) return false;  // slow-path for IMS exception
 355 
 356     if (mon->first_waiter() != nullptr) {
 357       // We have one or more waiters. Since this is an inflated monitor
 358       // that we own, we can transfer one or more threads from the waitset
 359       // to the entrylist here and now, avoiding the slow-path.
 360       if (all) {
 361         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 362       } else {
 363         DTRACE_MONITOR_PROBE(notify, mon, obj, current);
 364       }
 365       int free_count = 0;
 366       do {
 367         mon->INotify(current);
 368         ++free_count;
 369       } while (mon->first_waiter() != nullptr && all);
 370       OM_PERFDATA_OP(Notifications, inc(free_count));
 371     }
 372     return true;
 373   }
 374 

 384 // quick_enter() as our thread state remains _in_Java.
 385 
 386 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 387                                      BasicLock * lock) {
 388   assert(current->thread_state() == _thread_in_Java, "invariant");
 389   NoSafepointVerifier nsv;
 390   if (obj == nullptr) return false;       // Need to throw NPE
 391 
 392   if (obj->klass()->is_value_based()) {
 393     return false;
 394   }
 395 
 396   if (LockingMode == LM_LIGHTWEIGHT) {
 397     LockStack& lock_stack = current->lock_stack();
 398     if (lock_stack.is_full()) {
 399       // Always go into runtime if the lock stack is full.
 400       return false;
 401     }
 402     if (lock_stack.try_recursive_enter(obj)) {
 403       // Recursive lock successful.
 404       NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
 405       return true;
 406     }
 407   }
 408 
 409   const markWord mark = obj->mark();
 410 
 411   if (mark.has_monitor()) {
 412     ObjectMonitor* const m = mark.monitor();
 413     // An async deflation or GC can race us before we manage to make
 414     // the ObjectMonitor busy by setting the owner below. If we detect
 415     // that race we just bail out to the slow-path here.
 416     if (m->object_peek() == nullptr) {
 417       return false;
 418     }
 419
 420     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 421     // and observability
 422     // Case: light contention possibly amenable to TLE
 423     // Case: TLE inimical operations such as nested/recursive synchronization
 424 
 425     if (m->is_owner(current)) {
 426       m->_recursions++;
 427       NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
 428       return true;
 429     }
 430 
 431     if (LockingMode != LM_LIGHTWEIGHT) {
 432       // This Java Monitor is inflated so obj's header will never be
 433       // displaced to this thread's BasicLock. Make the displaced header
 434       // non-null so this BasicLock is not seen as recursive nor as
 435       // being locked. We do this unconditionally so that this thread's
 436       // BasicLock cannot be mis-interpreted by any stack walkers. For
 437       // performance reasons, stack walkers generally first check for
 438       // stack-locking in the object's header, the second check is for
 439       // recursive stack-locking in the displaced header in the BasicLock,
 440       // and last are the inflated Java Monitor (ObjectMonitor) checks.
 441       lock->set_displaced_header(markWord::unused_mark());
 442     }
 443 
 444     if (!m->has_owner() && m->try_set_owner_from(nullptr, current) == nullptr) {
 445       assert(m->_recursions == 0, "invariant");
 446       NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
 447       return true;
 448     }
 449   }
 450 
 451   // Note that we could inflate in quick_enter.
 452   // This is likely a useful optimization.
 453   // Critically, in quick_enter() we must not:
 454   // -- block indefinitely, or
 455   // -- reach a safepoint
 456 
 457   return false;        // revert to slow-path
 458 }
 459 
 460 // Handle notifications when synchronizing on value based classes
 461 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
 462   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 463   frame last_frame = locking_thread->last_frame();
 464   bool bcp_was_adjusted = false;
 465   // Don't decrement bcp if it points to the frame's first instruction.  This happens when
 466   // handle_sync_on_value_based_class() is called because of a synchronized method.  There

 506     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 507   }
 508 }
 509 
 510 static bool useHeavyMonitors() {
 511 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 512   return LockingMode == LM_MONITOR;
 513 #else
 514   return false;
 515 #endif
 516 }
 517 
 518 // -----------------------------------------------------------------------------
 519 // Monitor Enter/Exit
 520 
 521 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 522   // When called with locking_thread != Thread::current() some mechanism must synchronize
 523   // the locking_thread with respect to the current thread. Currently only used when
 524   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 525   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 526 
 527   NOT_LOOM_MONITOR_SUPPORT(locking_thread->inc_held_monitor_count();)
 528 
 529   if (!enter_fast_impl(obj, lock, locking_thread)) {
 530     // Inflated ObjectMonitor::enter_for is required
 531 
 532     // An async deflation can race after the inflate_for() call and before
 533     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 534     // if we have lost the race to async deflation and we simply try again.
 535     while (true) {
 536       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 537       if (monitor->enter_for(locking_thread)) {
 538         return;
 539       }
 540       assert(monitor->is_being_async_deflated(), "must be");
 541     }
 542   }
 543 }
 544 
 545 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 546   assert(current == Thread::current(), "must be");
 547 
 548   NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
 549 
 550   if (!enter_fast_impl(obj, lock, current)) {
 551     // Inflated ObjectMonitor::enter is required
 552 
 553     // An async deflation can race after the inflate() call and before
 554     // enter() can make the ObjectMonitor busy. enter() returns false if
 555     // we have lost the race to async deflation and we simply try again.
 556     while (true) {
 557       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 558       if (monitor->enter(current)) {
 559         return;
 560       }
 561     }
 562   }
 563 }
 564 
 565 // The interpreter and compiler assembly code tries to lock using the fast path
 566 // of this algorithm. Make sure to update that code if the following function is
 567 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 568 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 569   if (obj->klass()->is_value_based()) {
 570     handle_sync_on_value_based_class(obj, locking_thread);
 571   }
 572
 573   if (!useHeavyMonitors()) {
 574     if (LockingMode == LM_LIGHTWEIGHT) {
 575       // Fast-locking does not use the 'lock' argument.
 576       LockStack& lock_stack = locking_thread->lock_stack();
 577       if (lock_stack.is_full()) {
 578         // We unconditionally make room on the lock stack by inflating
 579         // the least recently locked object on the lock stack.
 580 
 581         // About the choice to inflate the least recently locked object.
 582         // First, we must choose to inflate a lock: either some lock on
 583         // the lock-stack or the lock that is currently being entered
 584         // (which may or may not be on the lock-stack).
 585         // Second, the best lock to inflate is a lock which is entered
 586         // in a control flow where there are only a few locks being
 587         // used, as the costly part of inflated locking is inflation,
 588         // not locking. But this property is entirely program dependent.
 589         // Third, inflating the lock currently being entered when it
 590         // is not present on the lock-stack will result in a still-full
 591         // lock-stack. This creates a scenario where every deeper nested
 592         // monitorenter must call into the runtime.
 593         // The rationale here is as follows:
 594         // Because we cannot (currently) figure out the second, and want
 595         // to avoid the third, we inflate a lock on the lock-stack.
 596         // The least recently locked lock is chosen as it is the lock
 597         // with the longest critical section.
 598 
 599         log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
 600         ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
 601         assert(monitor->is_owner(JavaThread::current()), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
 602                p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
 603         assert(!lock_stack.is_full(), "must have made room here");
 604       }
 605 
 606       markWord mark = obj()->mark_acquire();
 607       while (mark.is_neutral()) {
 608         // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
 609         // Try to swing into 'fast-locked' state.
 610         assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
 611         const markWord locked_mark = mark.set_fast_locked();
 612         const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
 613         if (old_mark == mark) {
 614           // Successfully fast-locked, push object to lock-stack and return.
 615           lock_stack.push(obj());
 616           return true;
 617         }
 618         mark = old_mark;
 619       }
 620 
 621       if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
 622         // Recursive lock successful.
 623         return true;
 624       }
 625 
 626       // Failed to fast lock.
 627       return false;
 628     } else if (LockingMode == LM_LEGACY) {
 629       markWord mark = obj->mark();
 630       if (mark.is_neutral()) {
 631         // Anticipate successful CAS -- the ST of the displaced mark must
 632         // be visible <= the ST performed by the CAS.
 633         lock->set_displaced_header(mark);
 634         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 635           LOOM_MONITOR_SUPPORT_ONLY(locking_thread->inc_held_monitor_count();)
 636           return true;
 637         }
 638       } else if (mark.has_locker() &&
 639                  locking_thread->is_lock_owned((address) mark.locker())) {
 640         assert(lock != mark.locker(), "must not re-lock the same lock");
 641         assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
 642         lock->set_displaced_header(markWord::from_pointer(nullptr));
 643         return true;
 644       }
 645 
 646       // The object header will never be displaced to this lock,
 647       // so it does not matter what the value is, except that it
 648       // must be non-zero to avoid looking like a re-entrant lock,
 649       // and must not look locked either.
 650       lock->set_displaced_header(markWord::unused_mark());
 651 
 652       // Failed to fast lock.
 653       return false;
 654     }
 655   } else if (VerifyHeavyMonitors) {
 656     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 657   }
 658 
 659   return false;
 660 }
 661 
 662 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 663   NOT_LOOM_MONITOR_SUPPORT(current->dec_held_monitor_count();)
 664 
 665   if (!useHeavyMonitors()) {
 666     markWord mark = object->mark();
 667     if (LockingMode == LM_LIGHTWEIGHT) {
 668       // Fast-locking does not use the 'lock' argument.
 669       LockStack& lock_stack = current->lock_stack();
 670       if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
 671         // Recursively unlocked.
 672         return;
 673       }
 674 
 675       if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
 676         // This lock is recursive but is not at the top of the lock stack so we're
 677         // doing an unbalanced exit. We have to fall thru to inflation below and
 678         // let ObjectMonitor::exit() do the unlock.
 679       } else {
 680         while (mark.is_fast_locked()) {
 681           // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
 682           const markWord unlocked_mark = mark.set_unlocked();
 683           const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);

 708             // This is a special case where the Java Monitor was inflated
 709             // after this thread entered the stack-lock recursively. When a
 710             // Java Monitor is inflated, we cannot safely walk the Java
 711             // Monitor owner's stack and update the BasicLocks because a
 712             // Java Monitor can be asynchronously inflated by a thread that
 713             // does not own the Java Monitor.
 714             ObjectMonitor* m = mark.monitor();
 715             assert(m->object()->mark() == mark, "invariant");
 716             assert(m->is_entered(current), "invariant");
 717           }
 718         }
 719 #endif
 720         return;
 721       }
 722 
 723       if (mark == markWord::from_pointer(lock)) {
 724         // If the object is stack-locked by the current thread, try to
 725         // swing the displaced header from the BasicLock back to the mark.
 726         assert(dhw.is_neutral(), "invariant");
 727         if (object->cas_set_mark(dhw, mark) == mark) {
 728           LOOM_MONITOR_SUPPORT_ONLY(current->dec_held_monitor_count();)
 729           return;
 730         }
 731       }
 732     }
 733   } else if (VerifyHeavyMonitors) {
 734     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 735   }
 736 
 737   // We have to take the slow-path of possible inflation and then exit.
 738   // The ObjectMonitor* can't be async deflated until ownership is
 739   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 740   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 741   assert(!monitor->is_owner_anonymous(), "must not be");
 742   monitor->exit(current);
 743 }
 744 
 745 // -----------------------------------------------------------------------------
 746 // JNI locks on java objects
 747 // NOTE: must use heavy weight monitor to handle jni monitor enter
 748 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {

 770   JavaThread* current = THREAD;
 771 
 772   // The ObjectMonitor* can't be async deflated until ownership is
 773   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 774   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 775   // If this thread has locked the object, exit the monitor. We
 776   // intentionally do not use CHECK on check_owner because we must exit the
 777   // monitor even if an exception was already pending.
 778   if (monitor->check_owner(THREAD)) {
 779     monitor->exit(current);
 780     current->dec_held_monitor_count(1, true);
 781   }
 782 }
 783 
 784 // -----------------------------------------------------------------------------
 785 // Internal VM locks on java objects
 786 // standard constructor, allows locking failures
 787 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 788   _thread = thread;
 789   _thread->check_for_valid_safepoint_state();
 790   DEBUG_ONLY(_thread->inc_obj_locker_count();)
 791   _obj = obj;
 792 
 793   if (_obj() != nullptr) {
 794     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 795   }
 796 }
 797 
 798 ObjectLocker::~ObjectLocker() {
 799   DEBUG_ONLY(_thread->dec_obj_locker_count();)
 800   if (_obj() != nullptr) {
 801     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 802   }
 803 }
 804 
 805 
 806 // -----------------------------------------------------------------------------
 807 //  Wait/Notify/NotifyAll
 808 // NOTE: must use heavy weight monitor to handle wait()
 809 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 810   JavaThread* current = THREAD;
 811   if (millis < 0) {
 812     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 813   }
 814   // The ObjectMonitor* can't be async deflated because the _waiters
 815   // field is incremented before ownership is dropped and decremented
 816   // after ownership is regained.
 817   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 818 
 819   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);

1139 
1140   if (mark.has_monitor()) {
1141     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1142     // The first stage of async deflation does not affect any field
1143     // used by this comparison so the ObjectMonitor* is usable here.
1144     ObjectMonitor* monitor = mark.monitor();
1145     return monitor->is_entered(current) != 0;
1146   }
1147   // Unlocked case, header in place
1148   assert(mark.is_neutral(), "sanity check");
1149   return false;
1150 }
1151 
1152 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1153   oop obj = h_obj();
1154   markWord mark = read_stable_mark(obj);
1155 
1156   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1157     // stack-locked so header points into owner's stack.
1158     // owning_thread_from_stacklock() may also return null here:
1159     return Threads::owning_thread_from_stacklock(t_list, (address) mark.locker());
1160   }
1161 
1162   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1163     // fast-locked so get owner from the object.
1164     // owning_thread_from_object() may also return null here:
1165     return Threads::owning_thread_from_object(t_list, h_obj());
1166   }
1167 
1168   if (mark.has_monitor()) {
1169     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1170     // The first stage of async deflation does not affect any field
1171     // used by this comparison so the ObjectMonitor* is usable here.
1172     ObjectMonitor* monitor = mark.monitor();
1173     assert(monitor != nullptr, "monitor should be non-null");
1174     // owning_thread_from_monitor() may also return null here:
1175     return Threads::owning_thread_from_monitor(t_list, monitor);
1176   }
1177 
1178   // Unlocked case, header in place
1179   // Cannot have assertion since this object may have been

1189 template <typename Function>
1190 void ObjectSynchronizer::monitors_iterate(Function function) {
1191   MonitorList::Iterator iter = _in_use_list.iterator();
1192   while (iter.has_next()) {
1193     ObjectMonitor* monitor = iter.next();
1194     function(monitor);
1195   }
1196 }
1197 
1198 // Iterate ObjectMonitors owned by any thread and for which the monitor
1199 // `filter` returns true.
1200 template <typename OwnerFilter>
1201 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
1202   monitors_iterate([&](ObjectMonitor* monitor) {
1203     // This function is only called at a safepoint or when the
1204     // target thread is suspended or when the target thread is
1205     // operating on itself. The current closures in use today are
1206     // only interested in an owned ObjectMonitor and ownership
1207     // cannot be dropped under the calling contexts so the
1208     // ObjectMonitor cannot be async deflated.
1209     if (monitor->has_owner() && filter(monitor)) {
1210       assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
1211 
1212       closure->do_monitor(monitor);
1213     }
1214   });
1215 }
1216 
1217 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1218 // ObjectMonitors where owner is set to a stack-lock address in thread.
1219 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1220   auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->is_owner(thread); };
1221   return owned_monitors_iterate_filtered(closure, thread_filter);
1222 }
1223 
1224 // Iterate ObjectMonitors owned by any thread.
1225 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
1226   auto all_filter = [&](ObjectMonitor* monitor) { return true; };
1227   return owned_monitors_iterate_filtered(closure, all_filter);
1228 }
1229 
1230 static bool monitors_used_above_threshold(MonitorList* list) {
1231   if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
1232     return false;
1233   }
1234   // Start with ceiling based on a per-thread estimate:
1235   size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1236   size_t old_ceiling = ceiling;
1237   if (ceiling < list->max()) {
1238     // The max used by the system has exceeded the ceiling so use that:
1239     ceiling = list->max();
1240   }
1241   size_t monitors_used = list->count();
1242   if (monitors_used == 0) {  // empty list is easy
1243     return false;
1244   }
1245   if (NoAsyncDeflationProgressMax != 0 &&
1246       _no_progress_cnt >= NoAsyncDeflationProgressMax) {

1376   event->set_monitorClass(obj->klass());
1377   event->set_address((uintptr_t)(void*)obj);
1378   event->set_cause((u1)cause);
1379   event->commit();
1380 }
1381 
1382 // Fast path code shared by multiple functions
1383 void ObjectSynchronizer::inflate_helper(oop obj) {
1384   markWord mark = obj->mark_acquire();
1385   if (mark.has_monitor()) {
1386     ObjectMonitor* monitor = mark.monitor();
1387     markWord dmw = monitor->header();
1388     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1389     return;
1390   }
1391   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1392 }
1393 
1394 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1395   assert(current == Thread::current(), "must be");
1396   return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
1397 }
1398 
1399 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1400   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1401   return inflate_impl(thread, obj, cause);
1402 }
1403 
1404 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1405   // The JavaThread* inflating_thread requires that the inflating_thread == Thread::current() or
1406   // is suspended throughout the call by some other mechanism.
1407   // The thread might be nullptr when called from a non-JavaThread (as may still be
1408   // the case from FastHashCode). However, it is only important for correctness that the
1409   // thread is set when called from ObjectSynchronizer::enter from the owning thread,
1410   // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1411   EventJavaMonitorInflate event;
1412 
1413   for (;;) {
1414     const markWord mark = object->mark_acquire();
1415 
1416     // The mark can be in one of the following states:
1417     // *  inflated     - If the ObjectMonitor owner is anonymous and the
1418     //                   inflating_thread owns the object lock, then we
1419     //                   make the inflating_thread the ObjectMonitor owner.
1420     //                   For LM_LIGHTWEIGHT we also remove the lock from
1421     //                   the inflating_thread's lock stack.
1422     // *  fast-locked  - Coerce it to inflated from fast-locked.
1423     // *  stack-locked - Coerce it to inflated from stack-locked.
1424     // *  INFLATING    - Busy wait for conversion from stack-locked to
1425     //                   inflated.
1426     // *  neutral      - Aggressively inflate the object.
1427 
1428     // CASE: inflated
1429     if (mark.has_monitor()) {
1430       ObjectMonitor* inf = mark.monitor();
1431       markWord dmw = inf->header();
1432       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1433       if (inf->is_owner_anonymous() && inflating_thread != nullptr) {
1434         if (LockingMode == LM_LIGHTWEIGHT) {
1435           if (inflating_thread->lock_stack().contains(object)) {
1436             inf->set_owner_from_anonymous(inflating_thread);
1437             size_t removed = inflating_thread->lock_stack().remove(object);
1438             inf->set_recursions(removed - 1);
1439           }
1440         } else {
1441           assert(LockingMode == LM_LEGACY, "invariant");
1442           if (inflating_thread->is_lock_owned((address)inf->stack_locker())) {
1443             inf->set_owner_from_BasicLock(inflating_thread);
1444             // Decrement monitor count now since this monitor is okay for freezing
1445             LOOM_MONITOR_SUPPORT_ONLY(inflating_thread->dec_held_monitor_count();)
1446           }
1447         }
1448       }
1449       return inf;
1450     }
1451 
1452     if (LockingMode != LM_LIGHTWEIGHT) {
1453       // New lightweight locking does not use INFLATING.
1454       // CASE: inflation in progress - inflating over a stack-lock.
1455       // Some other thread is converting from stack-locked to inflated.
1456       // Only that thread can complete inflation -- other threads must wait.
1457       // The INFLATING value is transient.
1458       // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1459       // We could always eliminate polling by parking the thread on some auxiliary list.
1460       if (mark == markWord::INFLATING()) {
1461         read_stable_mark(object);
1462         continue;
1463       }
1464     }
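
    // Illustrative sketch (editor's addition) of the spin/poll idea the
    // comment above describes; the real waiting logic lives in
    // read_stable_mark(), which also escalates to yield/park. SpinPause()
    // is the usual HotSpot pause hint.
    //
    //   while (object->mark_acquire() == markWord::INFLATING()) {
    //     SpinPause();  // back off; real code eventually yields or parks
    //   }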
1465 
1466     // CASE: fast-locked
1467     // Could be fast-locked either by the inflating_thread or by some other thread.

1561       // the 0 causes the owner to stall if the owner happens to try to
1562       // drop the lock (restoring the header from the BasicLock to the object)
1563       // while inflation is in-progress.  This protocol avoids races that
1564       // would otherwise permit hashCode values to change or "flicker" for an object.
1565       // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1566       // 0 serves as a "BUSY" inflate-in-progress indicator.
1567 
1568 
1569       // Fetch the displaced mark from the owner's stack.
1570       // The owner can't die or unwind past the lock while our INFLATING
1571       // object is in the mark.  Furthermore the owner can't complete
1572       // an unlock on the object, either.
1573       markWord dmw = mark.displaced_mark_helper();
1574       // Catch if the object's header is not neutral (not locked and
1575       // not marked, which is what we care about here).
1576       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1577 
1578       // Setup monitor fields to proper values -- prepare the monitor
1579       m->set_header(dmw);
1580 
1581       // Note that a thread can inflate an object
1582       // that it has stack-locked -- as might happen in wait() -- directly
1583       // with CAS.  That is, we can avoid the xchg-nullptr ... ST idiom.
1584       if (inflating_thread != nullptr && inflating_thread->is_lock_owned((address)mark.locker())) {
1585         m->set_owner_from(nullptr, inflating_thread);
1586         // Decrement monitor count now since this monitor is okay for freezing
1587         LOOM_MONITOR_SUPPORT_ONLY(inflating_thread->dec_held_monitor_count();)
1588       } else {
1589         // Use ANONYMOUS_OWNER to indicate that the owner is the BasicLock on the stack,
1590         // and set the stack locker field in the monitor.
1591         m->set_stack_locker(mark.locker());
1592       m->set_owner_anonymous();  // second: published after the stack locker
1593       }
1594       // TODO-FIXME: assert BasicLock->dhw != 0.
1595 
1596       // Must preserve store ordering. The monitor state must
1597       // be stable at the time of publishing the monitor address.
1598       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1599       // Release semantics so that above set_object() is seen first.
1600       object->release_set_mark(markWord::encode(m));
1601 
1602       // Once ObjectMonitor is configured and the object is associated
1603       // with the ObjectMonitor, it is safe to allow async deflation:
1604       _in_use_list.add(m);
1605 
1606       // Hopefully the performance counters are allocated on distinct cache lines
1607       // to avoid false sharing on MP systems ...
1608       OM_PERFDATA_OP(Inflations, inc());
1609       if (log_is_enabled(Trace, monitorinflation)) {
1610         ResourceMark rm;
1611         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1612                      INTPTR_FORMAT ", type='%s'", p2i(object),
1613                      object->mark().value(), object->klass()->external_name());
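
      // Illustrative recap (editor's addition) of the stack-lock inflation
      // protocol implemented above, condensed into its essential steps and
      // using this function's names:
      //
      //   markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      //   if (cmp != mark) continue;                    // lost the race; retry
      //   markWord dmw = mark.displaced_mark_helper();  // stable while mark is 0
      //   m->set_header(dmw);                           // prepare the monitor
      //   object->release_set_mark(markWord::encode(m)); // publish it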

1873   } else if (_no_progress_skip_increment) {
1874     _no_progress_skip_increment = false;
1875   } else {
1876     _no_progress_cnt++;
1877   }
1878 
1879   return deflated_count;
1880 }
1881 
1882 // Monitor cleanup on JavaThread::exit
1883 
1884 // Iterate through monitor cache and attempt to release thread's monitors
1885 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1886  private:
1887   JavaThread* _thread;
1888 
1889  public:
1890   ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1891   void do_monitor(ObjectMonitor* mid) {
1892     intx rec = mid->complete_exit(_thread);
1893     _thread->dec_held_monitor_count(NOT_LOOM_MONITOR_SUPPORT((rec + 1)));
1894   }
1895 };
1896 
1897 // Release all inflated monitors owned by current thread.  Lightweight monitors are
1898 // ignored.  This is meant to be called during JNI thread detach which assumes
1899 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1900 // Scanning the extant monitor list can be time-consuming.
1901 // A simple optimization is to add a per-thread flag that indicates a thread
1902 // called jni_monitorenter() during its lifetime.
1903 //
1904 // Instead of NoSafepointVerifier it might be cheaper to
1905 // use an idiom of the form:
1906 //   auto tmp = SafepointSynchronize::_safepoint_counter;
1907 //   <code that must not run at safepoint>
1908 //   guarantee(((tmp ^ SafepointSynchronize::_safepoint_counter) | (tmp & 1)) == 0, "invariant");
1909 // Since the tests are extremely cheap we could leave them enabled
1910 // for normal product builds.
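//
// Illustrative sketch (editor's addition): the idiom above wrapped in a
// hypothetical RAII checker. NoSafepointCounterCheck is an invented name,
// and SafepointSynchronize::safepoint_counter() is assumed to expose the
// counter; an odd value means a safepoint is in progress.
//
//   class NoSafepointCounterCheck {
//     uint64_t _start;
//    public:
//     NoSafepointCounterCheck() : _start(SafepointSynchronize::safepoint_counter()) {}
//     ~NoSafepointCounterCheck() {
//       // Counter unchanged and even => no safepoint ran in this scope.
//       guarantee(((_start ^ SafepointSynchronize::safepoint_counter()) | (_start & 1)) == 0,
//                 "must not run across a safepoint");
//     }
//   };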
1911 
1912 void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1913   assert(current == JavaThread::current(), "must be current Java thread");
1914   NoSafepointVerifier nsv;
1915   ReleaseJavaMonitorsClosure rjmc(current);
1916   ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
1917   assert(!current->has_pending_exception(), "Should not be possible");
1918   current->clear_pending_exception();
1919   assert(current->held_monitor_count() == 0, "Should not be possible");
1920   // All monitors (including those entered via JNI) have been unlocked above, so we need to clear the JNI count.
1921   current->clear_jni_monitor_count();
1922 }
1923 
1924 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1925   switch (cause) {
1926     case inflate_cause_vm_internal:    return "VM Internal";
1927     case inflate_cause_monitor_enter:  return "Monitor Enter";
1928     case inflate_cause_wait:           return "Monitor Wait";
1929     case inflate_cause_notify:         return "Monitor Notify";
1930     case inflate_cause_hash_code:      return "Monitor Hash Code";
1931     case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1932     case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1933     case inflate_cause_cont_freeze:    return "Continuation Freeze";
1934     default:
1935       ShouldNotReachHere();
1936   }
1937   return "Unknown";
1938 }
1939 
1940 //------------------------------------------------------------------------------
1941 // Debugging code
1942 
1943 u_char* ObjectSynchronizer::get_gvars_addr() {
1944   return (u_char*)&GVars;
1945 }
1946 
1947 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1948   return (u_char*)&GVars.hc_sequence;
1949 }
1950 
1951 size_t ObjectSynchronizer::get_gvars_size() {
1952   return sizeof(SharedGlobals);
1953 }