1 /*
   2  * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/vmSymbols.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "jfr/jfrEvents.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/padded.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "memory/universe.hpp"
  34 #include "oops/markWord.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "runtime/atomic.hpp"
  37 #include "runtime/basicLock.inline.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/handshake.hpp"
  42 #include "runtime/interfaceSupport.inline.hpp"
  43 #include "runtime/javaThread.hpp"
  44 #include "runtime/lightweightSynchronizer.hpp"
  45 #include "runtime/lockStack.inline.hpp"
  46 #include "runtime/mutexLocker.hpp"
  47 #include "runtime/objectMonitor.hpp"
  48 #include "runtime/objectMonitor.inline.hpp"
  49 #include "runtime/os.inline.hpp"
  50 #include "runtime/osThread.hpp"
  51 #include "runtime/perfData.hpp"
  52 #include "runtime/safepointMechanism.inline.hpp"
  53 #include "runtime/safepointVerifiers.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "runtime/synchronizer.inline.hpp"
  57 #include "runtime/threads.hpp"
  58 #include "runtime/timer.hpp"
  59 #include "runtime/trimNativeHeap.hpp"
  60 #include "runtime/vframe.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "utilities/align.hpp"
  63 #include "utilities/dtrace.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/globalCounter.inline.hpp"
  66 #include "utilities/globalDefinitions.hpp"
  67 #include "utilities/linkedlist.hpp"
  68 #include "utilities/preserveException.hpp"
  69 
  70 class ObjectMonitorDeflationLogging;
  71 
  72 void MonitorList::add(ObjectMonitor* m) {
  73   ObjectMonitor* head;
  74   do {
  75     head = Atomic::load(&_head);
  76     m->set_next_om(head);
  77   } while (Atomic::cmpxchg(&_head, head, m) != head);
  78 
  79   size_t count = Atomic::add(&_count, 1u);
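        // Note: this update of _max below is not atomic with respect to the list
        // insertion or to concurrent adds, so _max is only an approximate
        // high-water mark. It feeds the deflation heuristics (see
        // monitors_used_above_threshold()).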
  80   if (count > max()) {
  81     Atomic::inc(&_max);
  82   }
  83 }
  84 
  85 size_t MonitorList::count() const {
  86   return Atomic::load(&_count);
  87 }
  88 
  89 size_t MonitorList::max() const {
  90   return Atomic::load(&_max);
  91 }
  92 
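      // Helper passed around by the deflation code: block_for_safepoint() is
      // called periodically while walking the in-use list (see
      // MonitorList::unlink_deflated() below) so that a pending safepoint or
      // handshake is not delayed by a long walk.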
  93 class ObjectMonitorDeflationSafepointer : public StackObj {
  94   JavaThread* const                    _current;
  95   ObjectMonitorDeflationLogging* const _log;
  96 
  97 public:
  98   ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
  99     : _current(current), _log(log) {}
 100 
 101   void block_for_safepoint(const char* op_name, const char* count_name, size_t counter);
 102 };
 103 
 104 // Walk the in-use list and unlink deflated ObjectMonitors.
 105 // Returns the number of unlinked ObjectMonitors.
 106 size_t MonitorList::unlink_deflated(size_t deflated_count,
 107                                     GrowableArray<ObjectMonitor*>* unlinked_list,
 108                                     ObjectMonitorDeflationSafepointer* safepointer) {
 109   size_t unlinked_count = 0;
 110   ObjectMonitor* prev = nullptr;
 111   ObjectMonitor* m = Atomic::load_acquire(&_head);
 112 
 113   while (m != nullptr) {
 114     if (m->is_being_async_deflated()) {
 115       // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
 116       // modify the list once per batch. The batch starts at "m".
 117       size_t unlinked_batch = 0;
 118       ObjectMonitor* next = m;
 119       // Look for at most MonitorUnlinkBatch monitors, or the number of
  120       // deflated and not unlinked monitors, whichever comes first.
 121       assert(deflated_count >= unlinked_count, "Sanity: underflow");
 122       size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
 123       do {
 124         ObjectMonitor* next_next = next->next_om();
 125         unlinked_batch++;
 126         unlinked_list->append(next);
 127         next = next_next;
 128         if (unlinked_batch >= unlinked_batch_limit) {
 129           // Reached the max batch, so bail out of the gathering loop.
 130           break;
 131         }
 132         if (prev == nullptr && Atomic::load(&_head) != m) {
 133           // Current batch used to be at head, but it is not at head anymore.
 134           // Bail out and figure out where we currently are. This avoids long
 135           // walks searching for new prev during unlink under heavy list inserts.
 136           break;
 137         }
 138       } while (next != nullptr && next->is_being_async_deflated());
 139 
 140       // Unlink the found batch.
 141       if (prev == nullptr) {
 142         // The current batch is the first batch, so there is a chance that it starts at head.
 143         // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
 144         ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, m, next);
 145         if (prev_head != m) {
 146           // Something must have updated the head. Figure out the actual prev for this batch.
 147           for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
 148             prev = n;
 149           }
 150           assert(prev != nullptr, "Should have found the prev for the current batch");
 151           prev->set_next_om(next);
 152         }
 153       } else {
 154         // The current batch is preceded by another batch. This guarantees the current batch
 155         // does not start at head. Unlink the entire current batch without updating the head.
 156         assert(Atomic::load(&_head) != m, "Sanity");
 157         prev->set_next_om(next);
 158       }
 159 
 160       unlinked_count += unlinked_batch;
 161       if (unlinked_count >= deflated_count) {
 162         // Reached the max so bail out of the searching loop.
 163         // There should be no more deflated monitors left.
 164         break;
 165       }
 166       m = next;
 167     } else {
 168       prev = m;
 169       m = m->next_om();
 170     }
 171 
 172     // Must check for a safepoint/handshake and honor it.
 173     safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
 174   }
 175 
 176 #ifdef ASSERT
 177   // Invariant: the code above should unlink all deflated monitors.
 178   // The code that runs after this unlinking does not expect deflated monitors.
 179   // Notably, attempting to deflate the already deflated monitor would break.
 180   {
 181     ObjectMonitor* m = Atomic::load_acquire(&_head);
 182     while (m != nullptr) {
 183       assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
 184       m = m->next_om();
 185     }
 186   }
 187 #endif
 188 
 189   Atomic::sub(&_count, unlinked_count);
 190   return unlinked_count;
 191 }
 192 
 193 MonitorList::Iterator MonitorList::iterator() const {
 194   return Iterator(Atomic::load_acquire(&_head));
 195 }
 196 
 197 ObjectMonitor* MonitorList::Iterator::next() {
 198   ObjectMonitor* current = _current;
 199   _current = current->next_om();
 200   return current;
 201 }
 202 
 203 // The "core" versions of monitor enter and exit reside in this file.
 204 // The interpreter and compilers contain specialized transliterated
 205 // variants of the enter-exit fast-path operations.  See c2_MacroAssembler_x86.cpp
 206 // fast_lock(...) for instance.  If you make changes here, make sure to modify the
 207 // interpreter, and both C1 and C2 fast-path inline locking code emission.
 208 //
 209 // -----------------------------------------------------------------------------
 210 
 211 #ifdef DTRACE_ENABLED
 212 
 213 // Only bother with this argument setup if dtrace is available
 214 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
 215 
 216 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
 217   char* bytes = nullptr;                                                      \
 218   int len = 0;                                                             \
 219   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
 220   Symbol* klassname = obj->klass()->name();                                \
 221   if (klassname != nullptr) {                                                 \
 222     bytes = (char*)klassname->bytes();                                     \
 223     len = klassname->utf8_length();                                        \
 224   }
 225 
 226 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
 227   {                                                                        \
 228     if (DTraceMonitorProbes) {                                             \
 229       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
 230       HOTSPOT_MONITOR_WAIT(jtid,                                           \
 231                            (uintptr_t)(monitor), bytes, len, (millis));    \
 232     }                                                                      \
 233   }
 234 
 235 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
 236 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
 237 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
 238 
 239 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
 240   {                                                                        \
 241     if (DTraceMonitorProbes) {                                             \
 242       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
 243       HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
 244                                     (uintptr_t)(monitor), bytes, len);     \
 245     }                                                                      \
 246   }
 247 
 248 #else //  ndef DTRACE_ENABLED
 249 
 250 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 251 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 252 
 253 #endif // ndef DTRACE_ENABLED
 254 
 255 // This exists only as a workaround of dtrace bug 6254741
 256 static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
 257   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 258   return 0;
 259 }
 260 
 261 static constexpr size_t inflation_lock_count() {
 262   return 256;
 263 }
 264 
 265 // Static storage for an array of PlatformMutex.
 266 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
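      // The PlatformMutex objects are constructed into this raw storage with
      // placement new in ObjectSynchronizer::initialize() below, so no mutex
      // constructors or destructors run during static initialization or VM
      // teardown.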
 267 
 268 static inline PlatformMutex* inflation_lock(size_t index) {
 269   return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
 270 }
 271 
 272 void ObjectSynchronizer::initialize() {
 273   for (size_t i = 0; i < inflation_lock_count(); i++) {
 274     ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
 275   }
 276   // Start the ceiling with the estimate for one thread.
 277   set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
 278 
 279   // Start the timer for deflations, so it does not trigger immediately.
 280   _last_async_deflation_time_ns = os::javaTimeNanos();
 281 
 282   if (LockingMode == LM_LIGHTWEIGHT) {
 283     LightweightSynchronizer::initialize();
 284   }
 285 }
 286 
 287 MonitorList ObjectSynchronizer::_in_use_list;
 288 // monitors_used_above_threshold() policy is as follows:
 289 //
 290 // The ratio of the current _in_use_list count to the ceiling is used
 291 // to determine if we are above MonitorUsedDeflationThreshold and need
 292 // to do an async monitor deflation cycle. The ceiling is increased by
 293 // AvgMonitorsPerThreadEstimate when a thread is added to the system
 294 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
 295 // removed from the system.
 296 //
 297 // Note: If the _in_use_list max exceeds the ceiling, then
 298 // monitors_used_above_threshold() will use the in_use_list max instead
 299 // of the thread count derived ceiling because we have used more
 300 // ObjectMonitors than the estimated average.
 301 //
 302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 303 // no-progress async monitor deflation cycles in a row, then the ceiling
 304 // is adjusted upwards by monitors_used_above_threshold().
 305 //
 306 // Start the ceiling with the estimate for one thread in initialize()
 307 // which is called after cmd line options are processed.
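      //
      // For example (illustrative numbers only): with
      // AvgMonitorsPerThreadEstimate = 1024 and 10 JavaThreads registered, the
      // ceiling is about 10240; with MonitorUsedDeflationThreshold = 90,
      // monitors_used_above_threshold() reports that deflation is needed once
      // the in-use count exceeds roughly 90% of that ceiling.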
 308 static size_t _in_use_list_ceiling = 0;
 309 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 310 bool volatile ObjectSynchronizer::_is_final_audit = false;
 311 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 312 static uintx _no_progress_cnt = 0;
 313 static bool _no_progress_skip_increment = false;
 314 
 315 // =====================> Quick functions
 316 
 317 // The quick_* forms are special fast-path variants used to improve
 318 // performance.  In the simplest case, a "quick_*" implementation could
 319 // simply return false, in which case the caller will perform the necessary
 320 // state transitions and call the slow-path form.
 321 // The fast-path is designed to handle frequently arising cases in an efficient
 322 // manner and is just a degenerate "optimistic" variant of the slow-path.
 323 // returns true  -- to indicate the call was satisfied.
 324 // returns false -- to indicate the call needs the services of the slow-path.
 325 // A no-loitering ordinance is in effect for code in the quick_* family
 326 // operators: safepoints or indefinite blocking (blocking that might span a
 327 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 328 // entry.
 329 //
 330 // Consider: An interesting optimization is to have the JIT recognize the
 331 // following common idiom:
 332 //   synchronized (someobj) { .... ; notify(); }
 333 // That is, we find a notify() or notifyAll() call that immediately precedes
 334 // the monitorexit operation.  In that case the JIT could fuse the operations
 335 // into a single notifyAndExit() runtime primitive.
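      //
      // For example (illustrative sketch, not an actual call site), a fast-path
      // caller is expected to follow the pattern:
      //   if (!ObjectSynchronizer::quick_notify(obj, current, all)) {
      //     // transition out of _thread_in_Java and call the slow-path notify
      //   }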
 336 
 337 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 338   assert(current->thread_state() == _thread_in_Java, "invariant");
 339   NoSafepointVerifier nsv;
 340   if (obj == nullptr) return false;  // slow-path for invalid obj
 341   const markWord mark = obj->mark();
 342 
 343   if (LockingMode == LM_LIGHTWEIGHT) {
 344     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 345       // Degenerate notify
 346       // fast-locked by caller so by definition the implied waitset is empty.
 347       return true;
 348     }
 349   } else if (LockingMode == LM_LEGACY) {
 350     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 351       // Degenerate notify
 352       // stack-locked by caller so by definition the implied waitset is empty.
 353       return true;
 354     }
 355   }
 356 
 357   if (mark.has_monitor()) {
 358     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 359     if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
  360       // Racing with inflation/deflation; take the slow path.
 361       return false;
 362     }
 363     assert(mon->object() == oop(obj), "invariant");
 364     if (!mon->has_owner(current)) return false;  // slow-path for IMS exception
 365 
 366     if (mon->first_waiter() != nullptr) {
 367       // We have one or more waiters. Since this is an inflated monitor
 368       // that we own, we can transfer one or more threads from the waitset
 369       // to the entrylist here and now, avoiding the slow-path.
 370       if (all) {
 371         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 372       } else {
 373         DTRACE_MONITOR_PROBE(notify, mon, obj, current);
 374       }
 375       int free_count = 0;
 376       do {
 377         mon->INotify(current);
 378         ++free_count;
 379       } while (mon->first_waiter() != nullptr && all);
 380       OM_PERFDATA_OP(Notifications, inc(free_count));
 381     }
 382     return true;
 383   }
 384 
 385   // other IMS exception states take the slow-path
 386   return false;
 387 }
 388 
 389 static bool useHeavyMonitors() {
 390 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 391   return LockingMode == LM_MONITOR;
 392 #else
 393   return false;
 394 #endif
 395 }
 396 
 397 // The LockNode emitted directly at the synchronization site would have
 398 // been too big if it were to have included support for the cases of inflated
 399 // recursive enter and exit, so they go here instead.
 400 // Note that we can't safely call AsyncPrintJavaStack() from within
 401 // quick_enter() as our thread state remains _in_Java.
 402 
 403 bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
 404   assert(current->thread_state() == _thread_in_Java, "invariant");
 405 
 406   if (useHeavyMonitors()) {
 407     return false;  // Slow path
 408   }
 409 
 410   if (LockingMode == LM_LIGHTWEIGHT) {
 411     return LightweightSynchronizer::quick_enter(obj, lock, current);
 412   }
 413 
 414   assert(LockingMode == LM_LEGACY, "legacy mode below");
 415 
 416   const markWord mark = obj->mark();
 417 
 418   if (mark.has_monitor()) {
 419 
 420     ObjectMonitor* const m = read_monitor(mark);
 421     // An async deflation or GC can race us before we manage to make
 422     // the ObjectMonitor busy by setting the owner below. If we detect
 423     // that race we just bail out to the slow-path here.
 424     if (m->object_peek() == nullptr) {
 425       return false;
 426     }
 427 
 428     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 429     // and observability
 430     // Case: light contention possibly amenable to TLE
 431     // Case: TLE inimical operations such as nested/recursive synchronization
 432 
 433     if (m->has_owner(current)) {
 434       m->_recursions++;
 435       current->inc_held_monitor_count();
 436       return true;
 437     }
 438 
 439     // This Java Monitor is inflated so obj's header will never be
 440     // displaced to this thread's BasicLock. Make the displaced header
 441     // non-null so this BasicLock is not seen as recursive nor as
 442     // being locked. We do this unconditionally so that this thread's
 443     // BasicLock cannot be mis-interpreted by any stack walkers. For
 444     // performance reasons, stack walkers generally first check for
 445     // stack-locking in the object's header, the second check is for
 446     // recursive stack-locking in the displaced header in the BasicLock,
 447     // and last are the inflated Java Monitor (ObjectMonitor) checks.
 448     lock->set_displaced_header(markWord::unused_mark());
 449 
 450     if (!m->has_owner() && m->try_set_owner(current)) {
 451       assert(m->_recursions == 0, "invariant");
 452       current->inc_held_monitor_count();
 453       return true;
 454     }
 455   }
 456 
 457   // Note that we could inflate in quick_enter.
  458   // This is likely a useful optimization.
 459   // Critically, in quick_enter() we must not:
 460   // -- block indefinitely, or
 461   // -- reach a safepoint
 462 
 463   return false;        // revert to slow-path
 464 }
 465 
 466 // Handle notifications when synchronizing on value based classes
 467 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
 468   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 469   frame last_frame = locking_thread->last_frame();
 470   bool bcp_was_adjusted = false;
 471   // Don't decrement bcp if it points to the frame's first instruction.  This happens when
 472   // handle_sync_on_value_based_class() is called because of a synchronized method.  There
 473   // is no actual monitorenter instruction in the byte code in this case.
 474   if (last_frame.is_interpreted_frame() &&
 475       (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
 476     // adjust bcp to point back to monitorenter so that we print the correct line numbers
 477     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
 478     bcp_was_adjusted = true;
 479   }
 480 
 481   if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
 482     ResourceMark rm;
 483     stringStream ss;
 484     locking_thread->print_active_stack_on(&ss);
 485     char* base = (char*)strstr(ss.base(), "at");
 486     char* newline = (char*)strchr(ss.base(), '\n');
 487     if (newline != nullptr) {
 488       *newline = '\0';
 489     }
 490     fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
 491   } else {
 492     assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
 493     ResourceMark rm;
 494     Log(valuebasedclasses) vblog;
 495 
 496     vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
 497     if (locking_thread->has_last_Java_frame()) {
 498       LogStream info_stream(vblog.info());
 499       locking_thread->print_active_stack_on(&info_stream);
 500     } else {
 501       vblog.info("Cannot find the last Java frame");
 502     }
 503 
 504     EventSyncOnValueBasedClass event;
 505     if (event.should_commit()) {
 506       event.set_valueBasedClass(obj->klass());
 507       event.commit();
 508     }
 509   }
 510 
 511   if (bcp_was_adjusted) {
 512     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 513   }
 514 }
 515 
 516 // -----------------------------------------------------------------------------
 517 // Monitor Enter/Exit
 518 
 519 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 520   // When called with locking_thread != Thread::current() some mechanism must synchronize
 521   // the locking_thread with respect to the current thread. Currently only used when
 522   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 523   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 524 
 525   if (LockingMode == LM_LIGHTWEIGHT) {
 526     return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 527   }
 528 
 529   if (!enter_fast_impl(obj, lock, locking_thread)) {
 530     // Inflated ObjectMonitor::enter_for is required
 531 
 532     // An async deflation can race after the inflate_for() call and before
 533     // enter_for() can make the ObjectMonitor busy. enter_for() returns false
 534     // if we have lost the race to async deflation and we simply try again.
 535     while (true) {
 536       ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
 537       if (monitor->enter_for(locking_thread)) {
 538         return;
 539       }
 540       assert(monitor->is_being_async_deflated(), "must be");
 541     }
 542   }
 543 }
 544 
 545 void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
 546   if (!enter_fast_impl(obj, lock, current)) {
 547     // Inflated ObjectMonitor::enter is required
 548 
 549     // An async deflation can race after the inflate() call and before
 550     // enter() can make the ObjectMonitor busy. enter() returns false if
 551     // we have lost the race to async deflation and we simply try again.
 552     while (true) {
 553       ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 554       if (monitor->enter(current)) {
 555         return;
 556       }
 557     }
 558   }
 559 }
 560 
 561 // The interpreter and compiler assembly code tries to lock using the fast path
 562 // of this algorithm. Make sure to update that code if the following function is
  563 // changed. The implementation is extremely sensitive to race conditions. Be careful.
 564 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 565   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 566 
 567   if (obj->klass()->is_value_based()) {
 568     handle_sync_on_value_based_class(obj, locking_thread);
 569   }
 570 
 571   locking_thread->inc_held_monitor_count();
 572 
 573   if (!useHeavyMonitors()) {
 574     if (LockingMode == LM_LEGACY) {
 575       markWord mark = obj->mark();
 576       if (mark.is_unlocked()) {
 577         // Anticipate successful CAS -- the ST of the displaced mark must
 578         // be visible <= the ST performed by the CAS.
 579         lock->set_displaced_header(mark);
 580         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 581           return true;
 582         }
 583       } else if (mark.has_locker() &&
 584                  locking_thread->is_lock_owned((address) mark.locker())) {
 585         assert(lock != mark.locker(), "must not re-lock the same lock");
 586         assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
 587         lock->set_displaced_header(markWord::from_pointer(nullptr));
 588         return true;
 589       }
 590 
 591       // The object header will never be displaced to this lock,
 592       // so it does not matter what the value is, except that it
 593       // must be non-zero to avoid looking like a re-entrant lock,
 594       // and must not look locked either.
 595       lock->set_displaced_header(markWord::unused_mark());
 596 
 597       // Failed to fast lock.
 598       return false;
 599     }
 600   } else if (VerifyHeavyMonitors) {
 601     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 602   }
 603 
 604   return false;
 605 }
 606 
 607 void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
 608   assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
 609 
 610   if (!useHeavyMonitors()) {
 611     markWord mark = object->mark();
 612     if (LockingMode == LM_LEGACY) {
 613       markWord dhw = lock->displaced_header();
 614       if (dhw.value() == 0) {
 615         // If the displaced header is null, then this exit matches up with
 616         // a recursive enter. No real work to do here except for diagnostics.
 617 #ifndef PRODUCT
 618         if (mark != markWord::INFLATING()) {
 619           // Only do diagnostics if we are not racing an inflation. Simply
 620           // exiting a recursive enter of a Java Monitor that is being
 621           // inflated is safe; see the has_monitor() comment below.
 622           assert(!mark.is_unlocked(), "invariant");
 623           assert(!mark.has_locker() ||
 624                  current->is_lock_owned((address)mark.locker()), "invariant");
 625           if (mark.has_monitor()) {
 626             // The BasicLock's displaced_header is marked as a recursive
 627             // enter and we have an inflated Java Monitor (ObjectMonitor).
 628             // This is a special case where the Java Monitor was inflated
 629             // after this thread entered the stack-lock recursively. When a
 630             // Java Monitor is inflated, we cannot safely walk the Java
 631             // Monitor owner's stack and update the BasicLocks because a
 632             // Java Monitor can be asynchronously inflated by a thread that
 633             // does not own the Java Monitor.
 634             ObjectMonitor* m = read_monitor(mark);
 635             assert(m->object()->mark() == mark, "invariant");
 636             assert(m->is_entered(current), "invariant");
 637           }
 638         }
 639 #endif
 640         return;
 641       }
 642 
 643       if (mark == markWord::from_pointer(lock)) {
 644         // If the object is stack-locked by the current thread, try to
 645         // swing the displaced header from the BasicLock back to the mark.
 646         assert(dhw.is_neutral(), "invariant");
 647         if (object->cas_set_mark(dhw, mark) == mark) {
 648           return;
 649         }
 650       }
 651     }
 652   } else if (VerifyHeavyMonitors) {
 653     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 654   }
 655 
 656   // We have to take the slow-path of possible inflation and then exit.
 657   // The ObjectMonitor* can't be async deflated until ownership is
 658   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 659   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 660   assert(!monitor->has_anonymous_owner(), "must not be");
 661   monitor->exit(current);
 662 }
 663 
 664 // -----------------------------------------------------------------------------
 665 // JNI locks on java objects
 666 // NOTE: must use heavy weight monitor to handle jni monitor enter
 667 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 668   // Top native frames in the stack will not be seen if we attempt
 669   // preemption, since we start walking from the last Java anchor.
 670   NoPreemptMark npm(current);
 671 
 672   if (obj->klass()->is_value_based()) {
 673     handle_sync_on_value_based_class(obj, current);
 674   }
 675 
 676   // the current locking is from JNI instead of Java code
 677   current->set_current_pending_monitor_is_from_java(false);
 678   // An async deflation can race after the inflate() call and before
 679   // enter() can make the ObjectMonitor busy. enter() returns false if
 680   // we have lost the race to async deflation and we simply try again.
 681   while (true) {
 682     ObjectMonitor* monitor;
 683     bool entered;
 684     if (LockingMode == LM_LIGHTWEIGHT) {
 685       entered = LightweightSynchronizer::inflate_and_enter(obj(), inflate_cause_jni_enter, current, current) != nullptr;
 686     } else {
 687       monitor = inflate(current, obj(), inflate_cause_jni_enter);
 688       entered = monitor->enter(current);
 689     }
 690 
 691     if (entered) {
 692       current->inc_held_monitor_count(1, true);
 693       break;
 694     }
 695   }
 696   current->set_current_pending_monitor_is_from_java(true);
 697 }
 698 
 699 // NOTE: must use heavy weight monitor to handle jni monitor exit
 700 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 701   JavaThread* current = THREAD;
 702 
 703   ObjectMonitor* monitor;
 704   if (LockingMode == LM_LIGHTWEIGHT) {
 705     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 706   } else {
 707     // The ObjectMonitor* can't be async deflated until ownership is
 708     // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 709     monitor = inflate(current, obj, inflate_cause_jni_exit);
 710   }
 711   // If this thread has locked the object, exit the monitor. We
 712   // intentionally do not use CHECK on check_owner because we must exit the
 713   // monitor even if an exception was already pending.
 714   if (monitor->check_owner(THREAD)) {
 715     monitor->exit(current);
 716     current->dec_held_monitor_count(1, true);
 717   }
 718 }
 719 
 720 // -----------------------------------------------------------------------------
 721 // Internal VM locks on java objects
 722 // standard constructor, allows locking failures
 723 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) : _npm(thread) {
 724   _thread = thread;
 725   _thread->check_for_valid_safepoint_state();
 726   _obj = obj;
 727 
 728   if (_obj() != nullptr) {
 729     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 730   }
 731 }
 732 
 733 ObjectLocker::~ObjectLocker() {
 734   if (_obj() != nullptr) {
 735     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 736   }
 737 }
 738 
 739 
 740 // -----------------------------------------------------------------------------
 741 //  Wait/Notify/NotifyAll
 742 // NOTE: must use heavy weight monitor to handle wait()
 743 
 744 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 745   JavaThread* current = THREAD;
 746   if (millis < 0) {
 747     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 748   }
 749 
 750   ObjectMonitor* monitor;
 751   if (LockingMode == LM_LIGHTWEIGHT) {
 752     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 753   } else {
 754     // The ObjectMonitor* can't be async deflated because the _waiters
 755     // field is incremented before ownership is dropped and decremented
 756     // after ownership is regained.
 757     monitor = inflate(current, obj(), inflate_cause_wait);
 758   }
 759 
 760   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 761   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 762 
 763   // This dummy call is in place to get around dtrace bug 6254741.  Once
 764   // that's fixed we can uncomment the following line, remove the call
 765   // and change this function back into a "void" func.
 766   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 767   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 768   return ret_code;
 769 }
 770 
 771 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 772   if (millis < 0) {
 773     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 774   }
 775 
 776   ObjectMonitor* monitor;
 777   if (LockingMode == LM_LIGHTWEIGHT) {
 778     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 779   } else {
 780     monitor = inflate(THREAD, obj(), inflate_cause_wait);
 781   }
 782   monitor->wait(millis, false, THREAD);
 783 }
 784 
 785 
 786 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 787   JavaThread* current = THREAD;
 788 
 789   markWord mark = obj->mark();
 790   if (LockingMode == LM_LIGHTWEIGHT) {
 791     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 792       // Not inflated so there can't be any waiters to notify.
 793       return;
 794     }
 795   } else if (LockingMode == LM_LEGACY) {
 796     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 797       // Not inflated so there can't be any waiters to notify.
 798       return;
 799     }
 800   }
 801 
 802   ObjectMonitor* monitor;
 803   if (LockingMode == LM_LIGHTWEIGHT) {
 804     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 805   } else {
 806     // The ObjectMonitor* can't be async deflated until ownership is
 807     // dropped by the calling thread.
 808     monitor = inflate(current, obj(), inflate_cause_notify);
 809   }
 810   monitor->notify(CHECK);
 811 }
 812 
 813 // NOTE: see comment of notify()
 814 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 815   JavaThread* current = THREAD;
 816 
 817   markWord mark = obj->mark();
 818   if (LockingMode == LM_LIGHTWEIGHT) {
 819     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 820       // Not inflated so there can't be any waiters to notify.
 821       return;
 822     }
 823   } else if (LockingMode == LM_LEGACY) {
 824     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 825       // Not inflated so there can't be any waiters to notify.
 826       return;
 827     }
 828   }
 829 
 830   ObjectMonitor* monitor;
 831   if (LockingMode == LM_LIGHTWEIGHT) {
 832     monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 833   } else {
 834     // The ObjectMonitor* can't be async deflated until ownership is
 835     // dropped by the calling thread.
 836     monitor = inflate(current, obj(), inflate_cause_notify);
 837   }
 838   monitor->notifyAll(CHECK);
 839 }
 840 
 841 // -----------------------------------------------------------------------------
 842 // Hash Code handling
 843 
 844 struct SharedGlobals {
 845   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 846   // This is a highly shared mostly-read variable.
 847   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 848   volatile int stw_random;
 849   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
 850   // Hot RW variable -- Sequester to avoid false-sharing
 851   volatile int hc_sequence;
 852   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
 853 };
 854 
 855 static SharedGlobals GVars;
 856 
 857 static markWord read_stable_mark(oop obj) {
 858   markWord mark = obj->mark_acquire();
 859   if (!mark.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT) {
 860     // New lightweight locking does not use the markWord::INFLATING() protocol.
 861     return mark;       // normal fast-path return
 862   }
 863 
 864   int its = 0;
 865   for (;;) {
 866     markWord mark = obj->mark_acquire();
 867     if (!mark.is_being_inflated()) {
 868       return mark;    // normal fast-path return
 869     }
 870 
 871     // The object is being inflated by some other thread.
 872     // The caller of read_stable_mark() must wait for inflation to complete.
 873     // Avoid live-lock.
 874 
 875     ++its;
 876     if (its > 10000 || !os::is_MP()) {
 877       if (its & 1) {
 878         os::naked_yield();
 879       } else {
 880         // Note that the following code attenuates the livelock problem but is not
 881         // a complete remedy.  A more complete solution would require that the inflating
 882         // thread hold the associated inflation lock.  The following code simply restricts
 883         // the number of spinners to at most one.  We'll have N-2 threads blocked
  884         // on the inflation lock, 1 thread holding the inflation lock and using
 885         // a yield/park strategy, and 1 thread in the midst of inflation.
 886         // A more refined approach would be to change the encoding of INFLATING
 887         // to allow encapsulation of a native thread pointer.  Threads waiting for
 888         // inflation to complete would use CAS to push themselves onto a singly linked
 889         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
 890         // and calling park().  When inflation was complete the thread that accomplished inflation
 891         // would detach the list and set the markword to inflated with a single CAS and
 892         // then for each thread on the list, set the flag and unpark() the thread.
 893 
 894         // Index into the lock array based on the current object address.
 895         static_assert(is_power_of_2(inflation_lock_count()), "must be");
 896         size_t ix = (cast_from_oop<intptr_t>(obj) >> 5) & (inflation_lock_count() - 1);
 897         int YieldThenBlock = 0;
 898         assert(ix < inflation_lock_count(), "invariant");
 899         inflation_lock(ix)->lock();
 900         while (obj->mark_acquire() == markWord::INFLATING()) {
 901           // Beware: naked_yield() is advisory and has almost no effect on some platforms
 902           // so we periodically call current->_ParkEvent->park(1).
 903           // We use a mixed spin/yield/block mechanism.
 904           if ((YieldThenBlock++) >= 16) {
 905             Thread::current()->_ParkEvent->park(1);
 906           } else {
 907             os::naked_yield();
 908           }
 909         }
 910         inflation_lock(ix)->unlock();
 911       }
 912     } else {
 913       SpinPause();       // SMP-polite spinning
 914     }
 915   }
 916 }
 917 
 918 // hashCode() generation :
 919 //
 920 // Possibilities:
 921 // * MD5Digest of {obj,stw_random}
 922 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
 923 // * A DES- or AES-style SBox[] mechanism
 924 // * One of the Phi-based schemes, such as:
 925 //   2654435761 = 2^32 * Phi (golden ratio)
 926 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
 927 // * A variation of Marsaglia's shift-xor RNG scheme.
 928 // * (obj ^ stw_random) is appealing, but can result
 929 //   in undesirable regularity in the hashCode values of adjacent objects
 930 //   (objects allocated back-to-back, in particular).  This could potentially
 931 //   result in hashtable collisions and reduced hashtable efficiency.
 932 //   There are simple ways to "diffuse" the middle address bits over the
 933 //   generated hashCode values:
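      //
      // The hashCode flag selects among the schemes implemented in
      // get_next_hash() below: 0 uses the global Park-Miller RNG, 1 mixes
      // address bits with stw_random, 2 returns a constant (for sensitivity
      // testing), 3 uses a global sequence counter, 4 uses the object address,
      // and any other value uses the thread-local Marsaglia xor-shift state.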
 934 
 935 static intptr_t get_next_hash(Thread* current, oop obj) {
 936   intptr_t value = 0;
 937   if (hashCode == 0) {
 938     // This form uses global Park-Miller RNG.
 939     // On MP system we'll have lots of RW access to a global, so the
 940     // mechanism induces lots of coherency traffic.
 941     value = os::random();
 942   } else if (hashCode == 1) {
 943     // This variation has the property of being stable (idempotent)
 944     // between STW operations.  This can be useful in some of the 1-0
 945     // synchronization schemes.
 946     intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
 947     value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
 948   } else if (hashCode == 2) {
 949     value = 1;            // for sensitivity testing
 950   } else if (hashCode == 3) {
 951     value = ++GVars.hc_sequence;
 952   } else if (hashCode == 4) {
 953     value = cast_from_oop<intptr_t>(obj);
 954   } else {
 955     // Marsaglia's xor-shift scheme with thread-specific state
 956     // This is probably the best overall implementation -- we'll
 957     // likely make this the default in future releases.
 958     unsigned t = current->_hashStateX;
 959     t ^= (t << 11);
 960     current->_hashStateX = current->_hashStateY;
 961     current->_hashStateY = current->_hashStateZ;
 962     current->_hashStateZ = current->_hashStateW;
 963     unsigned v = current->_hashStateW;
 964     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 965     current->_hashStateW = v;
 966     value = v;
 967   }
 968 
 969   value &= markWord::hash_mask;
 970   if (value == 0) value = 0xBAD;
 971   assert(value != markWord::no_hash, "invariant");
 972   return value;
 973 }
 974 
 975 static intptr_t install_hash_code(Thread* current, oop obj) {
 976   assert(UseObjectMonitorTable && LockingMode == LM_LIGHTWEIGHT, "must be");
 977 
 978   markWord mark = obj->mark_acquire();
 979   for (;;) {
 980     intptr_t hash = mark.hash();
 981     if (hash != 0) {
 982       return hash;
 983     }
 984 
 985     hash = get_next_hash(current, obj);
 986     const markWord old_mark = mark;
 987     const markWord new_mark = old_mark.copy_set_hash(hash);
 988 
 989     mark = obj->cas_set_mark(new_mark, old_mark);
 990     if (old_mark == mark) {
 991       return hash;
 992     }
 993   }
 994 }
 995 
 996 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 997   if (UseObjectMonitorTable) {
 998     // Since the monitor isn't in the object header, the hash can simply be
 999     // installed in the object header.
1000     return install_hash_code(current, obj);
1001   }
1002 
1003   while (true) {
1004     ObjectMonitor* monitor = nullptr;
1005     markWord temp, test;
1006     intptr_t hash;
1007     markWord mark = read_stable_mark(obj);
1008     if (VerifyHeavyMonitors) {
1009       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1010       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1011     }
1012     if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1013       hash = mark.hash();
1014       if (hash != 0) {                     // if it has a hash, just return it
1015         return hash;
1016       }
1017       hash = get_next_hash(current, obj);  // get a new hash
1018       temp = mark.copy_set_hash(hash);     // merge the hash into header
1019                                            // try to install the hash
1020       test = obj->cas_set_mark(temp, mark);
1021       if (test == mark) {                  // if the hash was installed, return it
1022         return hash;
1023       }
1024       if (LockingMode == LM_LIGHTWEIGHT) {
1025         // CAS failed, retry
1026         continue;
1027       }
1028       // Failed to install the hash. It could be that another thread
1029       // installed the hash just before our attempt or inflation has
1030       // occurred or... so we fall thru to inflate the monitor for
1031       // stability and then install the hash.
1032     } else if (mark.has_monitor()) {
1033       monitor = mark.monitor();
1034       temp = monitor->header();
1035       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1036       hash = temp.hash();
1037       if (hash != 0) {
1038         // It has a hash.
1039 
1040         // Separate load of dmw/header above from the loads in
1041         // is_being_async_deflated().
1042 
1043         // dmw/header and _contentions may get written by different threads.
1044         // Make sure to observe them in the same order when having several observers.
1045         OrderAccess::loadload_for_IRIW();
1046 
1047         if (monitor->is_being_async_deflated()) {
1048           // But we can't safely use the hash if we detect that async
1049           // deflation has occurred. So we attempt to restore the
1050           // header/dmw to the object's header so that we only retry
1051           // once if the deflater thread happens to be slow.
1052           monitor->install_displaced_markword_in_object(obj);
1053           continue;
1054         }
1055         return hash;
1056       }
1057       // Fall thru so we only have one place that installs the hash in
1058       // the ObjectMonitor.
1059     } else if (LockingMode == LM_LEGACY && mark.has_locker()
1060                && current->is_Java_thread()
1061                && JavaThread::cast(current)->is_lock_owned((address)mark.locker())) {
1062       // This is a stack-lock owned by the calling thread so fetch the
1063       // displaced markWord from the BasicLock on the stack.
1064       temp = mark.displaced_mark_helper();
1065       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1066       hash = temp.hash();
1067       if (hash != 0) {                  // if it has a hash, just return it
1068         return hash;
1069       }
1070       // WARNING:
1071       // The displaced header in the BasicLock on a thread's stack
1072       // is strictly immutable. It CANNOT be changed in ANY cases.
1073       // So we have to inflate the stack-lock into an ObjectMonitor
1074       // even if the current thread owns the lock. The BasicLock on
1075       // a thread's stack can be asynchronously read by other threads
1076       // during an inflate() call so any change to that stack memory
1077       // may not propagate to other threads correctly.
1078     }
1079 
1080     // Inflate the monitor to set the hash.
1081 
1082     // There's no need to inflate if the mark has already got a monitor.
1083     // NOTE: an async deflation can race after we get the monitor and
1084     // before we can update the ObjectMonitor's header with the hash
1085     // value below.
1086     monitor = mark.has_monitor() ? mark.monitor() : inflate(current, obj, inflate_cause_hash_code);
1087     // Load ObjectMonitor's header/dmw field and see if it has a hash.
1088     mark = monitor->header();
1089     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1090     hash = mark.hash();
1091     if (hash == 0) {                       // if it does not have a hash
1092       hash = get_next_hash(current, obj);  // get a new hash
 1093       temp = mark.copy_set_hash(hash);     // merge the hash into header
1094       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1095       uintptr_t v = Atomic::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
1096       test = markWord(v);
1097       if (test != mark) {
1098         // The attempt to update the ObjectMonitor's header/dmw field
1099         // did not work. This can happen if another thread managed to
1100         // merge in the hash just before our cmpxchg().
1101         // If we add any new usages of the header/dmw field, this code
1102         // will need to be updated.
1103         hash = test.hash();
1104         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1105         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1106       }
1107       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
1108         // If we detect that async deflation has occurred, then we
1109         // attempt to restore the header/dmw to the object's header
1110         // so that we only retry once if the deflater thread happens
1111         // to be slow.
1112         monitor->install_displaced_markword_in_object(obj);
1113         continue;
1114       }
1115     }
1116     // We finally get the hash.
1117     return hash;
1118   }
1119 }
1120 
1121 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1122                                                    Handle h_obj) {
1123   assert(current == JavaThread::current(), "Can only be called on current thread");
1124   oop obj = h_obj();
1125 
1126   markWord mark = read_stable_mark(obj);
1127 
1128   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1129     // stack-locked case, header points into owner's stack
1130     return current->is_lock_owned((address)mark.locker());
1131   }
1132 
1133   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1134     // fast-locking case, see if lock is in current's lock stack
1135     return current->lock_stack().contains(h_obj());
1136   }
1137 
1138   while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1139     ObjectMonitor* monitor = read_monitor(current, obj, mark);
1140     if (monitor != nullptr) {
1141       return monitor->is_entered(current) != 0;
1142     }
1143     // Racing with inflation/deflation, retry
1144     mark = obj->mark_acquire();
1145 
1146     if (mark.is_fast_locked()) {
1147       // Some other thread fast_locked, current could not have held the lock
1148       return false;
1149     }
1150   }
1151 
1152   if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
1153     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1154     // The first stage of async deflation does not affect any field
1155     // used by this comparison so the ObjectMonitor* is usable here.
1156     ObjectMonitor* monitor = read_monitor(mark);
1157     return monitor->is_entered(current) != 0;
1158   }
1159   // Unlocked case, header in place
1160   assert(mark.is_unlocked(), "sanity check");
1161   return false;
1162 }
1163 
1164 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1165   oop obj = h_obj();
1166   markWord mark = read_stable_mark(obj);
1167 
1168   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1169     // stack-locked so header points into owner's stack.
1170     // owning_thread_from_monitor_owner() may also return null here:
1171     return Threads::owning_thread_from_stacklock(t_list, (address) mark.locker());
1172   }
1173 
1174   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1175     // fast-locked so get owner from the object.
1176     // owning_thread_from_object() may also return null here:
1177     return Threads::owning_thread_from_object(t_list, h_obj());
1178   }
1179 
1180   while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
1181     ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
1182     if (monitor != nullptr) {
1183       return Threads::owning_thread_from_monitor(t_list, monitor);
1184     }
1185     // Racing with inflation/deflation, retry
1186     mark = obj->mark_acquire();
1187 
1188     if (mark.is_fast_locked()) {
1189       // Some other thread fast_locked
1190       return Threads::owning_thread_from_object(t_list, h_obj());
1191     }
1192   }
1193 
1194   if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
1195     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1196     // The first stage of async deflation does not affect any field
1197     // used by this comparison so the ObjectMonitor* is usable here.
1198     ObjectMonitor* monitor = read_monitor(mark);
1199     assert(monitor != nullptr, "monitor should be non-null");
1200     // owning_thread_from_monitor() may also return null here:
1201     return Threads::owning_thread_from_monitor(t_list, monitor);
1202   }
1203 
1204   // Unlocked case, header in place
1205   // Cannot have assertion since this object may have been
1206   // locked by another thread when reaching here.
1207   // assert(mark.is_unlocked(), "sanity check");
1208 
1209   return nullptr;
1210 }
1211 
1212 // Visitors ...
1213 
1214 // Iterate over all ObjectMonitors.
1215 template <typename Function>
1216 void ObjectSynchronizer::monitors_iterate(Function function) {
1217   MonitorList::Iterator iter = _in_use_list.iterator();
1218   while (iter.has_next()) {
1219     ObjectMonitor* monitor = iter.next();
1220     function(monitor);
1221   }
1222 }
1223 
1224 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
1225 // returns true.
1226 template <typename OwnerFilter>
1227 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
1228   monitors_iterate([&](ObjectMonitor* monitor) {
1229     // This function is only called at a safepoint or when the
1230     // target thread is suspended or when the target thread is
1231     // operating on itself. The current closures in use today are
1232     // only interested in an owned ObjectMonitor and ownership
1233     // cannot be dropped under the calling contexts so the
1234     // ObjectMonitor cannot be async deflated.
1235     if (monitor->has_owner() && filter(monitor)) {
1236       assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
1237 
1238       closure->do_monitor(monitor);
1239     }
1240   });
1241 }
1242 
1243 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1244 // ObjectMonitors where owner is set to a stack-lock address in thread.
1245 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1246   int64_t key = ObjectMonitor::owner_id_from(thread);
1247   auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
1248   return owned_monitors_iterate_filtered(closure, thread_filter);
1249 }
1250 
1251 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
1252   int64_t key = ObjectMonitor::owner_id_from(vthread);
1253   auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
1254   return owned_monitors_iterate_filtered(closure, thread_filter);
1255 }
1256 
1257 // Iterate ObjectMonitors owned by any thread.
1258 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
1259   auto all_filter = [&](ObjectMonitor* monitor) { return true; };
1260   return owned_monitors_iterate_filtered(closure, all_filter);
1261 }
1262 
1263 static bool monitors_used_above_threshold(MonitorList* list) {
1264   if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
1265     return false;
1266   }
1267   size_t monitors_used = list->count();
1268   if (monitors_used == 0) {  // empty list is easy
1269     return false;
1270   }
1271   size_t old_ceiling = ObjectSynchronizer::in_use_list_ceiling();
1272   // Make sure that we use a ceiling value that is not lower than
1273   // previous, not lower than the recorded max used by the system, and
1274   // not lower than the current number of monitors in use (which can
1275   // race ahead of max). The result is guaranteed > 0.
1276   size_t ceiling = MAX3(old_ceiling, list->max(), monitors_used);
1277 
1278   // Check if our monitor usage is above the threshold:
1279   size_t monitor_usage = (monitors_used * 100LL) / ceiling;
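       // Illustrative numbers (not taken from a real run): with monitors_used=900
       // and ceiling=1000, monitor_usage is 90, which is not strictly above a
       // MonitorUsedDeflationThreshold of 90, so usage alone would not trigger
       // deflation in that case.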
1280   if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
1281     // Deflate monitors if over the threshold percentage, unless no
1282     // progress on previous deflations.
1283     bool is_above_threshold = true;
1284 
1285     // Check if it's time to adjust the in_use_list_ceiling up, due
1286     // to too many async deflation attempts without any progress.
1287     if (NoAsyncDeflationProgressMax != 0 &&
1288         _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1289       double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
1290       size_t delta = (size_t)(ceiling * remainder) + 1;
1291       size_t new_ceiling = (ceiling > SIZE_MAX - delta)
1292         ? SIZE_MAX         // Overflow, let's clamp new_ceiling.
1293         : ceiling + delta;
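           // Illustrative arithmetic (not taken from a real run): with ceiling=1000
           // and MonitorUsedDeflationThreshold=90, remainder is 0.10, delta is 101,
           // and new_ceiling becomes 1101. The +1 in delta guarantees the ceiling
           // moves even when the computed fraction rounds down to zero.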
1294 
1295       ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
1296       log_info(monitorinflation)("Too many deflations without progress; "
1297                                  "bumping in_use_list_ceiling from %zu"
1298                                  " to %zu", old_ceiling, new_ceiling);
1299       _no_progress_cnt = 0;
1300       ceiling = new_ceiling;
1301 
1302       // Check if our monitor usage is still above the threshold:
1303       monitor_usage = (monitors_used * 100LL) / ceiling;
1304       is_above_threshold = int(monitor_usage) > MonitorUsedDeflationThreshold;
1305     }
1306     log_info(monitorinflation)("monitors_used=%zu, ceiling=%zu"
1307                                ", monitor_usage=%zu, threshold=%d",
1308                                monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
1309     return is_above_threshold;
1310   }
1311 
1312   return false;
1313 }
1314 
1315 size_t ObjectSynchronizer::in_use_list_ceiling() {
1316   return _in_use_list_ceiling;
1317 }
1318 
1319 void ObjectSynchronizer::dec_in_use_list_ceiling() {
1320   Atomic::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
1321 }
1322 
1323 void ObjectSynchronizer::inc_in_use_list_ceiling() {
1324   Atomic::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
1325 }
1326 
1327 void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
1328   _in_use_list_ceiling = new_value;
1329 }
1330 
1331 bool ObjectSynchronizer::is_async_deflation_needed() {
1332   if (is_async_deflation_requested()) {
1333     // Async deflation request.
1334     log_info(monitorinflation)("Async deflation needed: explicit request");
1335     return true;
1336   }
1337 
1338   jlong time_since_last = time_since_last_async_deflation_ms();
1339 
1340   if (AsyncDeflationInterval > 0 &&
1341       time_since_last > AsyncDeflationInterval &&
1342       monitors_used_above_threshold(&_in_use_list)) {
1343     // It's been longer than our specified deflate interval and there
1344     // are too many monitors in use. We don't deflate more frequently
1345     // than AsyncDeflationInterval (unless is_async_deflation_requested)
1346     // in order to not swamp the MonitorDeflationThread.
1347     log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
1348     return true;
1349   }
1350 
1351   if (GuaranteedAsyncDeflationInterval > 0 &&
1352       time_since_last > GuaranteedAsyncDeflationInterval) {
1353     // It's been longer than our specified guaranteed deflate interval.
1354     // We need to clean up the used monitors even if the threshold is
1355     // not reached, to keep the memory utilization at bay when many threads
1356     // touched many monitors.
1357     log_info(monitorinflation)("Async deflation needed: guaranteed interval (%zd ms) "
1358                                "is less than the time since the last deflation (" JLONG_FORMAT " ms)",
1359                                GuaranteedAsyncDeflationInterval, time_since_last);
1360 
1361     // If this deflation makes no progress, it should not affect the no-progress
1362     // tracking, otherwise the threshold heuristics would think it was triggered,
1363     // experienced no progress, and needs to back off more aggressively. In this
1364     // "no progress" case, the generic code would bump the no-progress counter, and
1365     // we compensate for that by telling it to skip the update.
1366     //
1367     // If this deflation makes progress, then it should let the no-progress tracking
1368     // know about this, otherwise the threshold heuristics would kick in, potentially
1369     // experience no progress due to the aggressive cleanup by this deflation, and
1370     // think it is still in a no-progress stride. In this "progress" case, the generic
1371     // code would zero the counter, and we allow it to happen.
1372     _no_progress_skip_increment = true;
1373 
1374     return true;
1375   }
1376 
1377   return false;
1378 }
1379 
1380 void ObjectSynchronizer::request_deflate_idle_monitors() {
1381   MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
1382   set_is_async_deflation_requested(true);
1383   ml.notify_all();
1384 }
1385 
1386 bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
1387   JavaThread* current = JavaThread::current();
1388   bool ret_code = false;
1389 
1390   jlong last_time = last_async_deflation_time_ns();
1391 
1392   request_deflate_idle_monitors();
1393 
1394   const int N_CHECKS = 5;
1395   for (int i = 0; i < N_CHECKS; i++) {  // sleep for at most 5 seconds
1396     if (last_async_deflation_time_ns() > last_time) {
1397       log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
1398       ret_code = true;
1399       break;
1400     }
1401     {
1402       // JavaThread has to honor the blocking protocol.
1403       ThreadBlockInVM tbivm(current);
1404       os::naked_short_sleep(999);  // sleep for almost 1 second
1405     }
1406   }
1407   if (!ret_code) {
1408     log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
1409   }
1410 
1411   return ret_code;
1412 }
1413 
1414 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
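       // NANOUNITS / MILLIUNITS is the number of nanoseconds per millisecond
       // (1,000,000), so this converts the javaTimeNanos() delta to milliseconds.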
1415   return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1416 }
1417 
1418 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1419                                        const oop obj,
1420                                        ObjectSynchronizer::InflateCause cause) {
1421   assert(event != nullptr, "invariant");
1422   event->set_monitorClass(obj->klass());
1423   event->set_address((uintptr_t)(void*)obj);
1424   event->set_cause((u1)cause);
1425   event->commit();
1426 }
1427 
1428 // Fast path code shared by multiple functions
1429 void ObjectSynchronizer::inflate_helper(oop obj) {
1430   assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1431   markWord mark = obj->mark_acquire();
1432   if (mark.has_monitor()) {
1433     ObjectMonitor* monitor = read_monitor(mark);
1434     markWord dmw = monitor->header();
1435     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1436     return;
1437   }
1438   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1439 }
1440 
1441 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1442   assert(current == Thread::current(), "must be");
1443   assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
1444   return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
1445 }
1446 
1447 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1448   assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1449   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
1450   return inflate_impl(thread, obj, cause);
1451 }
1452 
1453 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
1454   // The JavaThread* locking_thread requires that locking_thread == Thread::current()
1455   // or that locking_thread is suspended throughout the call by some other mechanism.
1456   // locking_thread might be nullptr when called from a non-JavaThread (as may still be
1457   // the case from FastHashCode). However, it is only important for correctness that
1458   // locking_thread is set when called from ObjectSynchronizer::enter from the owning
1459   // thread, ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1460   assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
1461   EventJavaMonitorInflate event;
1462 
1463   for (;;) {
1464     const markWord mark = object->mark_acquire();
1465 
1466     // The mark can be in one of the following states:
1467     // *  inflated     - If the ObjectMonitor owner is anonymous and the
1468     //                   locking_thread owns the object lock, then we
1469     //                   make the locking_thread the ObjectMonitor owner.
1470     // *  stack-locked - Coerce it to inflated from stack-locked.
1471     // *  INFLATING    - Busy wait for conversion from stack-locked to
1472     //                   inflated.
1473     // *  unlocked     - Aggressively inflate the object.
1474 
1475     // CASE: inflated
1476     if (mark.has_monitor()) {
1477       ObjectMonitor* inf = mark.monitor();
1478       markWord dmw = inf->header();
1479       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1480       if (inf->has_anonymous_owner() && locking_thread != nullptr) {
1481         assert(LockingMode == LM_LEGACY, "invariant");
1482         if (locking_thread->is_lock_owned((address)inf->stack_locker())) {
1483           inf->set_stack_locker(nullptr);
1484           inf->set_owner_from_anonymous(locking_thread);
1485         }
1486       }
1487       return inf;
1488     }
1489 
1490     // CASE: inflation in progress - inflating over a stack-lock.
1491     // Some other thread is converting from stack-locked to inflated.
1492     // Only that thread can complete inflation -- other threads must wait.
1493     // The INFLATING value is transient.
1494     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1495     // We could always eliminate polling by parking the thread on some auxiliary list.
1496     if (mark == markWord::INFLATING()) {
1497       read_stable_mark(object);
1498       continue;
1499     }
1500 
1501     // CASE: stack-locked
1502     // Could be stack-locked either by current or by some other thread.
1503     //
1504     // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1505     // to install INFLATING into the mark word.  We originally installed INFLATING,
1506     // allocated the ObjectMonitor, and then finally STed the address of the
1507     // ObjectMonitor into the mark.  This was correct, but artificially lengthened
1508     // the interval in which INFLATING appeared in the mark, thus increasing
1509     // the odds of inflation contention. If we lose the race to set INFLATING,
1510     // then we just delete the ObjectMonitor and loop around again.
1511     //
1512     LogStreamHandle(Trace, monitorinflation) lsh;
1513     if (LockingMode == LM_LEGACY && mark.has_locker()) {
1514       ObjectMonitor* m = new ObjectMonitor(object);
1515       // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1516       // We do this before the CAS in order to minimize the length of time
1517       // in which INFLATING appears in the mark.
1518 
1519       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1520       if (cmp != mark) {
1521         delete m;
1522         continue;       // Interference -- just retry
1523       }
1524 
1525       // We've successfully installed INFLATING (0) into the mark-word.
1526       // This is the only case where 0 will appear in a mark-word.
1527       // Only the singular thread that successfully swings the mark-word
1528       // to 0 can perform (or more precisely, complete) inflation.
1529       //
1530       // Why do we CAS a 0 into the mark-word instead of just CASing the
1531       // mark-word from the stack-locked value directly to the new inflated state?
1532       // Consider what happens when a thread unlocks a stack-locked object.
1533       // It attempts to use CAS to swing the displaced header value from the
1534       // on-stack BasicLock back into the object header.  Recall also that the
1535       // header value (hash code, etc) can reside in (a) the object header, or
1536       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1537       // header in an ObjectMonitor.  The inflate() routine must copy the header
1538       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1539       // the while preserving the hashCode stability invariants.  If the owner
1540       // decides to release the lock while the value is 0, the unlock will fail
1541       // and control will eventually pass from slow_exit() to inflate.  The owner
1542       // will then spin, waiting for the 0 value to disappear.   Put another way,
1543       // the 0 causes the owner to stall if the owner happens to try to
1544       // drop the lock (restoring the header from the BasicLock to the object)
1545       // while inflation is in-progress.  This protocol avoids races that would
1546       // otherwise permit hashCode values to change or "flicker" for an object.
1547       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1548       // 0 serves as a "BUSY" inflate-in-progress indicator.
1549 
1551       // Fetch the displaced mark from the owner's stack.
1552       // The owner can't die or unwind past the lock while our INFLATING
1553       // value is in the mark word.  Furthermore, the owner can't complete
1554       // an unlock on the object, either.
1555       markWord dmw = mark.displaced_mark_helper();
1556       // Catch if the object's header is not neutral (not locked and
1557       // not marked is what we care about here).
1558       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1559 
1560       // Setup monitor fields to proper values -- prepare the monitor
1561       m->set_header(dmw);
1562 
1563       // Note that a thread can inflate an object
1564       // that it has stack-locked -- as might happen in wait() -- directly
1565       // with CAS.  That is, we can avoid the xchg-nullptr .... ST idiom.
1566       if (locking_thread != nullptr && locking_thread->is_lock_owned((address)mark.locker())) {
1567         m->set_owner(locking_thread);
1568       } else {
1569         // Use ANONYMOUS_OWNER to indicate that the owner is the BasicLock on the stack,
1570         // and set the stack locker field in the monitor.
1571         m->set_stack_locker(mark.locker());
1572         m->set_anonymous_owner();
1573       }
1574       // TODO-FIXME: assert BasicLock->dhw != 0.
1575 
1576       // Must preserve store ordering. The monitor state must
1577       // be stable at the time of publishing the monitor address.
1578       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1579       // Release semantics so that above set_object() is seen first.
1580       object->release_set_mark(markWord::encode(m));
1581 
1582       // Once ObjectMonitor is configured and the object is associated
1583       // with the ObjectMonitor, it is safe to allow async deflation:
1584       _in_use_list.add(m);
1585 
1586       // Hopefully the performance counters are allocated on distinct cache lines
1587       // to avoid false sharing on MP systems ...
1588       OM_PERFDATA_OP(Inflations, inc());
1589       if (log_is_enabled(Trace, monitorinflation)) {
1590         ResourceMark rm;
1591         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1592                      INTPTR_FORMAT ", type='%s'", p2i(object),
1593                      object->mark().value(), object->klass()->external_name());
1594       }
1595       if (event.should_commit()) {
1596         post_monitor_inflate_event(&event, object, cause);
1597       }
1598       return m;
1599     }
1600 
1601     // CASE: unlocked
1602     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1603     // If we know we're inflating for entry it's better to inflate by swinging a
1604     // pre-locked ObjectMonitor pointer into the object header.   A successful
1605     // CAS inflates the object *and* confers ownership to the inflating thread.
1606     // In the current implementation we use a 2-step mechanism where we CAS()
1607     // to inflate and then CAS() again to try to swing _owner from null to current.
1608     // An inflateTry() method that we could call from enter() would be useful.
1609 
1610     assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
1611     ObjectMonitor* m = new ObjectMonitor(object);
1612     // prepare m for installation - set monitor to initial state
1613     m->set_header(mark);
1614 
1615     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1616       delete m;
1617       m = nullptr;
1618       continue;
1619       // interference - the markword changed - just retry.
1620       // The state-transitions are one-way, so there's no chance of
1621       // live-lock -- "Inflated" is an absorbing state.
1622     }
1623 
1624     // Once the ObjectMonitor is configured and the object is associated
1625     // with the ObjectMonitor, it is safe to allow async deflation:
1626     _in_use_list.add(m);
1627 
1628     // Hopefully the performance counters are allocated on distinct
1629     // cache lines to avoid false sharing on MP systems ...
1630     OM_PERFDATA_OP(Inflations, inc());
1631     if (log_is_enabled(Trace, monitorinflation)) {
1632       ResourceMark rm;
1633       lsh.print_cr("inflate(unlocked): object=" INTPTR_FORMAT ", mark="
1634                    INTPTR_FORMAT ", type='%s'", p2i(object),
1635                    object->mark().value(), object->klass()->external_name());
1636     }
1637     if (event.should_commit()) {
1638       post_monitor_inflate_event(&event, object, cause);
1639     }
1640     return m;
1641   }
1642 }
1643 
1644 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1645 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1646 //
1647 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1648   MonitorList::Iterator iter = _in_use_list.iterator();
1649   size_t deflated_count = 0;
1650   Thread* current = Thread::current();
1651 
1652   while (iter.has_next()) {
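         // Bound the work done in a single pass; any remaining idle monitors are
         // left for a later deflation cycle.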
1653     if (deflated_count >= (size_t)MonitorDeflationMax) {
1654       break;
1655     }
1656     ObjectMonitor* mid = iter.next();
1657     if (mid->deflate_monitor(current)) {
1658       deflated_count++;
1659     }
1660 
1661     // Must check for a safepoint/handshake and honor it.
1662     safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1663   }
1664 
1665   return deflated_count;
1666 }
1667 
1668 class HandshakeForDeflation : public HandshakeClosure {
1669  public:
1670   HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1671 
1672   void do_thread(Thread* thread) {
1673     log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1674                                 INTPTR_FORMAT, p2i(thread));
1675     if (thread->is_Java_thread()) {
1676       // Clear OM cache
1677       JavaThread* jt = JavaThread::cast(thread);
1678       jt->om_clear_monitor_cache();
1679     }
1680   }
1681 };
1682 
1683 class VM_RendezvousGCThreads : public VM_Operation {
1684 public:
1685   bool evaluate_at_safepoint() const override { return false; }
1686   VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
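       // Rendezvous with concurrent GC threads: safepoint_synchronize_begin()
       // stops them and safepoint_synchronize_end() releases them again, so none
       // of them is still examining a mark-word that points at an about-to-be-freed
       // ObjectMonitor (see the caller in deflate_idle_monitors()).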
1687   void doit() override {
1688     Universe::heap()->safepoint_synchronize_begin();
1689     Universe::heap()->safepoint_synchronize_end();
1690   };
1691 };
1692 
1693 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1694                               ObjectMonitorDeflationSafepointer* safepointer) {
1695   NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1696   size_t deleted_count = 0;
1697   for (ObjectMonitor* monitor: *delete_list) {
1698     delete monitor;
1699     deleted_count++;
1700     // A JavaThread must check for a safepoint/handshake and honor it.
1701     safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
1702   }
1703   return deleted_count;
1704 }
1705 
1706 class ObjectMonitorDeflationLogging: public StackObj {
1707   LogStreamHandle(Debug, monitorinflation) _debug;
1708   LogStreamHandle(Info, monitorinflation)  _info;
1709   LogStream*                               _stream;
1710   elapsedTimer                             _timer;
1711 
1712   size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
1713   size_t count() const   { return ObjectSynchronizer::_in_use_list.count(); }
1714   size_t max() const     { return ObjectSynchronizer::_in_use_list.max(); }
1715 
1716 public:
1717   ObjectMonitorDeflationLogging()
1718     : _debug(), _info(), _stream(nullptr) {
1719     if (_debug.is_enabled()) {
1720       _stream = &_debug;
1721     } else if (_info.is_enabled()) {
1722       _stream = &_info;
1723     }
1724   }
1725 
1726   void begin() {
1727     if (_stream != nullptr) {
1728       _stream->print_cr("begin deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
1729                         ceiling(), count(), max());
1730       _timer.start();
1731     }
1732   }
1733 
1734   void before_handshake(size_t unlinked_count) {
1735     if (_stream != nullptr) {
1736       _timer.stop();
1737       _stream->print_cr("before handshaking: unlinked_count=%zu"
1738                         ", in_use_list stats: ceiling=%zu, count="
1739                         "%zu, max=%zu",
1740                         unlinked_count, ceiling(), count(), max());
1741     }
1742   }
1743 
1744   void after_handshake() {
1745     if (_stream != nullptr) {
1746       _stream->print_cr("after handshaking: in_use_list stats: ceiling="
1747                         "%zu, count=%zu, max=%zu",
1748                         ceiling(), count(), max());
1749       _timer.start();
1750     }
1751   }
1752 
1753   void end(size_t deflated_count, size_t unlinked_count) {
1754     if (_stream != nullptr) {
1755       _timer.stop();
1756       if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
1757         _stream->print_cr("deflated_count=%zu, {unlinked,deleted}_count=%zu monitors in %3.7f secs",
1758                           deflated_count, unlinked_count, _timer.seconds());
1759       }
1760       _stream->print_cr("end deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
1761                         ceiling(), count(), max());
1762     }
1763   }
1764 
1765   void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
1766     if (_stream != nullptr) {
1767       _timer.stop();
1768       _stream->print_cr("pausing %s: %s=%zu, in_use_list stats: ceiling="
1769                         "%zu, count=%zu, max=%zu",
1770                         op_name, cnt_name, cnt, ceiling(), count(), max());
1771     }
1772   }
1773 
1774   void after_block_for_safepoint(const char* op_name) {
1775     if (_stream != nullptr) {
1776       _stream->print_cr("resuming %s: in_use_list stats: ceiling=%zu"
1777                         ", count=%zu, max=%zu", op_name,
1778                         ceiling(), count(), max());
1779       _timer.start();
1780     }
1781   }
1782 };
1783 
1784 void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
1785   if (!SafepointMechanism::should_process(_current)) {
1786     return;
1787   }
1788 
1789   // A safepoint/handshake has started.
1790   _log->before_block_for_safepoint(op_name, count_name, counter);
1791 
1792   {
1793     // Honor block request.
1794     ThreadBlockInVM tbivm(_current);
1795   }
1796 
1797   _log->after_block_for_safepoint(op_name);
1798 }
1799 
1800 // This function is called by the MonitorDeflationThread to deflate
1801 // ObjectMonitors.
1802 size_t ObjectSynchronizer::deflate_idle_monitors() {
1803   JavaThread* current = JavaThread::current();
1804   assert(current->is_monitor_deflation_thread(), "The only monitor deflater");
1805 
1806   // The async deflation request has been processed.
1807   _last_async_deflation_time_ns = os::javaTimeNanos();
1808   set_is_async_deflation_requested(false);
1809 
1810   ObjectMonitorDeflationLogging log;
1811   ObjectMonitorDeflationSafepointer safepointer(current, &log);
1812 
1813   log.begin();
1814 
1815   // Deflate some idle ObjectMonitors.
1816   size_t deflated_count = deflate_monitor_list(&safepointer);
1817 
1818   // Unlink the deflated ObjectMonitors from the in-use list.
1819   size_t unlinked_count = 0;
1820   size_t deleted_count = 0;
1821   if (deflated_count > 0) {
1822     ResourceMark rm(current);
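         // Reserve capacity for every monitor deflated in this cycle; the backing
         // array is resource-allocated, hence the ResourceMark above.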
1823     GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1824     unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
1825 
1826 #ifdef ASSERT
1827     if (UseObjectMonitorTable) {
1828       for (ObjectMonitor* monitor : delete_list) {
1829         assert(!LightweightSynchronizer::contains_monitor(current, monitor), "Should have been removed");
1830       }
1831     }
1832 #endif
1833 
1834     log.before_handshake(unlinked_count);
1835 
1836     // A JavaThread needs to handshake in order to safely free the
1837     // ObjectMonitors that were deflated in this cycle.
1838     HandshakeForDeflation hfd_hc;
1839     Handshake::execute(&hfd_hc);
1840     // Also, we sync and desync GC threads around the handshake, so that they can
1841     // safely read the mark-word and look-through to the object-monitor, without
1842     // being afraid that the object-monitor is going away.
1843     VM_RendezvousGCThreads sync_gc;
1844     VMThread::execute(&sync_gc);
1845 
1846     log.after_handshake();
1847 
1848     // After the handshake, safely free the ObjectMonitors that were
1849     // deflated and unlinked in this cycle.
1850 
1851     // Delete the unlinked ObjectMonitors.
1852     deleted_count = delete_monitors(&delete_list, &safepointer);
1853     assert(unlinked_count == deleted_count, "must be");
1854   }
1855 
1856   log.end(deflated_count, unlinked_count);
1857 
1858   OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
1859   OM_PERFDATA_OP(Deflations, inc(deflated_count));
1860 
1861   GVars.stw_random = os::random();
1862 
1863   if (deflated_count != 0) {
1864     _no_progress_cnt = 0;
1865   } else if (_no_progress_skip_increment) {
1866     _no_progress_skip_increment = false;
1867   } else {
1868     _no_progress_cnt++;
1869   }
1870 
1871   return deflated_count;
1872 }
1873 
1874 // Monitor cleanup on JavaThread::exit
1875 
1876 // Closure used when iterating the in-use monitor list to release the thread's owned monitors
1877 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1878  private:
1879   JavaThread* _thread;
1880 
1881  public:
1882   ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1883   void do_monitor(ObjectMonitor* mid) {
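         // complete_exit() returns the recursion count, so the monitor was held
         // rec + 1 times in total and the held count is reduced by that amount.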
1884     intx rec = mid->complete_exit(_thread);
1885     _thread->dec_held_monitor_count(rec + 1);
1886   }
1887 };
1888 
1889 // Release all inflated monitors owned by the current thread.  Lightweight monitors are
1890 // ignored.  This is meant to be called during JNI thread detach which assumes
1891 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1892 // Scanning the extant monitor list can be time consuming.
1893 // A simple optimization is to add a per-thread flag that indicates a thread
1894 // called jni_monitorenter() during its lifetime.
1895 //
1896 // Instead of NoSafepointVerifier it might be cheaper to
1897 // use an idiom of the form:
1898 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
1899 //   <code that must not run at safepoint>
1900 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1901 // Since the tests are extremely cheap we could leave them enabled
1902 // for normal product builds.
1903 
1904 void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1905   assert(current == JavaThread::current(), "must be current Java thread");
1906   NoSafepointVerifier nsv;
1907   ReleaseJavaMonitorsClosure rjmc(current);
1908   ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
1909   assert(!current->has_pending_exception(), "Should not be possible");
1910   current->clear_pending_exception();
1911   assert(current->held_monitor_count() == 0, "Should not be possible");
1912   // All monitors (including those entered via JNI) have been unlocked above, so we need to clear the JNI monitor count.
1913   current->clear_jni_monitor_count();
1914 }
1915 
1916 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1917   switch (cause) {
1918     case inflate_cause_vm_internal:    return "VM Internal";
1919     case inflate_cause_monitor_enter:  return "Monitor Enter";
1920     case inflate_cause_wait:           return "Monitor Wait";
1921     case inflate_cause_notify:         return "Monitor Notify";
1922     case inflate_cause_hash_code:      return "Monitor Hash Code";
1923     case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1924     case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1925     default:
1926       ShouldNotReachHere();
1927   }
1928   return "Unknown";
1929 }
1930 
1931 //------------------------------------------------------------------------------
1932 // Debugging code
1933 
1934 u_char* ObjectSynchronizer::get_gvars_addr() {
1935   return (u_char*)&GVars;
1936 }
1937 
1938 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1939   return (u_char*)&GVars.hc_sequence;
1940 }
1941 
1942 size_t ObjectSynchronizer::get_gvars_size() {
1943   return sizeof(SharedGlobals);
1944 }
1945 
1946 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
1947   return (u_char*)&GVars.stw_random;
1948 }
1949 
1950 // Do the final audit and print of ObjectMonitor stats; must be done
1951 // by the VMThread at VM exit time.
1952 void ObjectSynchronizer::do_final_audit_and_print_stats() {
1953   assert(Thread::current()->is_VM_thread(), "sanity check");
1954 
1955   if (is_final_audit()) {  // Only do the audit once.
1956     return;
1957   }
1958   set_is_final_audit();
1959   log_info(monitorinflation)("Starting the final audit.");
1960 
1961   if (log_is_enabled(Info, monitorinflation)) {
1962     LogStreamHandle(Info, monitorinflation) ls;
1963     audit_and_print_stats(&ls, true /* on_exit */);
1964   }
1965 }
1966 
1967 // This function can be called by the MonitorDeflationThread or it can be called when
1968 // we are trying to exit the VM. The list walker functions can run in parallel with
1969 // the other list operations.
1970 // Calls to this function can be added in various places as a debugging
1971 // aid.
1972 //
1973 void ObjectSynchronizer::audit_and_print_stats(outputStream* ls, bool on_exit) {
1974   int error_cnt = 0;
1975 
1976   ls->print_cr("Checking in_use_list:");
1977   chk_in_use_list(ls, &error_cnt);
1978 
1979   if (error_cnt == 0) {
1980     ls->print_cr("No errors found in in_use_list checks.");
1981   } else {
1982     log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
1983   }
1984 
1985   // When exiting, only log the interesting entries at the Info level.
1986   // When called at intervals by the MonitorDeflationThread, log output
1987   // at the Trace level since there can be a lot of it.
1988   if (!on_exit && log_is_enabled(Trace, monitorinflation)) {
1989     LogStreamHandle(Trace, monitorinflation) ls_tr;
1990     log_in_use_monitor_details(&ls_tr, true /* log_all */);
1991   } else if (on_exit) {
1992     log_in_use_monitor_details(ls, false /* log_all */);
1993   }
1994 
1995   ls->flush();
1996 
1997   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1998 }
1999 
2000 // Check the in_use_list; log the results of the checks.
2001 void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
2002   size_t l_in_use_count = _in_use_list.count();
2003   size_t l_in_use_max = _in_use_list.max();
2004   out->print_cr("count=%zu, max=%zu", l_in_use_count,
2005                 l_in_use_max);
2006 
2007   size_t ck_in_use_count = 0;
2008   MonitorList::Iterator iter = _in_use_list.iterator();
2009   while (iter.has_next()) {
2010     ObjectMonitor* mid = iter.next();
2011     chk_in_use_entry(mid, out, error_cnt_p);
2012     ck_in_use_count++;
2013   }
2014 
2015   if (l_in_use_count == ck_in_use_count) {
2016     out->print_cr("in_use_count=%zu equals ck_in_use_count=%zu",
2017                   l_in_use_count, ck_in_use_count);
2018   } else {
2019     out->print_cr("WARNING: in_use_count=%zu is not equal to "
2020                   "ck_in_use_count=%zu", l_in_use_count,
2021                   ck_in_use_count);
2022   }
2023 
2024   size_t ck_in_use_max = _in_use_list.max();
2025   if (l_in_use_max == ck_in_use_max) {
2026     out->print_cr("in_use_max=%zu equals ck_in_use_max=%zu",
2027                   l_in_use_max, ck_in_use_max);
2028   } else {
2029     out->print_cr("WARNING: in_use_max=%zu is not equal to "
2030                   "ck_in_use_max=%zu", l_in_use_max, ck_in_use_max);
2031   }
2032 }
2033 
2034 // Check an in-use monitor entry; log any errors.
2035 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
2036                                           int* error_cnt_p) {
2037   if (n->owner_is_DEFLATER_MARKER()) {
2038     // This could happen when monitor deflation blocks for a safepoint.
2039     return;
2040   }
2041 
2043   if (n->metadata() == 0) {
2044     out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
2045                   "have non-null _metadata (header/hash) field.", p2i(n));
2046     *error_cnt_p = *error_cnt_p + 1;
2047   }
2048 
2049   const oop obj = n->object_peek();
2050   if (obj == nullptr) {
2051     return;
2052   }
2053 
2054   const markWord mark = obj->mark();
2055   if (!mark.has_monitor()) {
2056     out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2057                   "object does not think it has a monitor: obj="
2058                   INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
2059                   p2i(obj), mark.value());
2060     *error_cnt_p = *error_cnt_p + 1;
2061     return;
2062   }
2063 
2064   ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
2065   if (n != obj_mon) {
2066     out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
2067                   "object does not refer to the same monitor: obj="
2068                   INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2069                   INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2070     *error_cnt_p = *error_cnt_p + 1;
2071   }
2072 }
2073 
2074 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
2075 // flags indicate why the entry is in-use, 'object' and 'object type'
2076 // indicate the associated object and its type.
2077 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
2078   if (_in_use_list.count() > 0) {
2079     stringStream ss;
2080     out->print_cr("In-use monitor info%s:", log_all ? "" : " (eliding idle monitors)");
2081     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2082     out->print_cr("%18s  %s  %18s  %18s",
2083                   "monitor", "BHL", "object", "object type");
2084     out->print_cr("==================  ===  ==================  ==================");
2085 
2086     auto is_interesting = [&](ObjectMonitor* monitor) {
2087       return log_all || monitor->has_owner() || monitor->is_busy();
2088     };
2089 
2090     monitors_iterate([&](ObjectMonitor* monitor) {
2091       if (is_interesting(monitor)) {
2092         const oop obj = monitor->object_peek();
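             // With UseObjectMonitorTable the hash is cached in the monitor itself;
             // otherwise it lives in the (displaced) header stored in the monitor.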
2093         const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
2094         ResourceMark rm;
2095         out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(monitor),
2096                    monitor->is_busy(), hash != 0, monitor->has_owner(),
2097                    p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
2098         if (monitor->is_busy()) {
2099           out->print(" (%s)", monitor->is_busy_to_string(&ss));
2100           ss.reset();
2101         }
2102         out->cr();
2103       }
2104     });
2105   }
2106 
2107   out->flush();
2108 }