1 /*
   2  * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "jfr/jfrEvents.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/padded.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/markWord.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/handshake.hpp"
  41 #include "runtime/interfaceSupport.inline.hpp"
  42 #include "runtime/javaThread.hpp"
  43 #include "runtime/lockStack.inline.hpp"
  44 #include "runtime/mutexLocker.hpp"
  45 #include "runtime/objectMonitor.hpp"
  46 #include "runtime/objectMonitor.inline.hpp"
  47 #include "runtime/os.inline.hpp"
  48 #include "runtime/osThread.hpp"
  49 #include "runtime/perfData.hpp"
  50 #include "runtime/safepointMechanism.inline.hpp"
  51 #include "runtime/safepointVerifiers.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/synchronizer.hpp"
  55 #include "runtime/threads.hpp"
  56 #include "runtime/timer.hpp"
  57 #include "runtime/trimNativeHeap.hpp"
  58 #include "runtime/vframe.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "utilities/align.hpp"
  61 #include "utilities/dtrace.hpp"
  62 #include "utilities/events.hpp"
  63 #include "utilities/linkedlist.hpp"
  64 #include "utilities/preserveException.hpp"
  65 
  66 void MonitorList::add(ObjectMonitor* m) {
  67   ObjectMonitor* head;
  68   do {
  69     head = Atomic::load(&_head);
  70     m->set_next_om(head);
  71   } while (Atomic::cmpxchg(&_head, head, m) != head);
  72 
  73   size_t count = Atomic::add(&_count, 1u);
  74   if (count > max()) {
  75     Atomic::inc(&_max);
  76   }
  77 }
  78 
  79 size_t MonitorList::count() const {
  80   return Atomic::load(&_count);
  81 }
  82 
  83 size_t MonitorList::max() const {
  84   return Atomic::load(&_max);
  85 }
  86 
  87 // Walk the in-use list and unlink deflated ObjectMonitors.
  88 // Returns the number of unlinked ObjectMonitors.
  89 size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
  90                                     elapsedTimer* timer_p,
  91                                     size_t deflated_count,
  92                                     GrowableArray<ObjectMonitor*>* unlinked_list) {
  93   size_t unlinked_count = 0;
  94   ObjectMonitor* prev = nullptr;
  95   ObjectMonitor* m = Atomic::load_acquire(&_head);
  96 
  97   // The in-use list head can be null during the final audit.
  98   while (m != nullptr) {
  99     if (m->is_being_async_deflated()) {
 100       // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
 101       // modify the list once per batch. The batch starts at "m".
 102       size_t unlinked_batch = 0;
 103       ObjectMonitor* next = m;
 104       // Look for at most MonitorUnlinkBatch monitors, or the number of
 105       // deflated and not unlinked monitors, whatever comes first.
 106       assert(deflated_count >= unlinked_count, "Sanity: underflow");
 107       size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
 108       do {
 109         ObjectMonitor* next_next = next->next_om();
 110         unlinked_batch++;
 111         unlinked_list->append(next);
 112         next = next_next;
 113         if (unlinked_batch >= unlinked_batch_limit) {
 114           // Reached the max batch, so bail out of the gathering loop.
 115           break;
 116         }
 117         if (prev == nullptr && Atomic::load(&_head) != m) {
 118           // Current batch used to be at head, but it is not at head anymore.
 119           // Bail out and figure out where we currently are. This avoids long
 120           // walks searching for new prev during unlink under heavy list inserts.
 121           break;
 122         }
 123       } while (next != nullptr && next->is_being_async_deflated());
 124 
 125       // Unlink the found batch.
 126       if (prev == nullptr) {
 127         // The current batch is the first batch, so there is a chance that it starts at head.
 128         // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
 129         ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, m, next);
 130         if (prev_head != m) {
 131           // Something must have updated the head. Figure out the actual prev for this batch.
 132           for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
 133             prev = n;
 134           }
 135           assert(prev != nullptr, "Should have found the prev for the current batch");
 136           prev->set_next_om(next);
 137         }
 138       } else {
 139         // The current batch is preceded by another batch. This guarantees the current batch
 140         // does not start at head. Unlink the entire current batch without updating the head.
 141         assert(Atomic::load(&_head) != m, "Sanity");
 142         prev->set_next_om(next);
 143       }
 144 
 145       unlinked_count += unlinked_batch;
 146       if (unlinked_count >= deflated_count) {
 147         // Reached the max so bail out of the searching loop.
 148         // There should be no more deflated monitors left.
 149         break;
 150       }
 151       m = next;
 152     } else {
 153       prev = m;
 154       m = m->next_om();
 155     }
 156 
 157     if (current->is_Java_thread()) {
 158       // A JavaThread must check for a safepoint/handshake and honor it.
 159       ObjectSynchronizer::chk_for_block_req(JavaThread::cast(current), "unlinking",
 160                                             "unlinked_count", unlinked_count,
 161                                             ls, timer_p);
 162     }
 163   }
 164 
 165 #ifdef ASSERT
 166   // Invariant: the code above should unlink all deflated monitors.
 167   // The code that runs after this unlinking does not expect deflated monitors.
 168   // Notably, attempting to deflate the already deflated monitor would break.
 169   {
 170     ObjectMonitor* m = Atomic::load_acquire(&_head);
 171     while (m != nullptr) {
 172       assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
 173       m = m->next_om();
 174     }
 175   }
 176 #endif
 177 
 178   Atomic::sub(&_count, unlinked_count);
 179   return unlinked_count;
 180 }
 181 
 182 MonitorList::Iterator MonitorList::iterator() const {
 183   return Iterator(Atomic::load_acquire(&_head));
 184 }
 185 
 186 ObjectMonitor* MonitorList::Iterator::next() {
 187   ObjectMonitor* current = _current;
 188   _current = current->next_om();
 189   return current;
 190 }
 191 
 192 // The "core" versions of monitor enter and exit reside in this file.
 193 // The interpreter and compilers contain specialized transliterated
 194 // variants of the enter-exit fast-path operations.  See c2_MacroAssembler_x86.cpp
 195 // fast_lock(...) for instance.  If you make changes here, make sure to modify the
 196 // interpreter, and both C1 and C2 fast-path inline locking code emission.
 197 //
 198 // -----------------------------------------------------------------------------
 199 
 200 #ifdef DTRACE_ENABLED
 201 
 202 // Only bother with this argument setup if dtrace is available
 203 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
 204 
 205 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
 206   char* bytes = nullptr;                                                      \
 207   int len = 0;                                                             \
 208   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
 209   Symbol* klassname = obj->klass()->name();                                \
 210   if (klassname != nullptr) {                                                 \
 211     bytes = (char*)klassname->bytes();                                     \
 212     len = klassname->utf8_length();                                        \
 213   }
 214 
 215 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
 216   {                                                                        \
 217     if (DTraceMonitorProbes) {                                             \
 218       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
 219       HOTSPOT_MONITOR_WAIT(jtid,                                           \
 220                            (uintptr_t)(monitor), bytes, len, (millis));    \
 221     }                                                                      \
 222   }
 223 
 224 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
 225 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
 226 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
 227 
 228 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
 229   {                                                                        \
 230     if (DTraceMonitorProbes) {                                             \
 231       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
 232       HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
 233                                     (uintptr_t)(monitor), bytes, len);     \
 234     }                                                                      \
 235   }
 236 
 237 #else //  ndef DTRACE_ENABLED
 238 
 239 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 240 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 241 
 242 #endif // ndef DTRACE_ENABLED
 243 
 244 // This exists only as a workaround of dtrace bug 6254741
 245 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
 246   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 247   return 0;
 248 }
 249 
 250 static constexpr size_t inflation_lock_count() {
 251   return 256;
 252 }
 253 
 254 // Static storage for an array of PlatformMutex.
 255 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
 256 
 257 static inline PlatformMutex* inflation_lock(size_t index) {
 258   return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
 259 }
 260 
 261 void ObjectSynchronizer::initialize() {
 262   for (size_t i = 0; i < inflation_lock_count(); i++) {
 263     ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
 264   }
 265   // Start the ceiling with the estimate for one thread.
 266   set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
 267 
 268   // Start the timer for deflations, so it does not trigger immediately.
 269   _last_async_deflation_time_ns = os::javaTimeNanos();
 270 }
 271 
 272 MonitorList ObjectSynchronizer::_in_use_list;
 273 // monitors_used_above_threshold() policy is as follows:
 274 //
 275 // The ratio of the current _in_use_list count to the ceiling is used
 276 // to determine if we are above MonitorUsedDeflationThreshold and need
 277 // to do an async monitor deflation cycle. The ceiling is increased by
 278 // AvgMonitorsPerThreadEstimate when a thread is added to the system
 279 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
 280 // removed from the system.
 281 //
 282 // Note: If the _in_use_list max exceeds the ceiling, then
 283 // monitors_used_above_threshold() will use the in_use_list max instead
 284 // of the thread count derived ceiling because we have used more
 285 // ObjectMonitors than the estimated average.
 286 //
 287 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 288 // no-progress async monitor deflation cycles in a row, then the ceiling
 289 // is adjusted upwards by monitors_used_above_threshold().
 290 //
 291 // Start the ceiling with the estimate for one thread in initialize()
 292 // which is called after cmd line options are processed.
 293 static size_t _in_use_list_ceiling = 0;
 294 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 295 bool volatile ObjectSynchronizer::_is_final_audit = false;
 296 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 297 static uintx _no_progress_cnt = 0;
 298 static bool _no_progress_skip_increment = false;
 299 
 300 // =====================> Quick functions
 301 
 302 // The quick_* forms are special fast-path variants used to improve
 303 // performance.  In the simplest case, a "quick_*" implementation could
 304 // simply return false, in which case the caller will perform the necessary
 305 // state transitions and call the slow-path form.
 306 // The fast-path is designed to handle frequently arising cases in an efficient
 307 // manner and is just a degenerate "optimistic" variant of the slow-path.
 308 // returns true  -- to indicate the call was satisfied.
 309 // returns false -- to indicate the call needs the services of the slow-path.
 310 // A no-loitering ordinance is in effect for code in the quick_* family
 311 // operators: safepoints or indefinite blocking (blocking that might span a
 312 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 313 // entry.
 314 //
 315 // Consider: An interesting optimization is to have the JIT recognize the
 316 // following common idiom:
 317 //   synchronized (someobj) { .... ; notify(); }
 318 // That is, we find a notify() or notifyAll() call that immediately precedes
 319 // the monitorexit operation.  In that case the JIT could fuse the operations
 320 // into a single notifyAndExit() runtime primitive.
 321 
 322 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 323   assert(current->thread_state() == _thread_in_Java, "invariant");
 324   NoSafepointVerifier nsv;
 325   if (obj == nullptr) return false;  // slow-path for invalid obj
 326   const markWord mark = obj->mark();
 327 
 328   if (LockingMode == LM_LIGHTWEIGHT) {
 329     if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 330       // Degenerate notify
 331       // fast-locked by caller so by definition the implied waitset is empty.
 332       return true;
 333     }
 334   } else if (LockingMode == LM_LEGACY) {
 335     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 336       // Degenerate notify
 337       // stack-locked by caller so by definition the implied waitset is empty.
 338       return true;
 339     }
 340   }
 341 
 342   if (mark.has_monitor()) {
 343     ObjectMonitor* const mon = mark.monitor();
 344     assert(mon->object() == oop(obj), "invariant");
 345     if (mon->owner() != current) return false;  // slow-path for IMS exception
 346 
 347     if (mon->first_waiter() != nullptr) {
 348       // We have one or more waiters. Since this is an inflated monitor
 349       // that we own, we can transfer one or more threads from the waitset
 350       // to the entrylist here and now, avoiding the slow-path.
 351       if (all) {
 352         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 353       } else {
 354         DTRACE_MONITOR_PROBE(notify, mon, obj, current);
 355       }
 356       int free_count = 0;
 357       do {
 358         mon->INotify(current);
 359         ++free_count;
 360       } while (mon->first_waiter() != nullptr && all);
 361       OM_PERFDATA_OP(Notifications, inc(free_count));
 362     }
 363     return true;
 364   }
 365 
 366   // other IMS exception states take the slow-path
 367   return false;
 368 }
 369 
 370 
 371 // The LockNode emitted directly at the synchronization site would have
 372 // been too big if it were to have included support for the cases of inflated
 373 // recursive enter and exit, so they go here instead.
 374 // Note that we can't safely call AsyncPrintJavaStack() from within
 375 // quick_enter() as our thread state remains _in_Java.
 376 
 377 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 378                                      BasicLock * lock) {
 379   assert(current->thread_state() == _thread_in_Java, "invariant");
 380   NoSafepointVerifier nsv;
 381   if (obj == nullptr) return false;       // Need to throw NPE
 382 
 383   if (obj->klass()->is_value_based()) {
 384     return false;
 385   }
 386 
 387   const markWord mark = obj->mark();
 388 
 389   if (mark.has_monitor()) {
 390     ObjectMonitor* const m = mark.monitor();
 391     // An async deflation or GC can race us before we manage to make
 392     // the ObjectMonitor busy by setting the owner below. If we detect
 393     // that race we just bail out to the slow-path here.
 394     if (m->object_peek() == nullptr) {
 395       return false;
 396     }
 397     JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
 398 
 399     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 400     // and observability
 401     // Case: light contention possibly amenable to TLE
 402     // Case: TLE inimical operations such as nested/recursive synchronization
 403 
 404     if (owner == current) {
 405       m->_recursions++;
 406       current->inc_held_monitor_count();
 407       return true;
 408     }
 409 
 410     if (LockingMode != LM_LIGHTWEIGHT) {
 411       // This Java Monitor is inflated so obj's header will never be
 412       // displaced to this thread's BasicLock. Make the displaced header
 413       // non-null so this BasicLock is not seen as recursive nor as
 414       // being locked. We do this unconditionally so that this thread's
 415       // BasicLock cannot be mis-interpreted by any stack walkers. For
 416       // performance reasons, stack walkers generally first check for
 417       // stack-locking in the object's header, the second check is for
 418       // recursive stack-locking in the displaced header in the BasicLock,
 419       // and last are the inflated Java Monitor (ObjectMonitor) checks.
 420       lock->set_displaced_header(markWord::unused_mark());
 421     }
 422 
 423     if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
 424       assert(m->_recursions == 0, "invariant");
 425       current->inc_held_monitor_count();
 426       return true;
 427     }
 428   }
 429 
 430   // Note that we could inflate in quick_enter.
 431   // This is likely a useful optimization
 432   // Critically, in quick_enter() we must not:
 433   // -- block indefinitely, or
 434   // -- reach a safepoint
 435 
 436   return false;        // revert to slow-path
 437 }
 438 
 439 // Handle notifications when synchronizing on value based classes
 440 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
 441   frame last_frame = current->last_frame();
 442   bool bcp_was_adjusted = false;
 443   // Don't decrement bcp if it points to the frame's first instruction.  This happens when
 444   // handle_sync_on_value_based_class() is called because of a synchronized method.  There
 445   // is no actual monitorenter instruction in the byte code in this case.
 446   if (last_frame.is_interpreted_frame() &&
 447       (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
 448     // adjust bcp to point back to monitorenter so that we print the correct line numbers
 449     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
 450     bcp_was_adjusted = true;
 451   }
 452 
 453   if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
 454     ResourceMark rm(current);
 455     stringStream ss;
 456     current->print_active_stack_on(&ss);
 457     char* base = (char*)strstr(ss.base(), "at");
 458     char* newline = (char*)strchr(ss.base(), '\n');
 459     if (newline != nullptr) {
 460       *newline = '\0';
 461     }
 462     fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
 463   } else {
 464     assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
 465     ResourceMark rm(current);
 466     Log(valuebasedclasses) vblog;
 467 
 468     vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
 469     if (current->has_last_Java_frame()) {
 470       LogStream info_stream(vblog.info());
 471       current->print_active_stack_on(&info_stream);
 472     } else {
 473       vblog.info("Cannot find the last Java frame");
 474     }
 475 
 476     EventSyncOnValueBasedClass event;
 477     if (event.should_commit()) {
 478       event.set_valueBasedClass(obj->klass());
 479       event.commit();
 480     }
 481   }
 482 
 483   if (bcp_was_adjusted) {
 484     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 485   }
 486 }
 487 
 488 static bool useHeavyMonitors() {
 489 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
 490   return LockingMode == LM_MONITOR;
 491 #else
 492   return false;
 493 #endif
 494 }
 495 
 496 // -----------------------------------------------------------------------------
 497 // Monitor Enter/Exit
 498 // The interpreter and compiler assembly code tries to lock using the fast path
 499 // of this algorithm. Make sure to update that code if the following function is
 500 // changed. The implementation is extremely sensitive to race condition. Be careful.
 501 
 502 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
 503   if (obj->klass()->is_value_based()) {
 504     handle_sync_on_value_based_class(obj, current);
 505   }
 506 
 507   current->inc_held_monitor_count();
 508 
 509   if (!useHeavyMonitors()) {
 510     if (LockingMode == LM_LIGHTWEIGHT) {
 511       // Fast-locking does not use the 'lock' argument.
 512       LockStack& lock_stack = current->lock_stack();
 513       if (lock_stack.can_push()) {
 514         markWord mark = obj()->mark_acquire();
 515         while (mark.is_neutral()) {
 516           // Retry until a lock state change has been observed.  cas_set_mark() may collide with non lock bits modifications.
 517           // Try to swing into 'fast-locked' state.
 518           assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
 519           const markWord locked_mark = mark.set_fast_locked();
 520           const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
 521           if (old_mark == mark) {
 522             // Successfully fast-locked, push object to lock-stack and return.
 523             lock_stack.push(obj());
 524             return;
 525           }
 526           mark = old_mark;
 527         }
 528       }
 529       // All other paths fall-through to inflate-enter.
 530     } else if (LockingMode == LM_LEGACY) {
 531       markWord mark = obj->mark();
 532       if (mark.is_neutral()) {
 533         // Anticipate successful CAS -- the ST of the displaced mark must
 534         // be visible <= the ST performed by the CAS.
 535         lock->set_displaced_header(mark);
 536         if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 537           return;
 538         }
 539         // Fall through to inflate() ...
 540       } else if (mark.has_locker() &&
 541                  current->is_lock_owned((address) mark.locker())) {
 542         assert(lock != mark.locker(), "must not re-lock the same lock");
 543         assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
 544         lock->set_displaced_header(markWord::from_pointer(nullptr));
 545         return;
 546       }
 547 
 548       // The object header will never be displaced to this lock,
 549       // so it does not matter what the value is, except that it
 550       // must be non-zero to avoid looking like a re-entrant lock,
 551       // and must not look locked either.
 552       lock->set_displaced_header(markWord::unused_mark());
 553     }
 554   } else if (VerifyHeavyMonitors) {
 555     guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 556   }
 557 
 558   // An async deflation can race after the inflate() call and before
 559   // enter() can make the ObjectMonitor busy. enter() returns false if
 560   // we have lost the race to async deflation and we simply try again.
 561   while (true) {
 562     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 563     if (monitor->enter(current)) {
 564       return;
 565     }
 566   }
 567 }
 568 
 569 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 570   current->dec_held_monitor_count();
 571 
 572   if (!useHeavyMonitors()) {
 573     markWord mark = object->mark();
 574     if (LockingMode == LM_LIGHTWEIGHT) {
 575       // Fast-locking does not use the 'lock' argument.
 576       while (mark.is_fast_locked()) {
 577         // Retry until a lock state change has been observed.  cas_set_mark() may collide with non lock bits modifications.
 578         const markWord unlocked_mark = mark.set_unlocked();
 579         const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
 580         if (old_mark == mark) {
 581           current->lock_stack().remove(object);
 582           return;
 583         }
 584         mark = old_mark;
 585       }
 586     } else if (LockingMode == LM_LEGACY) {
 587       markWord dhw = lock->displaced_header();
 588       if (dhw.value() == 0) {
 589         // If the displaced header is null, then this exit matches up with
 590         // a recursive enter. No real work to do here except for diagnostics.
 591 #ifndef PRODUCT
 592         if (mark != markWord::INFLATING()) {
 593           // Only do diagnostics if we are not racing an inflation. Simply
 594           // exiting a recursive enter of a Java Monitor that is being
 595           // inflated is safe; see the has_monitor() comment below.
 596           assert(!mark.is_neutral(), "invariant");
 597           assert(!mark.has_locker() ||
 598                  current->is_lock_owned((address)mark.locker()), "invariant");
 599           if (mark.has_monitor()) {
 600             // The BasicLock's displaced_header is marked as a recursive
 601             // enter and we have an inflated Java Monitor (ObjectMonitor).
 602             // This is a special case where the Java Monitor was inflated
 603             // after this thread entered the stack-lock recursively. When a
 604             // Java Monitor is inflated, we cannot safely walk the Java
 605             // Monitor owner's stack and update the BasicLocks because a
 606             // Java Monitor can be asynchronously inflated by a thread that
 607             // does not own the Java Monitor.
 608             ObjectMonitor* m = mark.monitor();
 609             assert(m->object()->mark() == mark, "invariant");
 610             assert(m->is_entered(current), "invariant");
 611           }
 612         }
 613 #endif
 614         return;
 615       }
 616 
 617       if (mark == markWord::from_pointer(lock)) {
 618         // If the object is stack-locked by the current thread, try to
 619         // swing the displaced header from the BasicLock back to the mark.
 620         assert(dhw.is_neutral(), "invariant");
 621         if (object->cas_set_mark(dhw, mark) == mark) {
 622           return;
 623         }
 624       }
 625     }
 626   } else if (VerifyHeavyMonitors) {
 627     guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 628   }
 629 
 630   // We have to take the slow-path of possible inflation and then exit.
 631   // The ObjectMonitor* can't be async deflated until ownership is
 632   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 633   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 634   if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
 635     // It must be owned by us. Pop lock object from lock stack.
 636     LockStack& lock_stack = current->lock_stack();
 637     oop popped = lock_stack.pop();
 638     assert(popped == object, "must be owned by this thread");
 639     monitor->set_owner_from_anonymous(current);
 640   }
 641   monitor->exit(current);
 642 }
 643 
 644 // -----------------------------------------------------------------------------
 645 // JNI locks on java objects
 646 // NOTE: must use heavy weight monitor to handle jni monitor enter
 647 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 648   if (obj->klass()->is_value_based()) {
 649     handle_sync_on_value_based_class(obj, current);
 650   }
 651 
 652   // the current locking is from JNI instead of Java code
 653   current->set_current_pending_monitor_is_from_java(false);
 654   // An async deflation can race after the inflate() call and before
 655   // enter() can make the ObjectMonitor busy. enter() returns false if
 656   // we have lost the race to async deflation and we simply try again.
 657   while (true) {
 658     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 659     if (monitor->enter(current)) {
 660       current->inc_held_monitor_count(1, true);
 661       break;
 662     }
 663   }
 664   current->set_current_pending_monitor_is_from_java(true);
 665 }
 666 
 667 // NOTE: must use heavy weight monitor to handle jni monitor exit
 668 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 669   JavaThread* current = THREAD;
 670 
 671   // The ObjectMonitor* can't be async deflated until ownership is
 672   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 673   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 674   // If this thread has locked the object, exit the monitor. We
 675   // intentionally do not use CHECK on check_owner because we must exit the
 676   // monitor even if an exception was already pending.
 677   if (monitor->check_owner(THREAD)) {
 678     monitor->exit(current);
 679     current->dec_held_monitor_count(1, true);
 680   }
 681 }
 682 
 683 // -----------------------------------------------------------------------------
 684 // Internal VM locks on java objects
 685 // standard constructor, allows locking failures
 686 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 687   _thread = thread;
 688   _thread->check_for_valid_safepoint_state();
 689   _obj = obj;
 690 
 691   if (_obj() != nullptr) {
 692     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 693   }
 694 }
 695 
 696 ObjectLocker::~ObjectLocker() {
 697   if (_obj() != nullptr) {
 698     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 699   }
 700 }
 701 
 702 
 703 // -----------------------------------------------------------------------------
 704 //  Wait/Notify/NotifyAll
 705 // NOTE: must use heavy weight monitor to handle wait()
 706 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 707   JavaThread* current = THREAD;
 708   if (millis < 0) {
 709     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 710   }
 711   // The ObjectMonitor* can't be async deflated because the _waiters
 712   // field is incremented before ownership is dropped and decremented
 713   // after ownership is regained.
 714   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 715 
 716   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 717   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 718 
 719   // This dummy call is in place to get around dtrace bug 6254741.  Once
 720   // that's fixed we can uncomment the following line, remove the call
 721   // and change this function back into a "void" func.
 722   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 723   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 724   return ret_code;
 725 }
 726 
 727 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 728   if (millis < 0) {
 729     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 730   }
 731   ObjectSynchronizer::inflate(THREAD,
 732                               obj(),
 733                               inflate_cause_wait)->wait(millis, false, THREAD);
 734 }
 735 
 736 
 737 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 738   JavaThread* current = THREAD;
 739 
 740   markWord mark = obj->mark();
 741   if (LockingMode == LM_LIGHTWEIGHT) {
 742     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 743       // Not inflated so there can't be any waiters to notify.
 744       return;
 745     }
 746   } else if (LockingMode == LM_LEGACY) {
 747     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 748       // Not inflated so there can't be any waiters to notify.
 749       return;
 750     }
 751   }
 752   // The ObjectMonitor* can't be async deflated until ownership is
 753   // dropped by the calling thread.
 754   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 755   monitor->notify(CHECK);
 756 }
 757 
 758 // NOTE: see comment of notify()
 759 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 760   JavaThread* current = THREAD;
 761 
 762   markWord mark = obj->mark();
 763   if (LockingMode == LM_LIGHTWEIGHT) {
 764     if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 765       // Not inflated so there can't be any waiters to notify.
 766       return;
 767     }
 768   } else if (LockingMode == LM_LEGACY) {
 769     if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 770       // Not inflated so there can't be any waiters to notify.
 771       return;
 772     }
 773   }
 774   // The ObjectMonitor* can't be async deflated until ownership is
 775   // dropped by the calling thread.
 776   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 777   monitor->notifyAll(CHECK);
 778 }
 779 
 780 // -----------------------------------------------------------------------------
 781 // Hash Code handling
 782 
 783 struct SharedGlobals {
 784   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 785   // This is a highly shared mostly-read variable.
 786   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 787   volatile int stw_random;
 788   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
 789   // Hot RW variable -- Sequester to avoid false-sharing
 790   volatile int hc_sequence;
 791   DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
 792 };
 793 
 794 static SharedGlobals GVars;
 795 
 796 static markWord read_stable_mark(oop obj) {
 797   markWord mark = obj->mark_acquire();
 798   if (!mark.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT) {
 799     // New lightweight locking does not use the markWord::INFLATING() protocol.
 800     return mark;       // normal fast-path return
 801   }
 802 
 803   int its = 0;
 804   for (;;) {
 805     markWord mark = obj->mark_acquire();
 806     if (!mark.is_being_inflated()) {
 807       return mark;    // normal fast-path return
 808     }
 809 
 810     // The object is being inflated by some other thread.
 811     // The caller of read_stable_mark() must wait for inflation to complete.
 812     // Avoid live-lock.
 813 
 814     ++its;
 815     if (its > 10000 || !os::is_MP()) {
 816       if (its & 1) {
 817         os::naked_yield();
 818       } else {
 819         // Note that the following code attenuates the livelock problem but is not
 820         // a complete remedy.  A more complete solution would require that the inflating
 821         // thread hold the associated inflation lock.  The following code simply restricts
 822         // the number of spinners to at most one.  We'll have N-2 threads blocked
 823         // on the inflationlock, 1 thread holding the inflation lock and using
 824         // a yield/park strategy, and 1 thread in the midst of inflation.
 825         // A more refined approach would be to change the encoding of INFLATING
 826         // to allow encapsulation of a native thread pointer.  Threads waiting for
 827         // inflation to complete would use CAS to push themselves onto a singly linked
 828         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
 829         // and calling park().  When inflation was complete the thread that accomplished inflation
 830         // would detach the list and set the markword to inflated with a single CAS and
 831         // then for each thread on the list, set the flag and unpark() the thread.
 832 
 833         // Index into the lock array based on the current object address.
 834         static_assert(is_power_of_2(inflation_lock_count()), "must be");
 835         size_t ix = (cast_from_oop<intptr_t>(obj) >> 5) & (inflation_lock_count() - 1);
 836         int YieldThenBlock = 0;
 837         assert(ix < inflation_lock_count(), "invariant");
 838         inflation_lock(ix)->lock();
 839         while (obj->mark_acquire() == markWord::INFLATING()) {
 840           // Beware: naked_yield() is advisory and has almost no effect on some platforms
 841           // so we periodically call current->_ParkEvent->park(1).
 842           // We use a mixed spin/yield/block mechanism.
 843           if ((YieldThenBlock++) >= 16) {
 844             Thread::current()->_ParkEvent->park(1);
 845           } else {
 846             os::naked_yield();
 847           }
 848         }
 849         inflation_lock(ix)->unlock();
 850       }
 851     } else {
 852       SpinPause();       // SMP-polite spinning
 853     }
 854   }
 855 }
 856 
 857 // hashCode() generation :
 858 //
 859 // Possibilities:
 860 // * MD5Digest of {obj,stw_random}
 861 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
 862 // * A DES- or AES-style SBox[] mechanism
 863 // * One of the Phi-based schemes, such as:
 864 //   2654435761 = 2^32 * Phi (golden ratio)
 865 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
 866 // * A variation of Marsaglia's shift-xor RNG scheme.
 867 // * (obj ^ stw_random) is appealing, but can result
 868 //   in undesirable regularity in the hashCode values of adjacent objects
 869 //   (objects allocated back-to-back, in particular).  This could potentially
 870 //   result in hashtable collisions and reduced hashtable efficiency.
 871 //   There are simple ways to "diffuse" the middle address bits over the
 872 //   generated hashCode values:
 873 
 874 static inline intptr_t get_next_hash(Thread* current, oop obj) {
 875   intptr_t value = 0;
 876   if (hashCode == 0) {
 877     // This form uses global Park-Miller RNG.
 878     // On MP system we'll have lots of RW access to a global, so the
 879     // mechanism induces lots of coherency traffic.
 880     value = os::random();
 881   } else if (hashCode == 1) {
 882     // This variation has the property of being stable (idempotent)
 883     // between STW operations.  This can be useful in some of the 1-0
 884     // synchronization schemes.
 885     intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
 886     value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
 887   } else if (hashCode == 2) {
 888     value = 1;            // for sensitivity testing
 889   } else if (hashCode == 3) {
 890     value = ++GVars.hc_sequence;
 891   } else if (hashCode == 4) {
 892     value = cast_from_oop<intptr_t>(obj);
 893   } else {
 894     // Marsaglia's xor-shift scheme with thread-specific state
 895     // This is probably the best overall implementation -- we'll
 896     // likely make this the default in future releases.
 897     unsigned t = current->_hashStateX;
 898     t ^= (t << 11);
 899     current->_hashStateX = current->_hashStateY;
 900     current->_hashStateY = current->_hashStateZ;
 901     current->_hashStateZ = current->_hashStateW;
 902     unsigned v = current->_hashStateW;
 903     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 904     current->_hashStateW = v;
 905     value = v;
 906   }
 907 
 908   value &= markWord::hash_mask;
 909   if (value == 0) value = 0xBAD;
 910   assert(value != markWord::no_hash, "invariant");
 911   return value;
 912 }
 913 
 914 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 915 
 916   while (true) {
 917     ObjectMonitor* monitor = nullptr;
 918     markWord temp, test;
 919     intptr_t hash;
 920     markWord mark = read_stable_mark(obj);
 921     if (VerifyHeavyMonitors) {
 922       assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
 923       guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
 924     }
 925     if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
 926       hash = mark.hash();
 927       if (hash != 0) {                     // if it has a hash, just return it
 928         return hash;
 929       }
 930       hash = get_next_hash(current, obj);  // get a new hash
 931       temp = mark.copy_set_hash(hash);     // merge the hash into header
 932                                            // try to install the hash
 933       test = obj->cas_set_mark(temp, mark);
 934       if (test == mark) {                  // if the hash was installed, return it
 935         return hash;
 936       }
 937       if (LockingMode == LM_LIGHTWEIGHT) {
 938         // CAS failed, retry
 939         continue;
 940       }
 941       // Failed to install the hash. It could be that another thread
 942       // installed the hash just before our attempt or inflation has
 943       // occurred or... so we fall thru to inflate the monitor for
 944       // stability and then install the hash.
 945     } else if (mark.has_monitor()) {
 946       monitor = mark.monitor();
 947       temp = monitor->header();
 948       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
 949       hash = temp.hash();
 950       if (hash != 0) {
 951         // It has a hash.
 952 
 953         // Separate load of dmw/header above from the loads in
 954         // is_being_async_deflated().
 955 
 956         // dmw/header and _contentions may get written by different threads.
 957         // Make sure to observe them in the same order when having several observers.
 958         OrderAccess::loadload_for_IRIW();
 959 
 960         if (monitor->is_being_async_deflated()) {
 961           // But we can't safely use the hash if we detect that async
 962           // deflation has occurred. So we attempt to restore the
 963           // header/dmw to the object's header so that we only retry
 964           // once if the deflater thread happens to be slow.
 965           monitor->install_displaced_markword_in_object(obj);
 966           continue;
 967         }
 968         return hash;
 969       }
 970       // Fall thru so we only have one place that installs the hash in
 971       // the ObjectMonitor.
 972     } else if (LockingMode == LM_LEGACY && mark.has_locker()
 973                && current->is_Java_thread()
 974                && JavaThread::cast(current)->is_lock_owned((address)mark.locker())) {
 975       // This is a stack-lock owned by the calling thread so fetch the
 976       // displaced markWord from the BasicLock on the stack.
 977       temp = mark.displaced_mark_helper();
 978       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
 979       hash = temp.hash();
 980       if (hash != 0) {                  // if it has a hash, just return it
 981         return hash;
 982       }
 983       // WARNING:
 984       // The displaced header in the BasicLock on a thread's stack
 985       // is strictly immutable. It CANNOT be changed in ANY cases.
 986       // So we have to inflate the stack-lock into an ObjectMonitor
 987       // even if the current thread owns the lock. The BasicLock on
 988       // a thread's stack can be asynchronously read by other threads
 989       // during an inflate() call so any change to that stack memory
 990       // may not propagate to other threads correctly.
 991     }
 992 
 993     // Inflate the monitor to set the hash.
 994 
 995     // An async deflation can race after the inflate() call and before we
 996     // can update the ObjectMonitor's header with the hash value below.
 997     monitor = inflate(current, obj, inflate_cause_hash_code);
 998     // Load ObjectMonitor's header/dmw field and see if it has a hash.
 999     mark = monitor->header();
1000     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1001     hash = mark.hash();
1002     if (hash == 0) {                       // if it does not have a hash
1003       hash = get_next_hash(current, obj);  // get a new hash
1004       temp = mark.copy_set_hash(hash)   ;  // merge the hash into header
1005       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1006       uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1007       test = markWord(v);
1008       if (test != mark) {
1009         // The attempt to update the ObjectMonitor's header/dmw field
1010         // did not work. This can happen if another thread managed to
1011         // merge in the hash just before our cmpxchg().
1012         // If we add any new usages of the header/dmw field, this code
1013         // will need to be updated.
1014         hash = test.hash();
1015         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1016         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1017       }
1018       if (monitor->is_being_async_deflated()) {
1019         // If we detect that async deflation has occurred, then we
1020         // attempt to restore the header/dmw to the object's header
1021         // so that we only retry once if the deflater thread happens
1022         // to be slow.
1023         monitor->install_displaced_markword_in_object(obj);
1024         continue;
1025       }
1026     }
1027     // We finally get the hash.
1028     return hash;
1029   }
1030 }
1031 
1032 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1033                                                    Handle h_obj) {
1034   assert(current == JavaThread::current(), "Can only be called on current thread");
1035   oop obj = h_obj();
1036 
1037   markWord mark = read_stable_mark(obj);
1038 
1039   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1040     // stack-locked case, header points into owner's stack
1041     return current->is_lock_owned((address)mark.locker());
1042   }
1043 
1044   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1045     // fast-locking case, see if lock is in current's lock stack
1046     return current->lock_stack().contains(h_obj());
1047   }
1048 
1049   if (mark.has_monitor()) {
1050     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1051     // The first stage of async deflation does not affect any field
1052     // used by this comparison so the ObjectMonitor* is usable here.
1053     ObjectMonitor* monitor = mark.monitor();
1054     return monitor->is_entered(current) != 0;
1055   }
1056   // Unlocked case, header in place
1057   assert(mark.is_neutral(), "sanity check");
1058   return false;
1059 }
1060 
1061 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1062   oop obj = h_obj();
1063   markWord mark = read_stable_mark(obj);
1064 
1065   if (LockingMode == LM_LEGACY && mark.has_locker()) {
1066     // stack-locked so header points into owner's stack.
1067     // owning_thread_from_monitor_owner() may also return null here:
1068     return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1069   }
1070 
1071   if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1072     // fast-locked so get owner from the object.
1073     // owning_thread_from_object() may also return null here:
1074     return Threads::owning_thread_from_object(t_list, h_obj());
1075   }
1076 
1077   if (mark.has_monitor()) {
1078     // Inflated monitor so header points to ObjectMonitor (tagged pointer).
1079     // The first stage of async deflation does not affect any field
1080     // used by this comparison so the ObjectMonitor* is usable here.
1081     ObjectMonitor* monitor = mark.monitor();
1082     assert(monitor != nullptr, "monitor should be non-null");
1083     // owning_thread_from_monitor() may also return null here:
1084     return Threads::owning_thread_from_monitor(t_list, monitor);
1085   }
1086 
1087   // Unlocked case, header in place
1088   // Cannot have assertion since this object may have been
1089   // locked by another thread when reaching here.
1090   // assert(mark.is_neutral(), "sanity check");
1091 
1092   return nullptr;
1093 }
1094 
1095 // Visitors ...
1096 
1097 // Iterate over all ObjectMonitors.
1098 template <typename Function>
1099 void ObjectSynchronizer::monitors_iterate(Function function) {
1100   MonitorList::Iterator iter = _in_use_list.iterator();
1101   while (iter.has_next()) {
1102     ObjectMonitor* monitor = iter.next();
1103     function(monitor);
1104   }
1105 }
1106 
1107 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
1108 // returns true.
1109 template <typename OwnerFilter>
1110 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
1111   monitors_iterate([&](ObjectMonitor* monitor) {
1112     // This function is only called at a safepoint or when the
1113     // target thread is suspended or when the target thread is
1114     // operating on itself. The current closures in use today are
1115     // only interested in an owned ObjectMonitor and ownership
1116     // cannot be dropped under the calling contexts so the
1117     // ObjectMonitor cannot be async deflated.
1118     if (monitor->has_owner() && filter(monitor->owner_raw())) {
1119       assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
1120 
1121       closure->do_monitor(monitor);
1122     }
1123   });
1124 }
1125 
1126 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1127 // ObjectMonitors where owner is set to a stack-lock address in thread.
1128 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1129   auto thread_filter = [&](void* owner) { return owner == thread; };
1130   return owned_monitors_iterate_filtered(closure, thread_filter);
1131 }
1132 
1133 // Iterate ObjectMonitors owned by any thread.
1134 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
1135   auto all_filter = [&](void* owner) { return true; };
1136   return owned_monitors_iterate_filtered(closure, all_filter);
1137 }
1138 
1139 static bool monitors_used_above_threshold(MonitorList* list) {
1140   if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
1141     return false;
1142   }
1143   // Start with ceiling based on a per-thread estimate:
1144   size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
1145   size_t old_ceiling = ceiling;
1146   if (ceiling < list->max()) {
1147     // The max used by the system has exceeded the ceiling so use that:
1148     ceiling = list->max();
1149   }
1150   size_t monitors_used = list->count();
1151   if (monitors_used == 0) {  // empty list is easy
1152     return false;
1153   }
1154   if (NoAsyncDeflationProgressMax != 0 &&
1155       _no_progress_cnt >= NoAsyncDeflationProgressMax) {
1156     float remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
1157     size_t new_ceiling = ceiling + (ceiling * remainder) + 1;
1158     ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
1159     log_info(monitorinflation)("Too many deflations without progress; "
1160                                "bumping in_use_list_ceiling from " SIZE_FORMAT
1161                                " to " SIZE_FORMAT, old_ceiling, new_ceiling);
1162     _no_progress_cnt = 0;
1163     ceiling = new_ceiling;
1164   }
1165 
1166   // Check if our monitor usage is above the threshold:
1167   size_t monitor_usage = (monitors_used * 100LL) / ceiling;
1168   if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
1169     log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
1170                                ", monitor_usage=" SIZE_FORMAT ", threshold=" INTX_FORMAT,
1171                                monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
1172     return true;
1173   }
1174 
1175   return false;
1176 }
1177 
1178 size_t ObjectSynchronizer::in_use_list_ceiling() {
1179   return _in_use_list_ceiling;
1180 }
1181 
1182 void ObjectSynchronizer::dec_in_use_list_ceiling() {
1183   Atomic::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
1184 }
1185 
1186 void ObjectSynchronizer::inc_in_use_list_ceiling() {
1187   Atomic::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
1188 }
1189 
1190 void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
1191   _in_use_list_ceiling = new_value;
1192 }
1193 
1194 bool ObjectSynchronizer::is_async_deflation_needed() {
1195   if (is_async_deflation_requested()) {
1196     // Async deflation request.
1197     log_info(monitorinflation)("Async deflation needed: explicit request");
1198     return true;
1199   }
1200 
1201   jlong time_since_last = time_since_last_async_deflation_ms();
1202 
1203   if (AsyncDeflationInterval > 0 &&
1204       time_since_last > AsyncDeflationInterval &&
1205       monitors_used_above_threshold(&_in_use_list)) {
1206     // It's been longer than our specified deflate interval and there
1207     // are too many monitors in use. We don't deflate more frequently
1208     // than AsyncDeflationInterval (unless is_async_deflation_requested)
1209     // in order to not swamp the MonitorDeflationThread.
1210     log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
1211     return true;
1212   }
1213 
1214   if (GuaranteedAsyncDeflationInterval > 0 &&
1215       time_since_last > GuaranteedAsyncDeflationInterval) {
1216     // It's been longer than our specified guaranteed deflate interval.
1217     // We need to clean up the used monitors even if the threshold is
1218     // not reached, to keep the memory utilization at bay when many threads
1219     // touched many monitors.
1220     log_info(monitorinflation)("Async deflation needed: guaranteed interval (" INTX_FORMAT " ms) "
1221                                "is greater than time since last deflation (" JLONG_FORMAT " ms)",
1222                                GuaranteedAsyncDeflationInterval, time_since_last);
1223 
1224     // If this deflation has no progress, then it should not affect the no-progress
1225     // tracking, otherwise threshold heuristics would think it was triggered, experienced
1226     // no progress, and needs to backoff more aggressively. In this "no progress" case,
1227     // the generic code would bump the no-progress counter, and we compensate for that
1228     // by telling it to skip the update.
1229     //
1230     // If this deflation has progress, then it should let non-progress tracking
1231     // know about this, otherwise the threshold heuristics would kick in, potentially
1232     // experience no-progress due to aggressive cleanup by this deflation, and think
1233     // it is still in no-progress stride. In this "progress" case, the generic code would
1234     // zero the counter, and we allow it to happen.
1235     _no_progress_skip_increment = true;
1236 
1237     return true;
1238   }
1239 
1240   return false;
1241 }
1242 
1243 void ObjectSynchronizer::request_deflate_idle_monitors() {
1244   MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
1245   set_is_async_deflation_requested(true);
1246   ml.notify_all();
1247 }
1248 
1249 bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
1250   JavaThread* current = JavaThread::current();
1251   bool ret_code = false;
1252 
1253   jlong last_time = last_async_deflation_time_ns();
1254 
1255   request_deflate_idle_monitors();
1256 
1257   const int N_CHECKS = 5;
1258   for (int i = 0; i < N_CHECKS; i++) {  // sleep for at most 5 seconds
1259     if (last_async_deflation_time_ns() > last_time) {
1260       log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
1261       ret_code = true;
1262       break;
1263     }
1264     {
1265       // JavaThread has to honor the blocking protocol.
1266       ThreadBlockInVM tbivm(current);
1267       os::naked_short_sleep(999);  // sleep for almost 1 second
1268     }
1269   }
1270   if (!ret_code) {
1271     log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
1272   }
1273 
1274   return ret_code;
1275 }
1276 
1277 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1278   return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1279 }
1280 
1281 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1282                                        const oop obj,
1283                                        ObjectSynchronizer::InflateCause cause) {
1284   assert(event != nullptr, "invariant");
1285   event->set_monitorClass(obj->klass());
1286   event->set_address((uintptr_t)(void*)obj);
1287   event->set_cause((u1)cause);
1288   event->commit();
1289 }
1290 
1291 // Fast path code shared by multiple functions
1292 void ObjectSynchronizer::inflate_helper(oop obj) {
1293   markWord mark = obj->mark_acquire();
1294   if (mark.has_monitor()) {
1295     ObjectMonitor* monitor = mark.monitor();
1296     markWord dmw = monitor->header();
1297     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1298     return;
1299   }
1300   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1301 }
1302 
1303 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
1304 // calculations as part of JVM/TI tagging.
1305 static bool is_lock_owned(Thread* thread, oop obj) {
1306   assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
1307   return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
1308 }
1309 
1310 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1311                                            const InflateCause cause) {
1312   EventJavaMonitorInflate event;
1313 
1314   for (;;) {
1315     const markWord mark = object->mark_acquire();
1316 
1317     // The mark can be in one of the following states:
1318     // *  inflated     - Just return if using stack-locking.
1319     //                   If using fast-locking and the ObjectMonitor owner
1320     //                   is anonymous and the current thread owns the
1321     //                   object lock, then we make the current thread the
1322     //                   ObjectMonitor owner and remove the lock from the
1323     //                   current thread's lock stack.
1324     // *  fast-locked  - Coerce it to inflated from fast-locked.
1325     // *  stack-locked - Coerce it to inflated from stack-locked.
1326     // *  INFLATING    - Busy wait for conversion from stack-locked to
1327     //                   inflated.
1328     // *  neutral      - Aggressively inflate the object.
1329 
1330     // CASE: inflated
1331     if (mark.has_monitor()) {
1332       ObjectMonitor* inf = mark.monitor();
1333       markWord dmw = inf->header();
1334       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1335       if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
1336         inf->set_owner_from_anonymous(current);
1337         JavaThread::cast(current)->lock_stack().remove(object);
1338       }
1339       return inf;
1340     }
1341 
1342     if (LockingMode != LM_LIGHTWEIGHT) {
1343       // New lightweight locking does not use INFLATING.
1344       // CASE: inflation in progress - inflating over a stack-lock.
1345       // Some other thread is converting from stack-locked to inflated.
1346       // Only that thread can complete inflation -- other threads must wait.
1347       // The INFLATING value is transient.
1348       // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1349       // We could always eliminate polling by parking the thread on some auxiliary list.
1350       if (mark == markWord::INFLATING()) {
1351         read_stable_mark(object);
1352         continue;
1353       }
1354     }
1355 
1356     // CASE: fast-locked
1357     // Could be fast-locked either by current or by some other thread.
1358     //
1359     // Note that we allocate the ObjectMonitor speculatively, _before_
1360     // attempting to set the object's mark to the new ObjectMonitor. If
1361     // this thread owns the monitor, then we set the ObjectMonitor's
1362     // owner to this thread. Otherwise, we set the ObjectMonitor's owner
1363     // to anonymous. If we lose the race to set the object's mark to the
1364     // new ObjectMonitor, then we just delete it and loop around again.
1365     //
1366     LogStreamHandle(Trace, monitorinflation) lsh;
1367     if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1368       ObjectMonitor* monitor = new ObjectMonitor(object);
1369       monitor->set_header(mark.set_unlocked());
1370       bool own = is_lock_owned(current, object);
1371       if (own) {
1372         // Owned by us.
1373         monitor->set_owner_from(nullptr, current);
1374       } else {
1375         // Owned by somebody else.
1376         monitor->set_owner_anonymous();
1377       }
1378       markWord monitor_mark = markWord::encode(monitor);
1379       markWord old_mark = object->cas_set_mark(monitor_mark, mark);
1380       if (old_mark == mark) {
1381         // Success! Return inflated monitor.
1382         if (own) {
1383           JavaThread::cast(current)->lock_stack().remove(object);
1384         }
1385         // Once the ObjectMonitor is configured and object is associated
1386         // with the ObjectMonitor, it is safe to allow async deflation:
1387         _in_use_list.add(monitor);
1388 
1389         // Hopefully the performance counters are allocated on distinct
1390         // cache lines to avoid false sharing on MP systems ...
1391         OM_PERFDATA_OP(Inflations, inc());
1392         if (log_is_enabled(Trace, monitorinflation)) {
1393           ResourceMark rm(current);
1394           lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1395                        INTPTR_FORMAT ", type='%s'", p2i(object),
1396                        object->mark().value(), object->klass()->external_name());
1397         }
1398         if (event.should_commit()) {
1399           post_monitor_inflate_event(&event, object, cause);
1400         }
1401         return monitor;
1402       } else {
1403         delete monitor;
1404         continue;  // Interference -- just retry
1405       }
1406     }
1407 
1408     // CASE: stack-locked
1409     // Could be stack-locked either by current or by some other thread.
1410     //
1411     // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1412     // to install INFLATING into the mark word.  We originally installed INFLATING,
1413     // allocated the ObjectMonitor, and then finally STed the address of the
1414     // ObjectMonitor into the mark.  This was correct, but artificially lengthened
1415     // the interval in which INFLATING appeared in the mark, thus increasing
1416     // the odds of inflation contention. If we lose the race to set INFLATING,
1417     // then we just delete the ObjectMonitor and loop around again.
1418     //
1419     if (LockingMode == LM_LEGACY && mark.has_locker()) {
1420       assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
1421       ObjectMonitor* m = new ObjectMonitor(object);
1422       // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1423       // We do this before the CAS in order to minimize the length of time
1424       // in which INFLATING appears in the mark.
1425 
1426       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1427       if (cmp != mark) {
1428         delete m;
1429         continue;       // Interference -- just retry
1430       }
1431 
1432       // We've successfully installed INFLATING (0) into the mark-word.
1433       // This is the only case where 0 will appear in a mark-word.
1434       // Only the singular thread that successfully swings the mark-word
1435       // to 0 can perform (or more precisely, complete) inflation.
1436       //
1437       // Why do we CAS a 0 into the mark-word instead of just CASing the
1438       // mark-word from the stack-locked value directly to the new inflated state?
1439       // Consider what happens when a thread unlocks a stack-locked object.
1440       // It attempts to use CAS to swing the displaced header value from the
1441       // on-stack BasicLock back into the object header.  Recall also that the
1442       // header value (hash code, etc) can reside in (a) the object header, or
1443       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1444       // header in an ObjectMonitor.  The inflate() routine must copy the header
1445       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1446       // the while preserving the hashCode stability invariants.  If the owner
1447       // decides to release the lock while the value is 0, the unlock will fail
1448       // and control will eventually pass from slow_exit() to inflate.  The owner
1449       // will then spin, waiting for the 0 value to disappear.   Put another way,
1450       // the 0 causes the owner to stall if the owner happens to try to
1451       // drop the lock (restoring the header from the BasicLock to the object)
1452       // while inflation is in-progress.  This protocol avoids races that might
1453       // would otherwise permit hashCode values to change or "flicker" for an object.
1454       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1455       // 0 serves as a "BUSY" inflate-in-progress indicator.
1456 
1457 
1458       // fetch the displaced mark from the owner's stack.
1459       // The owner can't die or unwind past the lock while our INFLATING
1460       // object is in the mark.  Furthermore the owner can't complete
1461       // an unlock on the object, either.
1462       markWord dmw = mark.displaced_mark_helper();
1463       // Catch if the object's header is not neutral (not locked and
1464       // not marked is what we care about here).
1465       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1466 
1467       // Setup monitor fields to proper values -- prepare the monitor
1468       m->set_header(dmw);
1469 
1470       // Optimization: if the mark.locker stack address is associated
1471       // with this thread we could simply set m->_owner = current.
1472       // Note that a thread can inflate an object
1473       // that it has stack-locked -- as might happen in wait() -- directly
1474       // with CAS.  That is, we can avoid the xchg-nullptr .... ST idiom.
1475       m->set_owner_from(nullptr, mark.locker());
1476       // TODO-FIXME: assert BasicLock->dhw != 0.
1477 
1478       // Must preserve store ordering. The monitor state must
1479       // be stable at the time of publishing the monitor address.
1480       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1481       // Release semantics so that above set_object() is seen first.
1482       object->release_set_mark(markWord::encode(m));
1483 
1484       // Once ObjectMonitor is configured and the object is associated
1485       // with the ObjectMonitor, it is safe to allow async deflation:
1486       _in_use_list.add(m);
1487 
1488       // Hopefully the performance counters are allocated on distinct cache lines
1489       // to avoid false sharing on MP systems ...
1490       OM_PERFDATA_OP(Inflations, inc());
1491       if (log_is_enabled(Trace, monitorinflation)) {
1492         ResourceMark rm(current);
1493         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1494                      INTPTR_FORMAT ", type='%s'", p2i(object),
1495                      object->mark().value(), object->klass()->external_name());
1496       }
1497       if (event.should_commit()) {
1498         post_monitor_inflate_event(&event, object, cause);
1499       }
1500       return m;
1501     }
1502 
1503     // CASE: neutral
1504     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1505     // If we know we're inflating for entry it's better to inflate by swinging a
1506     // pre-locked ObjectMonitor pointer into the object header.   A successful
1507     // CAS inflates the object *and* confers ownership to the inflating thread.
1508     // In the current implementation we use a 2-step mechanism where we CAS()
1509     // to inflate and then CAS() again to try to swing _owner from null to current.
1510     // An inflateTry() method that we could call from enter() would be useful.
1511 
1512     // Catch if the object's header is not neutral (not locked and
1513     // not marked is what we care about here).
1514     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1515     ObjectMonitor* m = new ObjectMonitor(object);
1516     // prepare m for installation - set monitor to initial state
1517     m->set_header(mark);
1518 
1519     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1520       delete m;
1521       m = nullptr;
1522       continue;
1523       // interference - the markword changed - just retry.
1524       // The state-transitions are one-way, so there's no chance of
1525       // live-lock -- "Inflated" is an absorbing state.
1526     }
1527 
1528     // Once the ObjectMonitor is configured and object is associated
1529     // with the ObjectMonitor, it is safe to allow async deflation:
1530     _in_use_list.add(m);
1531 
1532     // Hopefully the performance counters are allocated on distinct
1533     // cache lines to avoid false sharing on MP systems ...
1534     OM_PERFDATA_OP(Inflations, inc());
1535     if (log_is_enabled(Trace, monitorinflation)) {
1536       ResourceMark rm(current);
1537       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1538                    INTPTR_FORMAT ", type='%s'", p2i(object),
1539                    object->mark().value(), object->klass()->external_name());
1540     }
1541     if (event.should_commit()) {
1542       post_monitor_inflate_event(&event, object, cause);
1543     }
1544     return m;
1545   }
1546 }
1547 
1548 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1549                                            const char* cnt_name, size_t cnt,
1550                                            LogStream* ls, elapsedTimer* timer_p) {
1551   if (!SafepointMechanism::should_process(current)) {
1552     return;
1553   }
1554 
1555   // A safepoint/handshake has started.
1556   if (ls != nullptr) {
1557     timer_p->stop();
1558     ls->print_cr("pausing %s: %s=" SIZE_FORMAT ", in_use_list stats: ceiling="
1559                  SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1560                  op_name, cnt_name, cnt, in_use_list_ceiling(),
1561                  _in_use_list.count(), _in_use_list.max());
1562   }
1563 
1564   {
1565     // Honor block request.
1566     ThreadBlockInVM tbivm(current);
1567   }
1568 
1569   if (ls != nullptr) {
1570     ls->print_cr("resuming %s: in_use_list stats: ceiling=" SIZE_FORMAT
1571                  ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, op_name,
1572                  in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1573     timer_p->start();
1574   }
1575 }
1576 
1577 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1578 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1579 //
1580 size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
1581                                                 elapsedTimer* timer_p) {
1582   MonitorList::Iterator iter = _in_use_list.iterator();
1583   size_t deflated_count = 0;
1584 
1585   while (iter.has_next()) {
1586     if (deflated_count >= (size_t)MonitorDeflationMax) {
1587       break;
1588     }
1589     ObjectMonitor* mid = iter.next();
1590     if (mid->deflate_monitor()) {
1591       deflated_count++;
1592     }
1593 
1594     if (current->is_Java_thread()) {
1595       // A JavaThread must check for a safepoint/handshake and honor it.
1596       chk_for_block_req(JavaThread::cast(current), "deflation", "deflated_count",
1597                         deflated_count, ls, timer_p);
1598     }
1599   }
1600 
1601   return deflated_count;
1602 }
1603 
1604 class HandshakeForDeflation : public HandshakeClosure {
1605  public:
1606   HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1607 
1608   void do_thread(Thread* thread) {
1609     log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1610                                 INTPTR_FORMAT, p2i(thread));
1611   }
1612 };
1613 
1614 class VM_RendezvousGCThreads : public VM_Operation {
1615 public:
1616   bool evaluate_at_safepoint() const override { return false; }
1617   VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1618   void doit() override {
1619     Universe::heap()->safepoint_synchronize_begin();
1620     Universe::heap()->safepoint_synchronize_end();
1621   };
1622 };
1623 
1624 static size_t delete_monitors(Thread* current, GrowableArray<ObjectMonitor*>* delete_list,
1625                               LogStream* ls, elapsedTimer* timer_p) {
1626   NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1627   size_t deleted_count = 0;
1628   for (ObjectMonitor* monitor: *delete_list) {
1629     delete monitor;
1630     deleted_count++;
1631     if (current->is_Java_thread()) {
1632       // A JavaThread must check for a safepoint/handshake and honor it.
1633       ObjectSynchronizer::chk_for_block_req(JavaThread::cast(current), "deletion", "deleted_count",
1634                                             deleted_count, ls, timer_p);
1635     }
1636   }
1637   return deleted_count;
1638 }
1639 
1640 // This function is called by the MonitorDeflationThread to deflate
1641 // ObjectMonitors.
1642 size_t ObjectSynchronizer::deflate_idle_monitors() {
1643   Thread* current = Thread::current();
1644   if (current->is_Java_thread()) {
1645     // The async deflation request has been processed.
1646     _last_async_deflation_time_ns = os::javaTimeNanos();
1647     set_is_async_deflation_requested(false);
1648   }
1649 
1650   LogStreamHandle(Debug, monitorinflation) lsh_debug;
1651   LogStreamHandle(Info, monitorinflation) lsh_info;
1652   LogStream* ls = nullptr;
1653   if (log_is_enabled(Debug, monitorinflation)) {
1654     ls = &lsh_debug;
1655   } else if (log_is_enabled(Info, monitorinflation)) {
1656     ls = &lsh_info;
1657   }
1658 
1659   elapsedTimer timer;
1660   if (ls != nullptr) {
1661     ls->print_cr("begin deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1662                  in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1663     timer.start();
1664   }
1665 
1666   // Deflate some idle ObjectMonitors.
1667   size_t deflated_count = deflate_monitor_list(current, ls, &timer);
1668   size_t unlinked_count = 0;
1669   size_t deleted_count = 0;
1670   if (deflated_count > 0) {
1671     // There are ObjectMonitors that have been deflated.
1672 
1673     // Unlink deflated ObjectMonitors from the in-use list.
1674     ResourceMark rm;
1675     GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1676     unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, deflated_count, &delete_list);
1677     if (current->is_monitor_deflation_thread()) {
1678       if (ls != nullptr) {
1679         timer.stop();
1680         ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1681                      ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1682                      SIZE_FORMAT ", max=" SIZE_FORMAT,
1683                      unlinked_count, in_use_list_ceiling(),
1684                      _in_use_list.count(), _in_use_list.max());
1685       }
1686 
1687       // A JavaThread needs to handshake in order to safely free the
1688       // ObjectMonitors that were deflated in this cycle.
1689       HandshakeForDeflation hfd_hc;
1690       Handshake::execute(&hfd_hc);
1691       // Also, we sync and desync GC threads around the handshake, so that they can
1692       // safely read the mark-word and look-through to the object-monitor, without
1693       // being afraid that the object-monitor is going away.
1694       VM_RendezvousGCThreads sync_gc;
1695       VMThread::execute(&sync_gc);
1696 
1697       if (ls != nullptr) {
1698         ls->print_cr("after handshaking: in_use_list stats: ceiling="
1699                      SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1700                      in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1701         timer.start();
1702       }
1703     } else {
1704       // This is not a monitor deflation thread.
1705       // No handshake or rendezvous is needed when we are already at safepoint.
1706       assert_at_safepoint();
1707     }
1708 
1709     // After the handshake, safely free the ObjectMonitors that were
1710     // deflated and unlinked in this cycle.
1711     deleted_count = delete_monitors(current, &delete_list, ls, &timer);
1712     assert(unlinked_count == deleted_count, "must be");
1713   }
1714 
1715   if (ls != nullptr) {
1716     timer.stop();
1717     if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) {
1718       ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
1719                    deflated_count, unlinked_count, timer.seconds());
1720     }
1721     ls->print_cr("end deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1722                  in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1723   }
1724 
1725   OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
1726   OM_PERFDATA_OP(Deflations, inc(deflated_count));
1727 
1728   GVars.stw_random = os::random();
1729 
1730   if (deflated_count != 0) {
1731     _no_progress_cnt = 0;
1732   } else if (_no_progress_skip_increment) {
1733     _no_progress_skip_increment = false;
1734   } else {
1735     _no_progress_cnt++;
1736   }
1737 
1738   return deflated_count;
1739 }
1740 
1741 // Monitor cleanup on JavaThread::exit
1742 
1743 // Iterate through monitor cache and attempt to release thread's monitors
1744 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1745  private:
1746   JavaThread* _thread;
1747 
1748  public:
1749   ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1750   void do_monitor(ObjectMonitor* mid) {
1751     intx rec = mid->complete_exit(_thread);
1752     _thread->dec_held_monitor_count(rec + 1);
1753   }
1754 };
1755 
1756 // Release all inflated monitors owned by current thread.  Lightweight monitors are
1757 // ignored.  This is meant to be called during JNI thread detach which assumes
1758 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1759 // Scanning the extant monitor list can be time consuming.
1760 // A simple optimization is to add a per-thread flag that indicates a thread
1761 // called jni_monitorenter() during its lifetime.
1762 //
1763 // Instead of NoSafepointVerifier it might be cheaper to
1764 // use an idiom of the form:
1765 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
1766 //   <code that must not run at safepoint>
1767 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1768 // Since the tests are extremely cheap we could leave them enabled
1769 // for normal product builds.
1770 
1771 void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1772   assert(current == JavaThread::current(), "must be current Java thread");
1773   NoSafepointVerifier nsv;
1774   ReleaseJavaMonitorsClosure rjmc(current);
1775   ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
1776   assert(!current->has_pending_exception(), "Should not be possible");
1777   current->clear_pending_exception();
1778   assert(current->held_monitor_count() == 0, "Should not be possible");
1779   // All monitors (including entered via JNI) have been unlocked above, so we need to clear jni count.
1780   current->clear_jni_monitor_count();
1781 }
1782 
1783 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1784   switch (cause) {
1785     case inflate_cause_vm_internal:    return "VM Internal";
1786     case inflate_cause_monitor_enter:  return "Monitor Enter";
1787     case inflate_cause_wait:           return "Monitor Wait";
1788     case inflate_cause_notify:         return "Monitor Notify";
1789     case inflate_cause_hash_code:      return "Monitor Hash Code";
1790     case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1791     case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1792     default:
1793       ShouldNotReachHere();
1794   }
1795   return "Unknown";
1796 }
1797 
1798 //------------------------------------------------------------------------------
1799 // Debugging code
1800 
1801 u_char* ObjectSynchronizer::get_gvars_addr() {
1802   return (u_char*)&GVars;
1803 }
1804 
1805 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1806   return (u_char*)&GVars.hc_sequence;
1807 }
1808 
1809 size_t ObjectSynchronizer::get_gvars_size() {
1810   return sizeof(SharedGlobals);
1811 }
1812 
1813 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
1814   return (u_char*)&GVars.stw_random;
1815 }
1816 
1817 // Do the final audit and print of ObjectMonitor stats; must be done
1818 // by the VMThread at VM exit time.
1819 void ObjectSynchronizer::do_final_audit_and_print_stats() {
1820   assert(Thread::current()->is_VM_thread(), "sanity check");
1821 
1822   if (is_final_audit()) {  // Only do the audit once.
1823     return;
1824   }
1825   set_is_final_audit();
1826   log_info(monitorinflation)("Starting the final audit.");
1827 
1828   if (log_is_enabled(Info, monitorinflation)) {
1829     // The other audit_and_print_stats() call is done at the Debug
1830     // level at a safepoint in SafepointSynchronize::do_cleanup_tasks.
1831     audit_and_print_stats(true /* on_exit */);
1832   }
1833 }
1834 
1835 // This function can be called at a safepoint or it can be called when
1836 // we are trying to exit the VM. When we are trying to exit the VM, the
1837 // list walker functions can run in parallel with the other list
1838 // operations so spin-locking is used for safety.
1839 //
1840 // Calls to this function can be added in various places as a debugging
1841 // aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
1842 // details logged at the Info level and 'false' for the 'on_exit'
1843 // parameter to have in-use monitor details logged at the Trace level.
1844 //
1845 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
1846   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
1847 
1848   LogStreamHandle(Debug, monitorinflation) lsh_debug;
1849   LogStreamHandle(Info, monitorinflation) lsh_info;
1850   LogStreamHandle(Trace, monitorinflation) lsh_trace;
1851   LogStream* ls = nullptr;
1852   if (log_is_enabled(Trace, monitorinflation)) {
1853     ls = &lsh_trace;
1854   } else if (log_is_enabled(Debug, monitorinflation)) {
1855     ls = &lsh_debug;
1856   } else if (log_is_enabled(Info, monitorinflation)) {
1857     ls = &lsh_info;
1858   }
1859   assert(ls != nullptr, "sanity check");
1860 
1861   int error_cnt = 0;
1862 
1863   ls->print_cr("Checking in_use_list:");
1864   chk_in_use_list(ls, &error_cnt);
1865 
1866   if (error_cnt == 0) {
1867     ls->print_cr("No errors found in in_use_list checks.");
1868   } else {
1869     log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
1870   }
1871 
1872   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
1873       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
1874     // When exiting this log output is at the Info level. When called
1875     // at a safepoint, this log output is at the Trace level since
1876     // there can be a lot of it.
1877     log_in_use_monitor_details(ls, !on_exit /* log_all */);
1878   }
1879 
1880   ls->flush();
1881 
1882   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1883 }
1884 
1885 // Check the in_use_list; log the results of the checks.
1886 void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
1887   size_t l_in_use_count = _in_use_list.count();
1888   size_t l_in_use_max = _in_use_list.max();
1889   out->print_cr("count=" SIZE_FORMAT ", max=" SIZE_FORMAT, l_in_use_count,
1890                 l_in_use_max);
1891 
1892   size_t ck_in_use_count = 0;
1893   MonitorList::Iterator iter = _in_use_list.iterator();
1894   while (iter.has_next()) {
1895     ObjectMonitor* mid = iter.next();
1896     chk_in_use_entry(mid, out, error_cnt_p);
1897     ck_in_use_count++;
1898   }
1899 
1900   if (l_in_use_count == ck_in_use_count) {
1901     out->print_cr("in_use_count=" SIZE_FORMAT " equals ck_in_use_count="
1902                   SIZE_FORMAT, l_in_use_count, ck_in_use_count);
1903   } else {
1904     out->print_cr("WARNING: in_use_count=" SIZE_FORMAT " is not equal to "
1905                   "ck_in_use_count=" SIZE_FORMAT, l_in_use_count,
1906                   ck_in_use_count);
1907   }
1908 
1909   size_t ck_in_use_max = _in_use_list.max();
1910   if (l_in_use_max == ck_in_use_max) {
1911     out->print_cr("in_use_max=" SIZE_FORMAT " equals ck_in_use_max="
1912                   SIZE_FORMAT, l_in_use_max, ck_in_use_max);
1913   } else {
1914     out->print_cr("WARNING: in_use_max=" SIZE_FORMAT " is not equal to "
1915                   "ck_in_use_max=" SIZE_FORMAT, l_in_use_max, ck_in_use_max);
1916   }
1917 }
1918 
1919 // Check an in-use monitor entry; log any errors.
1920 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
1921                                           int* error_cnt_p) {
1922   if (n->owner_is_DEFLATER_MARKER()) {
1923     // This could happen when monitor deflation blocks for a safepoint.
1924     return;
1925   }
1926 
1927   if (n->header().value() == 0) {
1928     out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
1929                   "have non-null _header field.", p2i(n));
1930     *error_cnt_p = *error_cnt_p + 1;
1931   }
1932   const oop obj = n->object_peek();
1933   if (obj != nullptr) {
1934     const markWord mark = obj->mark();
1935     if (!mark.has_monitor()) {
1936       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1937                     "object does not think it has a monitor: obj="
1938                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
1939                     p2i(obj), mark.value());
1940       *error_cnt_p = *error_cnt_p + 1;
1941     }
1942     ObjectMonitor* const obj_mon = mark.monitor();
1943     if (n != obj_mon) {
1944       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1945                     "object does not refer to the same monitor: obj="
1946                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
1947                     INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
1948       *error_cnt_p = *error_cnt_p + 1;
1949     }
1950   }
1951 }
1952 
1953 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
1954 // flags indicate why the entry is in-use, 'object' and 'object type'
1955 // indicate the associated object and its type.
1956 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
1957   if (_in_use_list.count() > 0) {
1958     stringStream ss;
1959     out->print_cr("In-use monitor info:");
1960     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
1961     out->print_cr("%18s  %s  %18s  %18s",
1962                   "monitor", "BHL", "object", "object type");
1963     out->print_cr("==================  ===  ==================  ==================");
1964 
1965     auto is_interesting = [&](ObjectMonitor* monitor) {
1966       return log_all || monitor->has_owner() || monitor->is_busy();
1967     };
1968 
1969     monitors_iterate([&](ObjectMonitor* monitor) {
1970       if (is_interesting(monitor)) {
1971         const oop obj = monitor->object_peek();
1972         const markWord mark = monitor->header();
1973         ResourceMark rm;
1974         out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(monitor),
1975                    monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr,
1976                    p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
1977         if (monitor->is_busy()) {
1978           out->print(" (%s)", monitor->is_busy_to_string(&ss));
1979           ss.reset();
1980         }
1981         out->cr();
1982       }
1983     });
1984   }
1985 
1986   out->flush();
1987 }