/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memflags.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, MEMFLAGS::mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, MEMFLAGS::mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

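  // Lookup adapter used to find the ObjectMonitor for a given object. The
  // hash comes from the object's markWord, so the object must already have
  // an identity hash installed.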
  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

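  // Lookup adapter keyed on the ObjectMonitor itself, used for removal and
  // containment checks. An entry is dead once its object has been collected.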
  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  static void inc_items_count() {
    Atomic::inc(&_items_count);
  }

  static void dec_items_count() {
    Atomic::dec(&_items_count);
  }

  static double get_load_factor() {
    return (double)_items_count / (double)_table_size;
  }

  static size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMTable]: Evaluate the max size.
    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

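  // Scale the initial size with both the processor count and the expected
  // number of monitors per thread, clamped to [min_log_size, max_log_size].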
  static size_t initial_log_size() {
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
          "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
          BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

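  // Request a grow by waking the service thread; try_lock is used so the
  // inserting thread never blocks on the Service_lock here.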
  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

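  // Grow once the number of entries exceeds 75% of the table size.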
  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
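        // The empty ThreadBlockInVM scope transitions the thread to blocked
        // and back, giving a pending safepoint a chance to run while the
        // task is paused.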
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor=" PTR_FORMAT ", ", p2i(om));
       st->print("object=" PTR_FORMAT, p2i(obj));
       assert(obj->mark().hash() == om->hash(), "hash must match");
       st->cr();
       return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

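  // Speculatively allocate a monitor. It is inserted as anonymously owned,
  // which keeps it safe from async deflation until the caller claims
  // ownership.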
  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_owner_anonymous();

  // Try insert monitor
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Set the monitor's hash to match the object's hash, then insert the monitor into the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = obj->mark().hash();
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

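  // Clear the monitor bits from the mark word. The CAS loop tolerates
  // concurrent updates to unrelated parts of the mark word.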
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

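// Collects the objects on the current thread's lock stack whose locks are
// contended (i.e. already have a monitor) and inflates them, freeing up
// lock-stack slots.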
class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

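// RAII helper that, on scope exit, publishes the acquired ObjectMonitor (if
// any) to the thread-local monitor cache and the BasicLock cache, or clears
// the BasicLock cache when no monitor was used.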
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        _thread->om_set_monitor_cache(_monitor);
        _lock->set_object_monitor_cache(_monitor);
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }
};

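// Checks that locking_thread is safe to lock for: either it is the current
// thread, or it is suspended and cannot run concurrently. In the latter
// case no safepoint may occur (verified in debug builds).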
class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Spins with exponential backoff, accumulating O(2^spin_limit) spins in total.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    // enter_for is assumed to enter the lock on the object without contention.
    monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  current->inc_held_monitor_count();

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void LightweightSynchronizer::exit(oop object, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for unstructured unlocks; try_recursive_exit
      // could potentially be fixed to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, object, mark);
  if (monitor->is_owner_anonymous()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// LightweightSynchronizer::inflate_locked_or_imse is used to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require
// an inflated ObjectMonitor* for a monitor and expect to throw a
// java.lang.IllegalMonitorStateException if it is not held by the current
// thread, such as notify/wait and jni_exit. LM_LIGHTWEIGHT keeps the
// invariant that it only inflates if the object is already locked by the
// current thread or the current thread is in the process of entering. To
// maintain this invariant we need to throw a
// java.lang.IllegalMonitorStateException before inflating if the current
// thread is not the owner.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->is_owner_anonymous()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* inflating_thread, Thread* current) {

  // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that inflating_thread == Thread::current() or is suspended throughout the call by
  // some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the inflating_thread owns the
    //                   object lock, then we make the inflating_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the inflating_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->is_owner_anonymous() &&
          inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(inflating_thread);
        size_t removed = inflating_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the inflating_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the inflating_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
      if (own) {
        // Owned by inflating_thread.
        monitor->set_owner_from(nullptr, inflating_thread);
      } else {
        // Owned by somebody else.
        monitor->set_owner_anonymous();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = inflating_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

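  // Retry until the table yields a monitor that is not concurrently being
  // deflated.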
  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, and this
    // thread is the current holder of the lock. So unless the entry is
    // stale and contains a deflating monitor it must be anonymously owned.
    if (monitor->is_owner_anonymous()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

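  // From here until the monitor is entered (or the attempt is abandoned) no
  // safepoint may occur; the verifier is paused below before potentially
  // blocking in the monitor.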
  NoSafepointVerifier nsv;

  // Lightweight monitors require that hash codes are installed first
  ObjectSynchronizer::FastHashCode(locking_thread, object);

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = current->om_get_from_monitor_cache(object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Keeps is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous
    //                   and the locking_thread owns the object
    //                   lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the
    //                   lock from the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object. A successful CAS
    //                   means the locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->is_owner_anonymous() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

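// Deflation support: restore the object's mark word (if the object is still
// alive) and remove the monitor's entry from the table.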
void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

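// Fast path used while the thread is _thread_in_Java; it must not block or
// safepoint. Returning false sends the caller down the slow path.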
bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  // If quick_enter succeeds with entering, the cache should be in a valid initialized state.
  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32bit which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    current->inc_held_monitor_count();
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      current->inc_held_monitor_count();
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
                                                           ObjectSynchronizer::read_monitor(mark);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Slow-path.
  return false;
}