/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memflags.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

//
// Lightweight synchronization.
//
// When lightweight synchronization needs to use a monitor, the link
// between the object and the monitor is stored in a concurrent hash table
// instead of in the mark word. This has the benefit that it further
// decouples the mark word from the synchronization code.
//
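// The table is keyed by the object's identity hash: a monitor can only be
// installed once its object has a hash (see Lookup::get_hash() and
// LightweightSynchronizer::add_monitor() below), and the same hash is
// mirrored into the ObjectMonitor so entries can also be found by monitor
// (see LookupMonitor).
//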

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorWorld : public CHeapObj<MEMFLAGS::mtObjectMonitor> {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      reinterpret_cast<ObjectMonitorWorld*>(context)->inc_table_count();
      return AllocateHeap(size, MEMFLAGS::mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      reinterpret_cast<ObjectMonitorWorld*>(context)->dec_table_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, MEMFLAGS::mtObjectMonitor>;

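  // _table_count approximates the number of entries (it is maintained by the
  // allocate_node()/free_node() callbacks above), _table_size caches the
  // current bucket count, and _resize flags that a resize has been requested.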
  ConcurrentTable* _table;
  volatile size_t _table_count;
  size_t _table_size;
  volatile bool _resize;

  class Lookup : public StackObj {
    oop _obj;

  public:
    Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      // A cleared object means the entry is going to be removed soon.
      return (*value)->object_is_cleared();
    }
  };

  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

  public:
    LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  void inc_table_count() {
    Atomic::inc(&_table_count);
  }

  void dec_table_count() {
    Atomic::dec(&_table_count);
  }

  double get_load_factor() {
    return (double)_table_count / (double)_table_size;
  }

  size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMWorld]: Evaluate the max size.
    // TODO[OMWorld]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

  static size_t initial_log_size() {
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }
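  // Example for initial_log_size() (illustrative): with 16 processors and
  // AvgMonitorsPerThreadEstimate at its ~1024 default, the estimate is
  // log2(16) + log2(1024) = 4 + 10 = 14, i.e. an initial table of
  // 2^14 buckets, subject to clamp_log_size().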

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

public:
  ObjectMonitorWorld()
  : _table(new ConcurrentTable(initial_log_size(),
                               max_log_size(),
                               grow_hint(),
                               ConcurrentTable::DEFAULT_ENABLE_STATISTICS,
                               ConcurrentTable::DEFAULT_MUTEX_RANK,
                               this)),
    _table_count(0),
    _table_size(table_size()),
    _resize(false) {}

  void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
          "Inconsistency between markWord and OMW table has_monitor: %s monitor: " PTR_FORMAT,
          BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

  void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }
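  // The Service_lock notification above wakes the service thread, which is
  // expected to call LightweightSynchronizer::needs_resize() and
  // resize_table() (see below). The notification is best-effort, hence
  // try_lock(); a missed wakeup is caught by the next should_resize() poll.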

  bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

  template<typename Task, typename... Args>
  bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
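        // An empty ThreadBlockInVM scope is a safepoint/handshake
        // opportunity between work chunks while the task is paused.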
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("omworld");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

  ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
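    // insert_get() either inserts 'monitor' or, if another thread won the
    // race, invokes found_f with the existing entry; 'grow' is set when the
    // table reports that growing is advisable.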
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor " PTR_FORMAT " ", p2i(om));
       st->print("object " PTR_FORMAT, p2i(obj));
       assert(obj->mark().hash() == om->hash(), "hash must match");
       st->cr();
       return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorWorld* LightweightSynchronizer::_omworld = nullptr;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool try_read, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  if (try_read) {
    ObjectMonitor* monitor = get_monitor_from_table(current, object);
    if (monitor != nullptr) {
      *inserted = false;
      return monitor;
    }
  }

  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_owner_anonymous();

  // Try to insert the monitor; if another thread won the race we get its
  // monitor back instead and delete ours.
  ObjectMonitor* monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, const ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, const ObjectSynchronizer::InflateCause cause, bool try_read) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, try_read, &inserted);

  if (inserted) {
    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Set the monitor's hash to match the object's and insert it into the hash table.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = obj->mark().hash();
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return _omworld->monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return _omworld->remove_monitor_entry(current, monitor);
}

void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

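  // cas_set_mark() returns the witnessed mark; loop until the monitor bit is
  // cleared, retrying if other mark word bits change concurrently.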
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  _omworld = new ObjectMonitorWorld();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return _omworld->should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return _omworld->resize(current);
}

class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // assert(VM_Version::supports_recursive_lightweight_locking(), "must be");
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], current, current, ObjectSynchronizer::inflate_cause_vm_internal);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on the lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), current, current, ObjectSynchronizer::inflate_cause_vm_internal);
    }
  }
}

class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        _thread->om_set_monitor_cache(_monitor);
        _lock->set_object_monitor_cache(_monitor);
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};

class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;
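  // Storage for an optional NoSafepointVerifier: the union suppresses default
  // construction, and the verifier is placement-new'ed only when verifying on
  // behalf of another (suspended) thread, where no safepoint may occur.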
  union {
    struct {} _dummy;
    NoSafepointVerifier _nsv;
  };

public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      ::new (&_nsv) NoSafepointVerifier();
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      _nsv.~NoSafepointVerifier();
    }
  }
};

inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Spins with exponential backoff, for a cumulative total of O(2^log_spin_limit) spins.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
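    // The spins are split into inner batches of at most
    // 2^log_min_safepoint_check_interval pauses so that the safepoint poll
    // below runs at a bounded interval even for large total spin counts.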
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), locking_thread, current, ObjectSynchronizer::inflate_cause_monitor_enter);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    // It is assumed that enter_for must enter on an object without contention.
    monitor = inflate_and_enter(obj(), locking_thread, current, ObjectSynchronizer::inflate_cause_monitor_enter);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  current->inc_held_monitor_count();

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), current, current, ObjectSynchronizer::inflate_cause_monitor_enter);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), current, current, ObjectSynchronizer::inflate_cause_monitor_enter);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void LightweightSynchronizer::exit(oop object, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be locked");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for un-structured unlocks; we could potentially
      // fix try_recursive_exit to handle these.
      inflate_fast_locked_object(object, current, current, ObjectSynchronizer::inflate_cause_vm_internal);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "must not be recursive here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, object, mark);
  if (monitor->is_owner_anonymous()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, const ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate.
        return inflate_fast_locked_object(obj, current, current, cause);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->is_owner_anonymous()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it;
          // fix the owner and pop the lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(Thread* current, JavaThread* inflating_thread, oop object, const ObjectSynchronizer::InflateCause cause) {

  // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that the inflating_thread == Thread::current() or is suspended throughout the call by
  // some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
  // JavaThread. (As may still be the case from FastHashCode). However it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the inflating_thread owns the
    //                   object lock, then we make the inflating_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the inflating_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->is_owner_anonymous() &&
          inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(inflating_thread);
        size_t removed = inflating_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the inflating_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the inflating_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
      if (own) {
        // Owned by inflating_thread.
        monitor->set_owner_from(nullptr, inflating_thread);
      } else {
        // Owned by somebody else.
        monitor->set_owner_anonymous();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = inflating_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      // Interference - the markWord changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      delete m;
      m = nullptr;
      continue;
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, JavaThread* locking_thread, JavaThread* current, const ObjectSynchronizer::InflateCause cause) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(current, locking_thread, object, cause);
  }

  // Inflating requires a hash code.
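  // The object monitor table is keyed by the object's identity hash (see
  // ObjectMonitorWorld::Lookup above), so one must be installed first.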
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  ObjectMonitor* monitor;

  for (;;) {
    // Fetch the monitor from the table.
    monitor = get_or_insert_monitor(object, current, cause, true /* try_read */);

    if (monitor->is_owner_anonymous()) {
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "deflation is the only reason a found monitor can already have an owner");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, JavaThread* locking_thread, JavaThread* current, const ObjectSynchronizer::InflateCause cause) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(current, locking_thread, object, cause);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false when it finds the monitor being deflated.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;
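  // From here on 'object' is used as a raw oop, so no safepoint may occur
  // until the verifier is paused just before the blocking enter below.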

  // Lightweight monitors require that hash codes are installed first
  ObjectSynchronizer::FastHashCode(locking_thread, object);

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = current->om_get_from_monitor_cache(object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    monitor = get_or_insert_monitor(object, current, cause, true /* try_read */);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Keeps is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // can either help transition the mark word or yield / spin until further
    // progress has been made.

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous
    //                   and the locking_thread thread owns the object
    //                   lock, then we make the locking_thread thread
    //                   the ObjectMonitor owner and remove the
    //                   lock from the locking_thread thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object; a successful CAS means the
    //                   locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->is_owner_anonymous() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioning from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, obj, monitor);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return _omworld->monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return _omworld->contains_monitor(current, monitor);
}

bool LightweightSynchronizer::quick_enter(oop obj, JavaThread* current, BasicLock* lock) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32-bit, which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    current->inc_held_monitor_count();
    // No monitor is involved, so the CacheSetter destructor clears the
    // object monitor cache.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      current->inc_held_monitor_count();
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
                                                           ObjectSynchronizer::read_monitor(mark);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Slow-path.
  return false;
}