/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTag.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

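// Returns the identity hash of obj, which must already be installed. With
// compact object headers the hash may live in a field of the object rather
// than in the mark word.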
static uintx objhash(oop obj) {
  if (UseCompactObjectHeaders) {
    uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
    assert(hash != 0, "should have a hash");
    return hash;
  } else {
    uintx hash = obj->mark().hash();
    assert(hash != 0, "should have a hash");
    return hash;
  }
}

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      return objhash(_obj);
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  static void inc_items_count() {
    Atomic::inc(&_items_count);
  }

  static void dec_items_count() {
    Atomic::dec(&_items_count);
  }

  static double get_load_factor() {
    return (double)_items_count / (double)_table_size;
  }

  static size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMTable]: Evaluate the max size.
    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

  static size_t initial_log_size() {
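    // ~= log2(processor_count * AvgMonitorsPerThreadEstimate), clamped
    // to [min_log_size, max_log_size].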
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
          "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
          BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
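      // Nudge the service thread to perform the resize. If the lock is
      // contended the notification is skipped; the _resize flag stays set
      // and is seen by a later needs_resize() check.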
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

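  // Runs a ConcurrentHashTable task (grow or bulk delete) in chunks,
  // pausing between chunks so the thread can block for safepoints.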
  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
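        // An empty ThreadBlockInVM scope; the transition out of and back
        // into the VM lets pending safepoints and handshakes be processed.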
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor=" PTR_FORMAT ", ", p2i(om));
       st->print("object=" PTR_FORMAT, p2i(obj));
       assert(objhash(obj) == (uintx)om->hash(), "hash must match");
       st->cr();
       return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

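  // Allocate a monitor speculatively and mark it anonymously owned; the
  // anonymous owner keeps it safe from async deflation until a real owner
  // is installed.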
  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_anonymous_owner();

  // Try insert monitor
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Add the hashcode to the monitor to match the object and put it in the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = objhash(obj);
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

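  // Clear the monitor bits; loop because the CAS can fail if other parts
  // of the mark word are updated concurrently.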
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

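// OopClosure that collects the oops on the current thread's lock stack
// whose mark word already has a monitor (skipping adjacent recursive
// entries), then inflates them to free up lock-stack slots.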
class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

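// RAII helper that, when the ObjectMonitorTable is in use, publishes the
// resulting ObjectMonitor to the per-thread and per-BasicLock monitor
// caches on scope exit, or clears the BasicLock cache if no monitor was set.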
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        _thread->om_set_monitor_cache(_monitor);
        _lock->set_object_monitor_cache(_monitor);
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};

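// Verifies that locking_thread is either the current thread or suspended,
// and in the latter case forbids safepoints (debug only) while the
// suspended thread is being operated on.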
class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Will spin with exponential backoff, with a cumulative O(2^spin_limit) spins.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
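    // Round i performs 2^i spins in total, split into chunks of at most
    // 2^log_min_safepoint_check_interval spins, with a safepoint check
    // between chunks.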
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for must enter on an object without contention.
      monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void LightweightSynchronizer::exit(oop object, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for unstructured unlocks; try_recursive_exit
      // could potentially be fixed to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, object, mark);
  if (monitor->has_anonymous_owner()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// LightweightSynchronizer::inflate_locked_or_imse is used to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require
// an inflated ObjectMonitor* for a monitor and expect to throw a
// java.lang.IllegalMonitorStateException if the monitor is not held by the
// current thread, such as notify/wait and jni_exit. LM_LIGHTWEIGHT maintains
// the invariant that it only inflates if the object is already locked by the
// current thread or the current thread is in the process of entering. To
// maintain this invariant we need to throw a
// java.lang.IllegalMonitorStateException before inflating if the current
// thread is not the owner. LightweightSynchronizer::inflate_locked_or_imse
// facilitates this.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {

  // The JavaThread* locking_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that locking_thread == Thread::current() or that locking_thread is suspended
  // throughout the call by some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the locking_thread owns the
    //                   object lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned; this thread
    // is the current holder of the monitor. So unless the entry is stale and
    // contains a deflating monitor, it must be anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;

  // Lightweight monitors require that hash codes are installed first
  ObjectSynchronizer::FastHashCode(locking_thread, object);

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = current->om_get_from_monitor_cache(object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous
    //                   and the locking_thread owns the object
    //                   lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the
    //                   lock from the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object. A successful CAS means the
    //                   locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  // If quick_enter succeeds with entering, the cache should be in a valid initialized state.
  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32bit which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
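    // With the table the mark word does not point at the monitor; rely on
    // the per-thread monitor cache and take the slow path on a miss.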
    ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
                                                           ObjectSynchronizer::read_monitor(mark);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      return true;
    }
  }

  // Slow-path.
  return false;
}

uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
  assert(UseCompactObjectHeaders, "Only with compact i-hash");
  //assert(mark.is_neutral() | mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
  assert(mark.is_hashed(), "only from hashed or copied object");
  if (mark.is_hashed_expanded()) {
    return obj->int_field(klass->hash_offset_in_bytes(obj));
  } else {
    assert(mark.is_hashed_not_expanded(), "must be hashed");
    assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
    // Already marked as hashed, but not yet copied. Recompute hash and return it.
    return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
  }
}

uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
  return get_hash(mark, obj, mark.klass());
}