/*
 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTag.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
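  // Table nodes are C-heap allocated (mtObjectMonitor); allocate_node and
  // free_node keep _items_count updated so the load factor can be computed
  // without walking the table.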
  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  static void inc_items_count() {
    Atomic::inc(&_items_count);
  }

  static void dec_items_count() {
    Atomic::dec(&_items_count);
  }

  static double get_load_factor() {
    return (double)_items_count / (double)_table_size;
  }

  static size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMTable]: Evaluate the max size.
    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

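  // Example, assuming the default AvgMonitorsPerThreadEstimate of 1024
  // (cf. min_log_size above): with 16 processors the estimate is
  // log2(16) + log2(1024) = 4 + 10 = 14, which is then clamped to
  // [min_log_size(), max_log_size()].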
  static size_t initial_log_size() {
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
          "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
          BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
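        // Transition to blocked and back so a pending safepoint can make
        // progress while the task is paused.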
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor=" PTR_FORMAT ", ", p2i(om));
       st->print("object=" PTR_FORMAT, p2i(obj));
       assert(obj->mark().hash() == om->hash(), "hash must match");
       st->cr();
       return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

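  // Speculatively allocate a monitor with an anonymous owner. If another
  // thread wins the race to insert a monitor for this object, ours is
  // deleted below and the winner's monitor is returned.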
  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_anonymous_owner();

  // Try insert monitor
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  const Klass* monitor_klass = obj->klass();
  if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
    return;
  }
  event->set_monitorClass(monitor_klass);
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Add the hashcode to the monitor to match the object and put it in the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = obj->mark().hash();
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

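  // Loop the CAS: it can fail if other parts of the mark word are updated
  // concurrently; retry with the freshly read mark until the monitor bits
  // are cleared.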
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

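// Collects the entries on the current thread's lock stack whose objects
// already have a monitor (i.e. are contended), skipping adjacent recursive
// duplicates, and inflates them.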
class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

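// RAII helper: on destruction, publishes the monitor passed to set_monitor()
// to the thread's monitor cache and the BasicLock's cache, or clears the
// BasicLock's cache if no monitor was set. Only has an effect when
// UseObjectMonitorTable is enabled.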
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        _thread->om_set_monitor_cache(_monitor);
        _lock->set_object_monitor_cache(_monitor);
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};

class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Spin with exponential backoff, accumulating O(2^log_spin_limit) spins in total.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;
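  // Round i performs 2^i spins, so the rounds accumulate a total of
  // 1 + 2 + ... + 2^(log_spin_limit - 1) < 2^log_spin_limit spins.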

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
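    // Split this round's 2^i spins into chunks of at most
    // 2^log_min_safepoint_check_interval spins, checking for a pending
    // safepoint between chunks.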
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for enters the monitor without contention.
      monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void LightweightSynchronizer::exit(oop object, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for unstructured unlocks; try_recursive_exit
      // could potentially be extended to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, object, mark);
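  // If another thread inflated the monitor while this thread held the fast
  // lock, the owner is still anonymous; claim ownership and fold the
  // lock-stack entries into the monitor's recursion count.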
  if (monitor->has_anonymous_owner()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// LightweightSynchronizer::inflate_locked_or_imse is used to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is called from contexts which require
// an inflated ObjectMonitor* and which must throw a
// java.lang.IllegalMonitorStateException if the monitor is not held by the
// current thread, such as notify/wait and jni_exit. LM_LIGHTWEIGHT maintains
// the invariant that a monitor is only inflated if it is already locked by,
// or in the process of being entered by, the current thread. To preserve this
// invariant we must throw the java.lang.IllegalMonitorStateException before
// inflating when the current thread is not the owner.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {

  // The JavaThread* locking_thread parameter is only used by LM_LIGHTWEIGHT and
  // requires that locking_thread == Thread::current() or that locking_thread is
  // suspended throughout the call by some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is
  // only important for the correctness of the LM_LIGHTWEIGHT algorithm that the
  // thread is set when called from ObjectSynchronizer::enter from the owning
  // thread, ObjectSynchronizer::enter_for from any thread, or
  // ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the locking_thread owns the
    //                   object lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header.   A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      // Interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      continue;
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code, since the ObjectMonitorTable is keyed
  // by the object's identity hash.
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, and the
    // locking_thread currently holds the fast lock on the object. So unless
    // the entry is stale and contains a deflating monitor it must be
    // anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

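  // 'object' is used as a raw oop below, so no safepoint may occur until
  // the verifier is paused and the oop cleared just before the potentially
  // blocking enter at the end of this function.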
  NoSafepointVerifier nsv;

  // Lightweight monitors require that hash codes are installed first
  ObjectSynchronizer::FastHashCode(locking_thread, object);

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = current->om_get_from_monitor_cache(object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is being deflated.
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous
    //                   and the locking_thread owns the object
    //                   lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the
    //                   lock from the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object; a successful CAS means the
    //                   locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  // If quick_enter succeeds in entering the lock, the cache should be in a valid initialized state.
  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32-bit, which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
                                                           ObjectSynchronizer::read_monitor(mark);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      return true;
    }
  }

  // Slow-path.
  return false;
}