/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/globalDefinitions.hpp"


//
// Lightweight synchronization.
//
// When lightweight synchronization needs to use a monitor, the link between
// the object and the monitor is stored in a concurrent hash table instead of
// in the mark word. This has the benefit that it further decouples the mark
// word from the synchronization code.
//
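// The table is keyed by the object's identity hash, which is installed in
// the mark word before a monitor is inserted (see FastHashCode below). An
// entry becomes dead, and is eligible for removal, once its monitor no
// longer refers to a live object.
//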

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorWorld : public CHeapObj<mtOMWorld> {
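  // Table configuration: the values are the ObjectMonitor*s themselves,
  // hashed by the identity hash they were tagged with (see add_monitor),
  // and nodes are allocated from C-heap.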
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      return AllocateHeap(size, mtOMWorld);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, mtOMWorld>;

  ConcurrentTable* _table;
  volatile bool _resize;
  uint32_t _shrink_count;

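  // Find the entry for an object being synchronized on. Requires that the
  // object's identity hash is already installed in its mark word.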
  class Lookup : public StackObj {
    oop _obj;

  public:
    Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    bool equals(ObjectMonitor** value) {
      // The entry is going to be removed soon.
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_cleared();
    }
  };

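  // Find the entry for an ObjectMonitor* directly. Used when removing or
  // checking for a monitor without going through the (possibly dead) object.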
  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

  public:
    LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  static size_t max_log_size() {
    // TODO[OMWorld]: Evaluate the max size.
    // TODO[OMWorld]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // TODO[OMWorld]: Evaluate the min size, currently ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

  static size_t initial_log_size() {
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }
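  // For example, with the default AvgMonitorsPerThreadEstimate of 1024 on a
  // 16-processor machine: log2(16) + log2(1024) = 4 + 10 = 14, giving an
  // initial table of 2^14 buckets, subject to clamping between
  // min_log_size() and max_log_size().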

  static size_t grow_hint() {
    // TODO[OMWorld]: Evaluate why 4 is a good grow hint.
    //                Have seen grow hint hits when lower with a
    //                load factor as low as 0.1. (Grow Hint = 3)
    // TODO[OMWorld]: Evaluate the hash code used; are large buckets
    //                expected even with a low load factor, or is it
    //                something with the hashing used?
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

  static size_t log_shrink_difference() {
    // TODO[OMWorld]: Evaluate shrink heuristics, currently disabled by
    //                default, and only really shrinks if AvgMonitorsPerThreadEstimate
    //                is also set to a non-default value
    return 2;
  }

public:
  ObjectMonitorWorld()
  : _table(new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint())),
    _resize(false),
    _shrink_count(0) {}

  void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
          "Inconsistency between markWord and OMW table has_monitor: %s monitor: " PTR_FORMAT,
          BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

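  // Request a grow from the MonitorDeflation thread, which performs all
  // resizing of the table. The _resize flag records that a grow hint was
  // seen; on a successful resize() it is cleared again unless the table has
  // reached its maximum size.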
  void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (MonitorDeflation_lock->try_lock()) {
        MonitorDeflation_lock->notify();
        MonitorDeflation_lock->unlock();
      }
    }
  }

  void set_table_max(JavaThread* current) {
    while (!_table->is_max_size_reached()) {
      _table->grow(current);
    }
  }

  bool needs_shrink(size_t log_target, size_t log_size) {
    return OMShrinkCHT && log_target + log_shrink_difference() <= log_size;
  }

  bool needs_grow(size_t log_target, size_t log_size) {
    return log_size < log_target;
  }

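  // The table's target size is derived from the in-use monitor statistics:
  // two log2 steps (4x) above max(ceiling, max), clamped, so the load
  // factor stays low even if every in-use monitor ends up in the table.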
  bool needs_resize(JavaThread* current, size_t ceiling, size_t count, size_t max) {
    const size_t log_size = _table->get_size_log2(current);
    const int log_ceiling = log2i_graceful(ceiling);
    const int log_max = log2i_graceful(max);
    const size_t log_count = log2i(MAX2(count, size_t(1)));
    const size_t log_target = clamp_log_size(MAX2(log_ceiling, log_max) + 2);

    return needs_grow(log_target, log_size) || needs_shrink(log_target, log_size) || Atomic::load(&_resize);
  }

  bool resize(JavaThread* current, size_t ceiling, size_t count, size_t max) {
    const size_t log_size = _table->get_size_log2(current);
    const int log_ceiling = log2i_graceful(ceiling);
    const int log_max = log2i_graceful(max);
    const size_t log_count = log2i(MAX2(count, size_t(1)));
    const size_t log_target = clamp_log_size(MAX2(log_ceiling, log_max) + 2);
    LogTarget(Info, monitorinflation) lt;

    auto print_table_stats = [&]() {
      ResourceMark rm;
      LogStream ls(lt);
      auto vs_f = [](Config::Value* v) { return sizeof(Config::Value); };
      _table->statistics_to(current, vs_f, &ls, "ObjectMonitorWorld");
    };

    bool success = true;

    if (needs_grow(log_target, log_size)) {
      // Grow
      lt.print("Growing %02zu->%02zu", log_size, log_target);
      success = _table->grow(current, log_target);
      print_table_stats();
    } else if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
      lt.print("WARNING: Getting resize hints with Size: %02zu Ceiling: %2i Target: %02zu", log_size, log_ceiling, log_target);
      print_table_stats();
      success = false;
    }

    if (needs_shrink(log_target, log_size)) {
      _shrink_count++;
      // Shrink
      lt.print("Shrinking %02zu->%02zu", log_size, log_target);
      success = _table->shrink(current, log_target);
      print_table_stats();
    }

    if (success) {
      Atomic::store(&_resize, _table->is_max_size_reached());
    }

    return success;
  }

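  // Insert a monitor for obj, or return the winning monitor if another
  // thread raced and inserted an entry for the same object first. A grow
  // hint from the table is forwarded to the MonitorDeflation thread.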
  ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
      ObjectMonitor* om = *entry;
      oop obj = om->object_peek();
      st->print("monitor " PTR_FORMAT " ", p2i(om));
      st->print("object " PTR_FORMAT, p2i(obj));
      assert(obj == nullptr || obj->mark().hash() == om->hash(), "hash must match");
      st->cr();
      return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorWorld* LightweightSynchronizer::_omworld = nullptr;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool try_read, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  if (try_read) {
    ObjectMonitor* monitor = read_monitor(current, object);
    if (monitor != nullptr) {
      *inserted = false;
      return monitor;
    }
  }

  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_owner_anonymous();

  // Try insert monitor
  ObjectMonitor* monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, const ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, const ObjectSynchronizer::InflateCause cause, bool try_read) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, try_read, &inserted);

  if (inserted) {
    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Set the monitor's hash to the object's identity hash and insert the
// monitor into the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = obj->mark().hash();
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return _omworld->monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return _omworld->remove_monitor_entry(current, monitor);
}

void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must use lightweight locking");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  _omworld = new ObjectMonitorWorld();

  if (!FLAG_IS_CMDLINE(AvgMonitorsPerThreadEstimate)) {
    // This is updated after the ceiling is set and ObjectMonitorWorld is created.
    // TODO[OMWorld]: Clean this up and find a good initial ceiling,
    //                and initial HashTable size
    FLAG_SET_ERGO(AvgMonitorsPerThreadEstimate, 0);
  }
}

void LightweightSynchronizer::set_table_max(JavaThread* current) {
  if (LockingMode != LM_LIGHTWEIGHT) {
    return;
  }
  _omworld->set_table_max(current);
}

bool LightweightSynchronizer::needs_resize(JavaThread* current) {
  if (LockingMode != LM_LIGHTWEIGHT) {
    return false;
  }
  return _omworld->needs_resize(current,
                                ObjectSynchronizer::in_use_list_ceiling(),
                                ObjectSynchronizer::_in_use_list.count(),
                                ObjectSynchronizer::_in_use_list.max());
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (LockingMode != LM_LIGHTWEIGHT) {
    return true;
  }
  return _omworld->resize(current,
                          ObjectSynchronizer::in_use_list_ceiling(),
                          ObjectSynchronizer::_in_use_list.count(),
                          ObjectSynchronizer::_in_use_list.max());
}

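// Collects the distinct oops on a thread's lock stack whose marks already
// have a monitor (i.e. were inflated by contending threads) and completes
// their inflation, moving them from the lock stack into their monitors.
// Adjacent duplicates are recursive entries of the same lock and are
// collected only once.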
class LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // assert(VM_Version::supports_recursive_lightweight_locking(), "must be");
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* locking_thread, JavaThread* current) {
    locking_thread->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], locking_thread, current, ObjectSynchronizer::inflate_cause_vm_internal);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current, current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), current, current, ObjectSynchronizer::inflate_cause_vm_internal);
    }
  }
}

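// RAII helper that publishes the ObjectMonitor used for a successful enter
// to the thread's and the BasicLock's monitor caches on destruction, or
// clears the BasicLock's cache if no monitor was needed. The caches feed
// the quick_enter() fast path.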
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    if (_monitor != nullptr) {
      _thread->om_set_monitor_cache(_monitor);
      _lock->set_object_monitor_cache(_monitor);
    } else {
      _lock->clear_object_monitor_cache();
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }
};

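// Asserts that when one thread locks on behalf of another (locking_thread !=
// current, e.g. during deoptimization), the target thread cannot run
// concurrently, and verifies that no safepoint can intervene while doing so.
// The union delays construction of the NoSafepointVerifier to that case only.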
class VerifyThreadState {
  bool _no_safepoint;
  union {
    struct {} _dummy;
    NoSafepointVerifier _nsv;
  };

public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      ::new (&_nsv) NoSafepointVerifier();
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      _nsv.~NoSafepointVerifier();
    }
  }
};

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  // TODO[OMWorld]: Is this necessary?
  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  locking_thread->inc_held_monitor_count();

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), locking_thread, current, ObjectSynchronizer::inflate_cause_monitor_enter);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    // enter_for is assumed to enter the monitor without contention.
    // TODO[OMWorld]: We also assume that this re-lock is on either a new never
    //                inflated monitor, or one that is already locked by the
    //                locking_thread. Should we have this stricter restriction?
    monitor = inflate_and_enter(obj(), locking_thread, current, ObjectSynchronizer::inflate_cause_monitor_enter);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  current->inc_held_monitor_count();

  CacheSetter cache_setter(current, lock);

  SpinYield spin_yield(0, 2);
  bool first_time = true;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // TODO[OMWorld]: Maybe guard this by the value in the markWord (only if fast locked).
    //                Currently this is done when exiting. Doing it early could remove up to
    //                LockStack::CAPACITY - 1 slow paths in the best case, but we need to fix
    //                some of the inflation counters for this change.

    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), current, current, ObjectSynchronizer::inflate_cause_monitor_enter);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  const int spins = OMSpins;
  const int yields = OMYields;

  while (true) {

    SpinYield fast_lock_spin_yield(spins, yields);
    // Fast-locking does not use the 'lock' argument.
    markWord mark = obj()->mark_acquire();
    const bool try_spin = !first_time || !mark.has_monitor();
    for (int attempts = spins + yields; try_spin && attempts > 0; attempts--) {
      while (mark.is_unlocked()) {
        ensure_lock_stack_space(current);
        assert(!lock_stack.is_full(), "must have made room on the lock stack");
        assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
        // Try to swing into 'fast-locked' state.
        markWord locked_mark = mark.set_fast_locked();
        markWord old_mark = mark;
        mark = obj()->cas_set_mark(locked_mark, old_mark);
        if (old_mark == mark) {
          // Successfully fast-locked, push object to lock-stack and return.
          lock_stack.push(obj());
          return;
        }
      }

      fast_lock_spin_yield.wait();
      mark = obj()->mark_acquire();
    }

    if (!first_time) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), current, current, ObjectSynchronizer::inflate_cause_monitor_enter);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    first_time = false;
  }
}

void LightweightSynchronizer::exit(oop object, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be locked");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for un-structured unlocks; try_recursive_exit
      // could potentially be fixed to handle these.
      inflate_fast_locked_object(object, current, current, ObjectSynchronizer::inflate_cause_vm_internal);
    }
  }

  // Try to swing the mark word from fast-locked back to unlocked; loop to
  // handle concurrent updates to other parts of the mark word.
  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The object is inflated; exit through the ObjectMonitor from the table.
  ObjectMonitor* monitor = read_monitor(current, object);
  if (monitor->is_owner_anonymous()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
    current->_contended_inflation++;
  }

  monitor->exit(current);
}

// TODO[OMWorld]: Rename this. No idea what to call it; used by notify/notifyAll/wait and JNI exit.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, const ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, current, current, cause);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = read_monitor(current, obj);
    if (monitor != nullptr) {
      if (monitor->is_owner_anonymous()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it;
          // fix the owner and pop the lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
          current->_contended_inflation++;
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, JavaThread* locking_thread, JavaThread* current, const ObjectSynchronizer::InflateCause cause) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  // Inflating requires a hash code
  FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  ObjectMonitor* monitor;

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause, true /* try_read */);

    if (monitor->is_owner_anonymous()) {
      assert(monitor == read_monitor(current, object), "The monitor must be in the table");
      // New fresh monitor
      break;
    }

    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    locking_thread->om_set_monitor_cache(monitor);
  }

  if (cause == ObjectSynchronizer::inflate_cause_wait) {
    locking_thread->_wait_inflation++;
  } else if (cause == ObjectSynchronizer::inflate_cause_monitor_enter) {
    locking_thread->_recursive_inflation++;
  } else if (cause == ObjectSynchronizer::inflate_cause_vm_internal) {
    locking_thread->_lock_stack_inflation++;
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, JavaThread* locking_thread, JavaThread* current, const ObjectSynchronizer::InflateCause cause) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  NoSafepointVerifier nsv;

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  // Lightweight monitors require that hash codes are installed first
  FastHashCode(locking_thread, object);

  ObjectMonitor* monitor = nullptr;

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = current->om_get_from_monitor_cache(object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    monitor = get_or_insert_monitor(object, current, cause, true /* try_read */);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // can either help transition the mark word or yield / spin until further
    // progress has been made.

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous and the
    //                   locking_thread owns the object lock, then we make
    //                   the locking_thread the ObjectMonitor owner and
    //                   remove the lock from the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object. A successful CAS means the
    //                   locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->is_owner_anonymous() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
        locking_thread->_contended_recursive_inflation++;
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
        locking_thread->_recursive_inflation++;
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    locking_thread->_unlocked_inflation++;

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(locking_thread, current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, obj, monitor);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

void LightweightSynchronizer::deflate_anon_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

  while (mark.has_monitor()) {
    const markWord new_mark = mark.set_fast_locked();
    mark = obj->cas_set_mark(new_mark, mark);
  }

  bool removed = remove_monitor(current, obj, monitor);
  assert(removed, "Should have removed the entry");
}

ObjectMonitor* LightweightSynchronizer::read_monitor(Thread* current, oop obj) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  return _omworld->monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  return _omworld->contains_monitor(current, monitor);
}

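// Install an identity hash in the object's mark word if it does not already
// have one. The hash doubles as the key for the ObjectMonitorWorld table,
// so it must be in place before a monitor can be inserted for the object.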
intptr_t LightweightSynchronizer::FastHashCode(Thread* current, oop obj) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  markWord mark = obj->mark_acquire();
  for (;;) {
    intptr_t hash = mark.hash();
    if (hash != 0) {
      return hash;
    }

    hash = ObjectSynchronizer::get_next_hash(current, obj);
    const markWord old_mark = mark;
    const markWord new_mark = old_mark.copy_set_hash(hash);

    mark = obj->cas_set_mark(new_mark, old_mark);
    if (old_mark == mark) {
      return hash;
    }
  }
}

bool LightweightSynchronizer::quick_enter(oop obj, JavaThread* current, BasicLock* lock) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    current->inc_held_monitor_count();
    return true;
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const monitor = current->om_get_from_monitor_cache(obj);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Slow-path.
  return false;
}