/*
 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTag.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

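// Retrieve the identity hash for obj. The hash must already have been
// installed; with compact object headers it may be stored outside the
// markWord, so it is read via LightweightSynchronizer::get_hash.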
static uintx objhash(oop obj) {
  if (UseCompactObjectHeaders) {
    uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
    assert(hash != 0, "should have a hash");
    return hash;
  } else {
    uintx hash = obj->mark().hash();
    assert(hash != 0, "should have a hash");
    return hash;
  }
}

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      return objhash(_obj);
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };
126
127 static void inc_items_count() {
128 Atomic::inc(&_items_count, memory_order_relaxed);
129 }
130
131 static void dec_items_count() {
132 Atomic::dec(&_items_count, memory_order_relaxed);
133 }
134
135 static double get_load_factor() {
136 size_t count = Atomic::load(&_items_count);
137 return (double)count / (double)_table_size;
138 }
139
140 static size_t table_size(Thread* current = Thread::current()) {
141 return ((size_t)1) << _table->get_size_log2(current);
142 }
143
144 static size_t max_log_size() {
145 // TODO[OMTable]: Evaluate the max size.
146 // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
147 // Using MaxHeapSize directly this early may be wrong, and there
148 // are definitely rounding errors (alignment).
149 const size_t max_capacity = MaxHeapSize;
150 const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
151 const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
152 const size_t log_max_objects = log2i_graceful(max_objects);
153
154 return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
155 }
156
157 static size_t min_log_size() {
158 // ~= log(AvgMonitorsPerThreadEstimate default)
159 return 10;
160 }
161
162 template<typename V>
163 static size_t clamp_log_size(V log_size) {
164 return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
165 }
166
167 static size_t initial_log_size() {
168 const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
169 return clamp_log_size(estimate);
170 }
171
172 static size_t grow_hint () {
173 return ConcurrentTable::DEFAULT_GROW_HINT;
174 }
175

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
             "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
             BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

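  // Look up the ObjectMonitor associated with obj.
  // Returns nullptr if no monitor is installed for obj.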
  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

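  // Hint the service thread to grow the table. The request flag (_resize)
  // stays set until a resize attempt completes, so repeated hints do not
  // pile up notifications.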
  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

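  // Drives a ConcurrentHashTable task (grow or bulk delete) to completion,
  // pausing between chunks of work so the thread can block for safepoints
  // in the transient ThreadBlockInVM below.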
  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

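  // Insert the monitor keyed by obj, or return the existing monitor if a
  // racing thread installed one first. May signal the service thread to
  // grow the table when the underlying table reports that growing is
  // warranted.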
  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
      ObjectMonitor* om = *entry;
      oop obj = om->object_peek();
      st->print("monitor=" PTR_FORMAT ", ", p2i(om));
      st->print("object=" PTR_FORMAT, p2i(obj));
      assert(objhash(obj) == (uintx)om->hash(), "hash must match");
      st->cr();
      return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_anonymous_owner();

  // Try to insert the monitor. If a racing thread won, add_monitor returns
  // the winner's monitor and ours is deleted below.
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  const Klass* monitor_klass = obj->klass();
  if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
    return;
  }
  event->set_monitorClass(monitor_klass);
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Set the monitor's hash to match the object's identity hash and insert
// the monitor into the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = objhash(obj);
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

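// Restore obj's markWord to the unlocked state, preserving the installed
// hash. Loops because unrelated mark bits may be updated concurrently.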
void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

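// OopClosure that collects the contended oops (those whose mark has an
// inflated monitor) on the current thread's lock stack, then inflates the
// corresponding fast locks into held monitors.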
class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

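// RAII helper which, on destruction, publishes the monitor used for this
// enter (if any) to the per-thread OMCache and the BasicLock cache, or
// clears the BasicLock cache when no monitor was used.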
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        // If the monitor is already in the BasicLock cache then it is most
        // likely also in the thread cache; do not set it again to avoid reordering.
        if (_monitor != _lock->object_monitor_cache()) {
          _thread->om_set_monitor_cache(_monitor);
          _lock->set_object_monitor_cache(_monitor);
        }
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};

// Reads first from the BasicLock cache then from the OMCache in the current thread.
// C2 fast-path may have put the monitor in the cache in the BasicLock.
inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
  ObjectMonitor* monitor = lock->object_monitor_cache();
  if (monitor == nullptr) {
    monitor = current->om_get_from_monitor_cache(object);
  }
  return monitor;
}

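// Debug helper: when locking on behalf of another (suspended) thread, the
// current thread must not safepoint; this is checked in debug builds via
// the no_safepoint counter.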
class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

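// Attempt to fast-lock obj: CAS the markWord from unlocked to fast-locked
// and push obj onto the lock stack. Returns false once obj is observed in
// any state other than unlocked.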
inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Will spin with exponential backoff, accumulating O(2^spin_limit) spins in total.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while the monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for must enter on an object without contention.
      monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void LightweightSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for un-structured unlocks; try_recursive_exit
      // could potentially be fixed to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor;
  if (UseObjectMonitorTable) {
    monitor = read_caches(current, lock, object);
    if (monitor == nullptr) {
      monitor = get_monitor_from_table(current, object);
    }
  } else {
    monitor = ObjectSynchronizer::read_monitor(mark);
  }
  if (monitor->has_anonymous_owner()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// LightweightSynchronizer::inflate_locked_or_imse is used to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require
// an inflated ObjectMonitor* for a monitor and expect to throw a
// java.lang.IllegalMonitorStateException if it is not held by the current
// thread, such as notify/wait and jni_exit. LM_LIGHTWEIGHT keeps the
// invariant that a monitor is only inflated if it is already locked by the
// current thread or the current thread is in the process of entering. To
// maintain this invariant we need to throw a
// java.lang.IllegalMonitorStateException before inflating if the current
// thread is not the owner. LightweightSynchronizer::inflate_locked_or_imse
// facilitates this.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {

  // The JavaThread* locking_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that locking_thread == Thread::current() or is suspended throughout the call by
  // some other mechanism.
  // Even with LM_LIGHTWEIGHT the locking_thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the locking_thread owns the
    //                   object lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and the object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      // Interference - the markWord changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      delete m;
      m = nullptr;
      continue;
    }

    // Once the ObjectMonitor is configured and the object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, and this
    // thread is the current holder of the lock. So unless the entry is
    // stale and contains a deflating monitor it must be anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread'.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = read_caches(current, lock, object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    // Lightweight monitors require that hash codes are installed first
    ObjectSynchronizer::FastHashCode(locking_thread, object);
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    // Clear the BasicLock cache as it may contain this monitor.
    lock->clear_object_monitor_cache();

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous
    //                   and the locking_thread owns the object
    //                   lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the
    //                   lock from the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object; a successful CAS means
    //                   the monitor is locked.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioning from unlocked to monitor means the locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

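// Called during deflation: restore the object's markWord (if the object is
// still alive) and remove the monitor's entry from the table.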
void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32bit which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* monitor;
    if (UseObjectMonitorTable) {
      monitor = read_caches(current, lock, obj);
    } else {
      monitor = ObjectSynchronizer::read_monitor(mark);
    }

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (UseObjectMonitorTable) {
      // Set the monitor regardless of success.
      // Either we successfully lock on the monitor, or we retry with the
      // monitor in the slow path. If the monitor gets deflated, it will be
      // cleared, either by the CacheSetter if we fast lock in enter or in
      // inflate_and_enter when we see that the monitor is deflated.
      lock->set_object_monitor_cache(monitor);
    }

    if (monitor->spin_enter(current)) {
      return true;
    }
  }

  // Slow-path.
  return false;
}

uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
  assert(UseCompactObjectHeaders, "Only with compact i-hash");
  //assert(mark.is_neutral() || mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
  assert(mark.is_hashed(), "only from hashed or copied object");
  if (mark.is_hashed_expanded()) {
    return obj->int_field(klass->hash_offset_in_bytes(obj, mark));
  } else {
    assert(mark.is_hashed_not_expanded(), "must be hashed");
    assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
    // Already marked as hashed, but not yet copied. Recompute hash and return it.
    return ObjectSynchronizer::get_next_hash(nullptr, obj);  // recompute hash
  }
}

uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
  return get_hash(mark, obj, mark.klass());
}