/*
 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTag.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

// ConcurrentHashTable storing links from objects to ObjectMonitors
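// The table is keyed by the object's identity hash: Lookup hashes an object
// via its mark word, while LookupMonitor uses the hash cached in the
// ObjectMonitor, so an entry can be found either from the object or from
// the monitor itself.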
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  static void inc_items_count() {
    Atomic::inc(&_items_count, memory_order_relaxed);
  }

  static void dec_items_count() {
    Atomic::dec(&_items_count, memory_order_relaxed);
  }

  static double get_load_factor() {
    size_t count = Atomic::load(&_items_count);
    return (double)count / (double)_table_size;
  }

  static size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMTable]: Evaluate the max size.
    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

  static size_t initial_log_size() {
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
             "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
             BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

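  // Requests a grow and notifies the Service_lock so a waiting thread can
  // pick up the resize request (see should_resize() and resize()).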
  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
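        // Transiently transition to blocked-in-VM in the empty scope below,
        // so a pending safepoint can proceed between task chunks.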
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
      ObjectMonitor* om = *entry;
      oop obj = om->object_peek();
      st->print("monitor=" PTR_FORMAT ", ", p2i(om));
      st->print("object=" PTR_FORMAT, p2i(obj));
      assert(obj->mark().hash() == om->hash(), "hash must match");
      st->cr();
      return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_anonymous_owner();

  // Try insert monitor
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  const Klass* monitor_klass = obj->klass();
  if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
    return;
  }
  event->set_monitorClass(monitor_klass);
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Set the monitor's hash to match the object's hashcode, then insert the
// monitor into the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = obj->mark().hash();
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

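  // CAS the lock bits back to unlocked, retrying if the mark word changes
  // concurrently; the loop exits once the monitor bits have been cleared.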
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

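// Inflates every object on the current thread's lock stack whose mark word
// already carries a monitor (i.e. the lock is contended), freeing up
// lock-stack slots.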
class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

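// RAII helper: on destruction, publishes the monitor set via set_monitor()
// to both the BasicLock cache and the per-thread OMCache, or clears the
// BasicLock cache if no monitor was set.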
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        // If the monitor is already in the BasicLock cache then it is most
        // likely in the thread cache as well; do not set it again, to avoid
        // reordering.
        if (_monitor != _lock->object_monitor_cache()) {
          _thread->om_set_monitor_cache(_monitor);
          _lock->set_object_monitor_cache(_monitor);
        }
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};

// Reads first from the BasicLock cache then from the OMCache in the current thread.
// C2 fast-path may have put the monitor in the cache in the BasicLock.
inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
  ObjectMonitor* monitor = lock->object_monitor_cache();
  if (monitor == nullptr) {
    monitor = current->om_get_from_monitor_cache(object);
  }
  return monitor;
}

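// Debug helper: when locking on behalf of another (suspended) thread, the
// current thread must not safepoint while the other thread's lock stack is
// being manipulated; this is asserted via the no-safepoint counter.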
class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

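// Attempts to fast-lock an unlocked object: makes room on the lock stack if
// needed, CASes the mark word to the fast-locked state and pushes the object.
// Returns false if the mark word is not (or stops being) unlocked.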
inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Spins with exponential backoff, for a cumulative total of O(2^log_spin_limit) spins.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for enters the monitor without contention.
      monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

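// Exits the lock: first tries a recursive or plain fast-unlock via the lock
// stack; otherwise the lock is inflated and ObjectMonitor::exit is used.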
void LightweightSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for un-structured unlocks; try_recursive_exit could
      // potentially be fixed to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor;
  if (UseObjectMonitorTable) {
    monitor = read_caches(current, lock, object);
    if (monitor == nullptr) {
      monitor = get_monitor_from_table(current, object);
    }
  } else {
    monitor = ObjectSynchronizer::read_monitor(mark);
  }
  if (monitor->has_anonymous_owner()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// LightweightSynchronizer::inflate_locked_or_imse is used to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require
// an inflated ObjectMonitor* for a monitor, and which expect to throw a
// java.lang.IllegalMonitorStateException if the monitor is not held by the
// current thread, such as notify/wait and jni_exit. LM_LIGHTWEIGHT maintains
// the invariant that it only inflates if the object is already locked by the
// current thread or the current thread is in the process of entering. To
// maintain this invariant we need to throw a
// java.lang.IllegalMonitorStateException before inflating if the current
// thread is not the owner.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {

  // The JavaThread* locking_thread parameter is only used by LM_LIGHTWEIGHT and
  // requires that locking_thread == Thread::current() or that the locking_thread
  // is suspended throughout the call by some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is
  // only important for the correctness of the LM_LIGHTWEIGHT algorithm that the
  // thread is set when called from ObjectSynchronizer::enter from the owning
  // thread, ObjectSynchronizer::enter_for from any thread, or
  // ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // * inflated     - Just return if using stack-locking.
    //                  If using fast-locking and the ObjectMonitor owner
    //                  is anonymous and the locking_thread owns the
    //                  object lock, then we make the locking_thread
    //                  the ObjectMonitor owner and remove the lock from
    //                  the locking_thread's lock stack.
    // * fast-locked  - Coerce it to inflated from fast-locked.
    // * unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue; // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, and this thread
    // is the current holder of the lock. So unless the entry is stale and
    // contains a deflating monitor it must be anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

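// Inflates the object's lock and enters the resulting monitor. Returns the
// entered monitor, or nullptr if a deflated monitor was encountered (callers
// are expected to retry, see enter()).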
ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = read_caches(current, lock, object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    // Lightweight monitors require that hash codes are installed first
    ObjectSynchronizer::FastHashCode(locking_thread, object);
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated.
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    // Clear the BasicLock cache as it may contain this monitor.
    lock->clear_object_monitor_cache();

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // * inflated     - If the ObjectMonitor owner is anonymous
    //                  and the locking_thread owns the object
    //                  lock, then we make the locking_thread
    //                  the ObjectMonitor owner and remove the
    //                  lock from the locking_thread's lock stack.
    // * fast-locked  - Coerce it to inflated from fast-locked.
    // * neutral      - Inflate the object. A successful CAS means the
    //                  locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch the case where the object's header is not neutral; "not locked
    // and not marked" is what we care about here.
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

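// Used by monitor deflation: restores the object's mark word (if the object
// is still alive) and removes the monitor's entry from the table.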
void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

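// Fast path used while the thread is still _thread_in_Java; must not block
// or safepoint. Returns true if the lock was acquired, false to take the
// slow path.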
bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32bit which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* monitor;
    if (UseObjectMonitorTable) {
      monitor = read_caches(current, lock, obj);
    } else {
      monitor = ObjectSynchronizer::read_monitor(mark);
    }

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (UseObjectMonitorTable) {
      // Set the monitor regardless of success.
      // Either we successfully lock on the monitor, or we retry with the
      // monitor in the slow path. If the monitor gets deflated, it will be
      // cleared, either by the CacheSetter if we fast lock in enter or in
      // inflate_and_enter when we see that the monitor is deflated.
      lock->set_object_monitor_cache(monitor);
    }

    if (monitor->spin_enter(current)) {
      return true;
    }
  }

  // Slow-path.
  return false;
}